1 /* 2 * Copyright 2013 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <drm/drmP.h> 26 #include "radeon.h" 27 #include "radeon_asic.h" 28 #include "radeon_ucode.h" 29 #include "cikd.h" 30 #include "r600_dpm.h" 31 #include "ci_dpm.h" 32 #include "ni_dpm.h" 33 #include "atom.h" 34 #include <linux/seq_file.h> 35 36 #define MC_CG_ARB_FREQ_F0 0x0a 37 #define MC_CG_ARB_FREQ_F1 0x0b 38 #define MC_CG_ARB_FREQ_F2 0x0c 39 #define MC_CG_ARB_FREQ_F3 0x0d 40 41 #define SMC_RAM_END 0x40000 42 43 #define VOLTAGE_SCALE 4 44 #define VOLTAGE_VID_OFFSET_SCALE1 625 45 #define VOLTAGE_VID_OFFSET_SCALE2 100 46 47 static const struct ci_pt_defaults defaults_hawaii_xt = 48 { 49 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 50 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, 51 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } 52 }; 53 54 static const struct ci_pt_defaults defaults_hawaii_pro = 55 { 56 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, 57 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, 58 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } 59 }; 60 61 static const struct ci_pt_defaults defaults_bonaire_xt = 62 { 63 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, 64 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, 65 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } 66 }; 67 68 static const struct ci_pt_defaults defaults_bonaire_pro = 69 { 70 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, 71 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, 72 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } 73 }; 74 75 static const struct ci_pt_defaults defaults_saturn_xt = 76 { 77 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, 78 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, 79 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } 80 }; 81 82 static const struct ci_pt_defaults defaults_saturn_pro = 83 { 84 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, 85 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 
0xE6, 0x71, 0x6A, 0x6A }, 86 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } 87 }; 88 89 static const struct ci_pt_config_reg didt_config_ci[] = 90 { 91 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 92 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 93 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 94 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 95 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 96 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 97 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 98 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 99 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 100 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 101 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 102 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 103 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 104 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 105 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 106 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 107 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 108 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 109 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 110 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 111 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 112 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 113 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 114 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 115 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 116 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 117 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 118 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 119 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 120 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 121 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 122 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 123 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 124 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 125 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 126 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 127 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 128 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 129 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 130 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 131 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 132 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 133 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 134 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 135 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 136 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 137 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 138 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 139 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 140 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 141 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 142 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 143 { 0x41, 0xffff0000, 16, 0x3FFF, 
CISLANDS_CONFIGREG_DIDT_IND }, 144 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 145 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 146 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 147 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 148 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 149 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 150 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 151 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 152 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 153 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 154 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 155 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 156 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 157 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, 158 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, 159 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, 160 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 161 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, 162 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 163 { 0xFFFFFFFF } 164 }; 165 166 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev, 167 struct atom_voltage_table_entry *voltage_table, 168 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd); 169 static int ci_set_power_limit(struct radeon_device *rdev, u32 n); 170 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, 171 u32 target_tdp); 172 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); 173 174 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, 175 PPSMC_Msg msg, u32 parameter); 176 177 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev); 178 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev); 179 180 static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) 181 { 182 struct ci_power_info *pi = rdev->pm.dpm.priv; 183 184 return pi; 185 } 186 187 static struct ci_ps *ci_get_ps(struct radeon_ps *rps) 188 { 189 struct ci_ps *ps = rps->ps_priv; 190 191 return ps; 192 } 193 194 static void ci_initialize_powertune_defaults(struct radeon_device *rdev) 195 { 196 struct ci_power_info *pi = ci_get_pi(rdev); 197 198 switch (rdev->pdev->device) { 199 case 0x6649: 200 case 0x6650: 201 case 0x6651: 202 case 0x6658: 203 case 0x665C: 204 case 0x665D: 205 default: 206 pi->powertune_defaults = &defaults_bonaire_xt; 207 break; 208 case 0x6640: 209 case 0x6641: 210 case 0x6646: 211 case 0x6647: 212 pi->powertune_defaults = &defaults_saturn_xt; 213 break; 214 case 0x67B8: 215 case 0x67B0: 216 pi->powertune_defaults = &defaults_hawaii_xt; 217 break; 218 case 0x67BA: 219 case 0x67B1: 220 pi->powertune_defaults = &defaults_hawaii_pro; 221 break; 222 case 0x67A0: 223 case 0x67A1: 224 case 0x67A2: 225 case 0x67A8: 226 case 0x67A9: 227 case 0x67AA: 228 case 0x67B9: 229 case 0x67BE: 230 pi->powertune_defaults = &defaults_bonaire_xt; 231 break; 232 } 233 234 pi->dte_tj_offset = 0; 235 236 pi->caps_power_containment = true; 237 pi->caps_cac = false; 238 pi->caps_sq_ramping = false; 239 pi->caps_db_ramping = false; 240 pi->caps_td_ramping = false; 241 pi->caps_tcp_ramping = false; 242 243 if (pi->caps_power_containment) { 244 pi->caps_cac = true; 245 if (rdev->family == CHIP_HAWAII) 246 pi->enable_bapm_feature = false; 247 else 248 pi->enable_bapm_feature = true; 249 
pi->enable_tdc_limit_feature = true; 250 pi->enable_pkg_pwr_tracking_feature = true; 251 } 252 } 253 254 static u8 ci_convert_to_vid(u16 vddc) 255 { 256 return (6200 - (vddc * VOLTAGE_SCALE)) / 25; 257 } 258 259 static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev) 260 { 261 struct ci_power_info *pi = ci_get_pi(rdev); 262 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; 263 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; 264 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2; 265 u32 i; 266 267 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) 268 return -EINVAL; 269 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8) 270 return -EINVAL; 271 if (rdev->pm.dpm.dyn_state.cac_leakage_table.count != 272 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count) 273 return -EINVAL; 274 275 for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) { 276 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 277 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); 278 hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); 279 hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); 280 } else { 281 lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); 282 hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); 283 } 284 } 285 return 0; 286 } 287 288 static int ci_populate_vddc_vid(struct radeon_device *rdev) 289 { 290 struct ci_power_info *pi = ci_get_pi(rdev); 291 u8 *vid = pi->smc_powertune_table.VddCVid; 292 u32 i; 293 294 if (pi->vddc_voltage_table.count > 8) 295 return -EINVAL; 296 297 for (i = 0; i < pi->vddc_voltage_table.count; i++) 298 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); 299 300 return 0; 301 } 302 303 static int ci_populate_svi_load_line(struct radeon_device *rdev) 304 { 305 struct ci_power_info *pi = ci_get_pi(rdev); 306 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 307 308 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en; 309 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc; 310 pi->smc_powertune_table.SviLoadLineTrimVddC = 3; 311 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0; 312 313 return 0; 314 } 315 316 static int ci_populate_tdc_limit(struct radeon_device *rdev) 317 { 318 struct ci_power_info *pi = ci_get_pi(rdev); 319 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 320 u16 tdc_limit; 321 322 tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256; 323 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit); 324 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc = 325 pt_defaults->tdc_vddc_throttle_release_limit_perc; 326 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt; 327 328 return 0; 329 } 330 331 static int ci_populate_dw8(struct radeon_device *rdev) 332 { 333 struct ci_power_info *pi = ci_get_pi(rdev); 334 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 335 int ret; 336 337 ret = ci_read_smc_sram_dword(rdev, 338 SMU7_FIRMWARE_HEADER_LOCATION + 339 offsetof(SMU7_Firmware_Header, PmFuseTable) + 340 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), 341 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl, 342 pi->sram_end); 343 if (ret) 344 return -EINVAL; 345 else 346 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl; 347 348 return 0; 349 } 350 
351 static int ci_populate_fuzzy_fan(struct radeon_device *rdev) 352 { 353 struct ci_power_info *pi = ci_get_pi(rdev); 354 355 if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) || 356 (rdev->pm.dpm.fan.fan_output_sensitivity == 0)) 357 rdev->pm.dpm.fan.fan_output_sensitivity = 358 rdev->pm.dpm.fan.default_fan_output_sensitivity; 359 360 pi->smc_powertune_table.FuzzyFan_PwmSetDelta = 361 cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity); 362 363 return 0; 364 } 365 366 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) 367 { 368 struct ci_power_info *pi = ci_get_pi(rdev); 369 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; 370 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; 371 int i, min, max; 372 373 min = max = hi_vid[0]; 374 for (i = 0; i < 8; i++) { 375 if (0 != hi_vid[i]) { 376 if (min > hi_vid[i]) 377 min = hi_vid[i]; 378 if (max < hi_vid[i]) 379 max = hi_vid[i]; 380 } 381 382 if (0 != lo_vid[i]) { 383 if (min > lo_vid[i]) 384 min = lo_vid[i]; 385 if (max < lo_vid[i]) 386 max = lo_vid[i]; 387 } 388 } 389 390 if ((min == 0) || (max == 0)) 391 return -EINVAL; 392 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max; 393 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min; 394 395 return 0; 396 } 397 398 static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev) 399 { 400 struct ci_power_info *pi = ci_get_pi(rdev); 401 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; 402 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; 403 struct radeon_cac_tdp_table *cac_tdp_table = 404 rdev->pm.dpm.dyn_state.cac_tdp_table; 405 406 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256; 407 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256; 408 409 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd); 410 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd); 411 412 return 0; 413 } 414 415 static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev) 416 { 417 struct ci_power_info *pi = ci_get_pi(rdev); 418 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; 419 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table; 420 struct radeon_cac_tdp_table *cac_tdp_table = 421 rdev->pm.dpm.dyn_state.cac_tdp_table; 422 struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table; 423 int i, j, k; 424 const u16 *def1; 425 const u16 *def2; 426 427 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256; 428 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256; 429 430 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset; 431 dpm_table->GpuTjMax = 432 (u8)(pi->thermal_temp_setting.temperature_high / 1000); 433 dpm_table->GpuTjHyst = 8; 434 435 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base; 436 437 if (ppm) { 438 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000); 439 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256); 440 } else { 441 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0); 442 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0); 443 } 444 445 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient); 446 def1 = pt_defaults->bapmti_r; 447 def2 = pt_defaults->bapmti_rc; 448 449 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { 450 for (j = 0; j < SMU7_DTE_SOURCES; j++) { 451 for (k = 0; k < SMU7_DTE_SINKS; k++) { 452 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1); 453 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2); 454 def1++; 455 def2++; 456 } 457 } 458 } 459 
460 return 0; 461 } 462 463 static int ci_populate_pm_base(struct radeon_device *rdev) 464 { 465 struct ci_power_info *pi = ci_get_pi(rdev); 466 u32 pm_fuse_table_offset; 467 int ret; 468 469 if (pi->caps_power_containment) { 470 ret = ci_read_smc_sram_dword(rdev, 471 SMU7_FIRMWARE_HEADER_LOCATION + 472 offsetof(SMU7_Firmware_Header, PmFuseTable), 473 &pm_fuse_table_offset, pi->sram_end); 474 if (ret) 475 return ret; 476 ret = ci_populate_bapm_vddc_vid_sidd(rdev); 477 if (ret) 478 return ret; 479 ret = ci_populate_vddc_vid(rdev); 480 if (ret) 481 return ret; 482 ret = ci_populate_svi_load_line(rdev); 483 if (ret) 484 return ret; 485 ret = ci_populate_tdc_limit(rdev); 486 if (ret) 487 return ret; 488 ret = ci_populate_dw8(rdev); 489 if (ret) 490 return ret; 491 ret = ci_populate_fuzzy_fan(rdev); 492 if (ret) 493 return ret; 494 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); 495 if (ret) 496 return ret; 497 ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev); 498 if (ret) 499 return ret; 500 ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset, 501 (u8 *)&pi->smc_powertune_table, 502 sizeof(SMU7_Discrete_PmFuses), pi->sram_end); 503 if (ret) 504 return ret; 505 } 506 507 return 0; 508 } 509 510 static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable) 511 { 512 struct ci_power_info *pi = ci_get_pi(rdev); 513 u32 data; 514 515 if (pi->caps_sq_ramping) { 516 data = RREG32_DIDT(DIDT_SQ_CTRL0); 517 if (enable) 518 data |= DIDT_CTRL_EN; 519 else 520 data &= ~DIDT_CTRL_EN; 521 WREG32_DIDT(DIDT_SQ_CTRL0, data); 522 } 523 524 if (pi->caps_db_ramping) { 525 data = RREG32_DIDT(DIDT_DB_CTRL0); 526 if (enable) 527 data |= DIDT_CTRL_EN; 528 else 529 data &= ~DIDT_CTRL_EN; 530 WREG32_DIDT(DIDT_DB_CTRL0, data); 531 } 532 533 if (pi->caps_td_ramping) { 534 data = RREG32_DIDT(DIDT_TD_CTRL0); 535 if (enable) 536 data |= DIDT_CTRL_EN; 537 else 538 data &= ~DIDT_CTRL_EN; 539 WREG32_DIDT(DIDT_TD_CTRL0, data); 540 } 541 542 if (pi->caps_tcp_ramping) { 543 data = RREG32_DIDT(DIDT_TCP_CTRL0); 544 if (enable) 545 data |= DIDT_CTRL_EN; 546 else 547 data &= ~DIDT_CTRL_EN; 548 WREG32_DIDT(DIDT_TCP_CTRL0, data); 549 } 550 } 551 552 static int ci_program_pt_config_registers(struct radeon_device *rdev, 553 const struct ci_pt_config_reg *cac_config_regs) 554 { 555 const struct ci_pt_config_reg *config_regs = cac_config_regs; 556 u32 data; 557 u32 cache = 0; 558 559 if (config_regs == NULL) 560 return -EINVAL; 561 562 while (config_regs->offset != 0xFFFFFFFF) { 563 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) { 564 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); 565 } else { 566 switch (config_regs->type) { 567 case CISLANDS_CONFIGREG_SMC_IND: 568 data = RREG32_SMC(config_regs->offset); 569 break; 570 case CISLANDS_CONFIGREG_DIDT_IND: 571 data = RREG32_DIDT(config_regs->offset); 572 break; 573 default: 574 data = RREG32(config_regs->offset << 2); 575 break; 576 } 577 578 data &= ~config_regs->mask; 579 data |= ((config_regs->value << config_regs->shift) & config_regs->mask); 580 data |= cache; 581 582 switch (config_regs->type) { 583 case CISLANDS_CONFIGREG_SMC_IND: 584 WREG32_SMC(config_regs->offset, data); 585 break; 586 case CISLANDS_CONFIGREG_DIDT_IND: 587 WREG32_DIDT(config_regs->offset, data); 588 break; 589 default: 590 WREG32(config_regs->offset << 2, data); 591 break; 592 } 593 cache = 0; 594 } 595 config_regs++; 596 } 597 return 0; 598 } 599 600 static int ci_enable_didt(struct radeon_device *rdev, bool enable) 601 { 602 struct ci_power_info *pi = 
ci_get_pi(rdev); 603 int ret; 604 605 if (pi->caps_sq_ramping || pi->caps_db_ramping || 606 pi->caps_td_ramping || pi->caps_tcp_ramping) { 607 cik_enter_rlc_safe_mode(rdev); 608 609 if (enable) { 610 ret = ci_program_pt_config_registers(rdev, didt_config_ci); 611 if (ret) { 612 cik_exit_rlc_safe_mode(rdev); 613 return ret; 614 } 615 } 616 617 ci_do_enable_didt(rdev, enable); 618 619 cik_exit_rlc_safe_mode(rdev); 620 } 621 622 return 0; 623 } 624 625 static int ci_enable_power_containment(struct radeon_device *rdev, bool enable) 626 { 627 struct ci_power_info *pi = ci_get_pi(rdev); 628 PPSMC_Result smc_result; 629 int ret = 0; 630 631 if (enable) { 632 pi->power_containment_features = 0; 633 if (pi->caps_power_containment) { 634 if (pi->enable_bapm_feature) { 635 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE); 636 if (smc_result != PPSMC_Result_OK) 637 ret = -EINVAL; 638 else 639 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; 640 } 641 642 if (pi->enable_tdc_limit_feature) { 643 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable); 644 if (smc_result != PPSMC_Result_OK) 645 ret = -EINVAL; 646 else 647 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit; 648 } 649 650 if (pi->enable_pkg_pwr_tracking_feature) { 651 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable); 652 if (smc_result != PPSMC_Result_OK) { 653 ret = -EINVAL; 654 } else { 655 struct radeon_cac_tdp_table *cac_tdp_table = 656 rdev->pm.dpm.dyn_state.cac_tdp_table; 657 u32 default_pwr_limit = 658 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); 659 660 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; 661 662 ci_set_power_limit(rdev, default_pwr_limit); 663 } 664 } 665 } 666 } else { 667 if (pi->caps_power_containment && pi->power_containment_features) { 668 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) 669 ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable); 670 671 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM) 672 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE); 673 674 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) 675 ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable); 676 pi->power_containment_features = 0; 677 } 678 } 679 680 return ret; 681 } 682 683 static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable) 684 { 685 struct ci_power_info *pi = ci_get_pi(rdev); 686 PPSMC_Result smc_result; 687 int ret = 0; 688 689 if (pi->caps_cac) { 690 if (enable) { 691 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac); 692 if (smc_result != PPSMC_Result_OK) { 693 ret = -EINVAL; 694 pi->cac_enabled = false; 695 } else { 696 pi->cac_enabled = true; 697 } 698 } else if (pi->cac_enabled) { 699 ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac); 700 pi->cac_enabled = false; 701 } 702 } 703 704 return ret; 705 } 706 707 static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev, 708 bool enable) 709 { 710 struct ci_power_info *pi = ci_get_pi(rdev); 711 PPSMC_Result smc_result = PPSMC_Result_OK; 712 713 if (pi->thermal_sclk_dpm_enabled) { 714 if (enable) 715 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM); 716 else 717 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM); 718 } 719 720 if (smc_result == PPSMC_Result_OK) 721 return 0; 722 else 723 return -EINVAL; 724 } 725 726 static int ci_power_control_set_level(struct radeon_device *rdev) 727 { 728 struct ci_power_info *pi = ci_get_pi(rdev); 729 
struct radeon_cac_tdp_table *cac_tdp_table = 730 rdev->pm.dpm.dyn_state.cac_tdp_table; 731 s32 adjust_percent; 732 s32 target_tdp; 733 int ret = 0; 734 bool adjust_polarity = false; /* ??? */ 735 736 if (pi->caps_power_containment) { 737 adjust_percent = adjust_polarity ? 738 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); 739 target_tdp = ((100 + adjust_percent) * 740 (s32)cac_tdp_table->configurable_tdp) / 100; 741 742 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); 743 } 744 745 return ret; 746 } 747 748 void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate) 749 { 750 struct ci_power_info *pi = ci_get_pi(rdev); 751 752 if (pi->uvd_power_gated == gate) 753 return; 754 755 pi->uvd_power_gated = gate; 756 757 ci_update_uvd_dpm(rdev, gate); 758 } 759 760 bool ci_dpm_vblank_too_short(struct radeon_device *rdev) 761 { 762 struct ci_power_info *pi = ci_get_pi(rdev); 763 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 764 u32 switch_limit = pi->mem_gddr5 ? 450 : 300; 765 766 if (vblank_time < switch_limit) 767 return true; 768 else 769 return false; 770 771 } 772 773 static void ci_apply_state_adjust_rules(struct radeon_device *rdev, 774 struct radeon_ps *rps) 775 { 776 struct ci_ps *ps = ci_get_ps(rps); 777 struct ci_power_info *pi = ci_get_pi(rdev); 778 struct radeon_clock_and_voltage_limits *max_limits; 779 bool disable_mclk_switching; 780 u32 sclk, mclk; 781 int i; 782 783 if (rps->vce_active) { 784 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; 785 rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; 786 } else { 787 rps->evclk = 0; 788 rps->ecclk = 0; 789 } 790 791 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 792 ci_dpm_vblank_too_short(rdev)) 793 disable_mclk_switching = true; 794 else 795 disable_mclk_switching = false; 796 797 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 798 pi->battery_state = true; 799 else 800 pi->battery_state = false; 801 802 if (rdev->pm.dpm.ac_power) 803 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 804 else 805 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 806 807 if (rdev->pm.dpm.ac_power == false) { 808 for (i = 0; i < ps->performance_level_count; i++) { 809 if (ps->performance_levels[i].mclk > max_limits->mclk) 810 ps->performance_levels[i].mclk = max_limits->mclk; 811 if (ps->performance_levels[i].sclk > max_limits->sclk) 812 ps->performance_levels[i].sclk = max_limits->sclk; 813 } 814 } 815 816 /* XXX validate the min clocks required for display */ 817 818 if (disable_mclk_switching) { 819 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; 820 sclk = ps->performance_levels[0].sclk; 821 } else { 822 mclk = ps->performance_levels[0].mclk; 823 sclk = ps->performance_levels[0].sclk; 824 } 825 826 if (rps->vce_active) { 827 if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) 828 sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; 829 if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk) 830 mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk; 831 } 832 833 ps->performance_levels[0].sclk = sclk; 834 ps->performance_levels[0].mclk = mclk; 835 836 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk) 837 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk; 838 839 if (disable_mclk_switching) { 840 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk) 841 ps->performance_levels[0].mclk = 
ps->performance_levels[1].mclk; 842 } else { 843 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk) 844 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk; 845 } 846 } 847 848 static int ci_thermal_set_temperature_range(struct radeon_device *rdev, 849 int min_temp, int max_temp) 850 { 851 int low_temp = 0 * 1000; 852 int high_temp = 255 * 1000; 853 u32 tmp; 854 855 if (low_temp < min_temp) 856 low_temp = min_temp; 857 if (high_temp > max_temp) 858 high_temp = max_temp; 859 if (high_temp < low_temp) { 860 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); 861 return -EINVAL; 862 } 863 864 tmp = RREG32_SMC(CG_THERMAL_INT); 865 tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK); 866 tmp |= CI_DIG_THERM_INTH(high_temp / 1000) | 867 CI_DIG_THERM_INTL(low_temp / 1000); 868 WREG32_SMC(CG_THERMAL_INT, tmp); 869 870 #if 0 871 /* XXX: need to figure out how to handle this properly */ 872 tmp = RREG32_SMC(CG_THERMAL_CTRL); 873 tmp &= DIG_THERM_DPM_MASK; 874 tmp |= DIG_THERM_DPM(high_temp / 1000); 875 WREG32_SMC(CG_THERMAL_CTRL, tmp); 876 #endif 877 878 rdev->pm.dpm.thermal.min_temp = low_temp; 879 rdev->pm.dpm.thermal.max_temp = high_temp; 880 881 return 0; 882 } 883 884 static int ci_thermal_enable_alert(struct radeon_device *rdev, 885 bool enable) 886 { 887 u32 thermal_int = RREG32_SMC(CG_THERMAL_INT); 888 PPSMC_Result result; 889 890 if (enable) { 891 thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); 892 WREG32_SMC(CG_THERMAL_INT, thermal_int); 893 rdev->irq.dpm_thermal = false; 894 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable); 895 if (result != PPSMC_Result_OK) { 896 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 897 return -EINVAL; 898 } 899 } else { 900 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; 901 WREG32_SMC(CG_THERMAL_INT, thermal_int); 902 rdev->irq.dpm_thermal = true; 903 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable); 904 if (result != PPSMC_Result_OK) { 905 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n"); 906 return -EINVAL; 907 } 908 } 909 910 return 0; 911 } 912 913 static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) 914 { 915 struct ci_power_info *pi = ci_get_pi(rdev); 916 u32 tmp; 917 918 if (pi->fan_ctrl_is_in_default_mode) { 919 tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; 920 pi->fan_ctrl_default_mode = tmp; 921 tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; 922 pi->t_min = tmp; 923 pi->fan_ctrl_is_in_default_mode = false; 924 } 925 926 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; 927 tmp |= TMIN(0); 928 WREG32_SMC(CG_FDO_CTRL2, tmp); 929 930 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; 931 tmp |= FDO_PWM_MODE(mode); 932 WREG32_SMC(CG_FDO_CTRL2, tmp); 933 } 934 935 static int ci_thermal_setup_fan_table(struct radeon_device *rdev) 936 { 937 struct ci_power_info *pi = ci_get_pi(rdev); 938 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; 939 u32 duty100; 940 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; 941 u16 fdo_min, slope1, slope2; 942 u32 reference_clock, tmp; 943 int ret; 944 u64 tmp64; 945 946 if (!pi->fan_table_start) { 947 rdev->pm.dpm.fan.ucode_fan_control = false; 948 return 0; 949 } 950 951 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; 952 953 if (duty100 == 0) { 954 rdev->pm.dpm.fan.ucode_fan_control = false; 955 return 0; 956 } 957 958 tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100; 959 do_div(tmp64, 10000); 960 
fdo_min = (u16)tmp64; 961 962 t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min; 963 t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med; 964 965 pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min; 966 pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med; 967 968 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); 969 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); 970 971 fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100); 972 fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100); 973 fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100); 974 975 fan_table.Slope1 = cpu_to_be16(slope1); 976 fan_table.Slope2 = cpu_to_be16(slope2); 977 978 fan_table.FdoMin = cpu_to_be16(fdo_min); 979 980 fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst); 981 982 fan_table.HystUp = cpu_to_be16(1); 983 984 fan_table.HystSlope = cpu_to_be16(1); 985 986 fan_table.TempRespLim = cpu_to_be16(5); 987 988 reference_clock = radeon_get_xclk(rdev); 989 990 fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay * 991 reference_clock) / 1600); 992 993 fan_table.FdoMax = cpu_to_be16((u16)duty100); 994 995 tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; 996 fan_table.TempSrc = (uint8_t)tmp; 997 998 ret = ci_copy_bytes_to_smc(rdev, 999 pi->fan_table_start, 1000 (u8 *)(&fan_table), 1001 sizeof(fan_table), 1002 pi->sram_end); 1003 1004 if (ret) { 1005 DRM_ERROR("Failed to load fan table to the SMC."); 1006 rdev->pm.dpm.fan.ucode_fan_control = false; 1007 } 1008 1009 return 0; 1010 } 1011 1012 static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev) 1013 { 1014 struct ci_power_info *pi = ci_get_pi(rdev); 1015 PPSMC_Result ret; 1016 1017 if (pi->caps_od_fuzzy_fan_control_support) { 1018 ret = ci_send_msg_to_smc_with_parameter(rdev, 1019 PPSMC_StartFanControl, 1020 FAN_CONTROL_FUZZY); 1021 if (ret != PPSMC_Result_OK) 1022 return -EINVAL; 1023 ret = ci_send_msg_to_smc_with_parameter(rdev, 1024 PPSMC_MSG_SetFanPwmMax, 1025 rdev->pm.dpm.fan.default_max_fan_pwm); 1026 if (ret != PPSMC_Result_OK) 1027 return -EINVAL; 1028 } else { 1029 ret = ci_send_msg_to_smc_with_parameter(rdev, 1030 PPSMC_StartFanControl, 1031 FAN_CONTROL_TABLE); 1032 if (ret != PPSMC_Result_OK) 1033 return -EINVAL; 1034 } 1035 1036 pi->fan_is_controlled_by_smc = true; 1037 return 0; 1038 } 1039 1040 static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) 1041 { 1042 PPSMC_Result ret; 1043 struct ci_power_info *pi = ci_get_pi(rdev); 1044 1045 ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl); 1046 if (ret == PPSMC_Result_OK) { 1047 pi->fan_is_controlled_by_smc = false; 1048 return 0; 1049 } else 1050 return -EINVAL; 1051 } 1052 1053 int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, 1054 u32 *speed) 1055 { 1056 u32 duty, duty100; 1057 u64 tmp64; 1058 1059 if (rdev->pm.no_fan) 1060 return -ENOENT; 1061 1062 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; 1063 duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; 1064 1065 if (duty100 == 0) 1066 return -EINVAL; 1067 1068 tmp64 = (u64)duty * 100; 1069 do_div(tmp64, duty100); 1070 *speed = (u32)tmp64; 1071 1072 if (*speed > 100) 1073 *speed = 100; 1074 1075 return 0; 1076 } 1077 1078 int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, 1079 u32 speed) 1080 { 1081 u32 tmp; 1082 u32 duty, duty100; 1083 u64 tmp64; 1084 struct 
ci_power_info *pi = ci_get_pi(rdev); 1085 1086 if (rdev->pm.no_fan) 1087 return -ENOENT; 1088 1089 if (pi->fan_is_controlled_by_smc) 1090 return -EINVAL; 1091 1092 if (speed > 100) 1093 return -EINVAL; 1094 1095 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; 1096 1097 if (duty100 == 0) 1098 return -EINVAL; 1099 1100 tmp64 = (u64)speed * duty100; 1101 do_div(tmp64, 100); 1102 duty = (u32)tmp64; 1103 1104 tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; 1105 tmp |= FDO_STATIC_DUTY(duty); 1106 WREG32_SMC(CG_FDO_CTRL0, tmp); 1107 1108 return 0; 1109 } 1110 1111 void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode) 1112 { 1113 if (mode) { 1114 /* stop auto-manage */ 1115 if (rdev->pm.dpm.fan.ucode_fan_control) 1116 ci_fan_ctrl_stop_smc_fan_control(rdev); 1117 ci_fan_ctrl_set_static_mode(rdev, mode); 1118 } else { 1119 /* restart auto-manage */ 1120 if (rdev->pm.dpm.fan.ucode_fan_control) 1121 ci_thermal_start_smc_fan_control(rdev); 1122 else 1123 ci_fan_ctrl_set_default_mode(rdev); 1124 } 1125 } 1126 1127 u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev) 1128 { 1129 struct ci_power_info *pi = ci_get_pi(rdev); 1130 u32 tmp; 1131 1132 if (pi->fan_is_controlled_by_smc) 1133 return 0; 1134 1135 tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; 1136 return (tmp >> FDO_PWM_MODE_SHIFT); 1137 } 1138 1139 #if 0 1140 static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, 1141 u32 *speed) 1142 { 1143 u32 tach_period; 1144 u32 xclk = radeon_get_xclk(rdev); 1145 1146 if (rdev->pm.no_fan) 1147 return -ENOENT; 1148 1149 if (rdev->pm.fan_pulses_per_revolution == 0) 1150 return -ENOENT; 1151 1152 tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; 1153 if (tach_period == 0) 1154 return -ENOENT; 1155 1156 *speed = 60 * xclk * 10000 / tach_period; 1157 1158 return 0; 1159 } 1160 1161 static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, 1162 u32 speed) 1163 { 1164 u32 tach_period, tmp; 1165 u32 xclk = radeon_get_xclk(rdev); 1166 1167 if (rdev->pm.no_fan) 1168 return -ENOENT; 1169 1170 if (rdev->pm.fan_pulses_per_revolution == 0) 1171 return -ENOENT; 1172 1173 if ((speed < rdev->pm.fan_min_rpm) || 1174 (speed > rdev->pm.fan_max_rpm)) 1175 return -EINVAL; 1176 1177 if (rdev->pm.dpm.fan.ucode_fan_control) 1178 ci_fan_ctrl_stop_smc_fan_control(rdev); 1179 1180 tach_period = 60 * xclk * 10000 / (8 * speed); 1181 tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; 1182 tmp |= TARGET_PERIOD(tach_period); 1183 WREG32_SMC(CG_TACH_CTRL, tmp); 1184 1185 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); 1186 1187 return 0; 1188 } 1189 #endif 1190 1191 static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev) 1192 { 1193 struct ci_power_info *pi = ci_get_pi(rdev); 1194 u32 tmp; 1195 1196 if (!pi->fan_ctrl_is_in_default_mode) { 1197 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; 1198 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode); 1199 WREG32_SMC(CG_FDO_CTRL2, tmp); 1200 1201 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; 1202 tmp |= TMIN(pi->t_min); 1203 WREG32_SMC(CG_FDO_CTRL2, tmp); 1204 pi->fan_ctrl_is_in_default_mode = true; 1205 } 1206 } 1207 1208 static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev) 1209 { 1210 if (rdev->pm.dpm.fan.ucode_fan_control) { 1211 ci_fan_ctrl_start_smc_fan_control(rdev); 1212 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); 1213 } 1214 } 1215 1216 static void ci_thermal_initialize(struct radeon_device *rdev) 1217 { 1218 u32 
tmp; 1219 1220 if (rdev->pm.fan_pulses_per_revolution) { 1221 tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; 1222 tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1); 1223 WREG32_SMC(CG_TACH_CTRL, tmp); 1224 } 1225 1226 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; 1227 tmp |= TACH_PWM_RESP_RATE(0x28); 1228 WREG32_SMC(CG_FDO_CTRL2, tmp); 1229 } 1230 1231 static int ci_thermal_start_thermal_controller(struct radeon_device *rdev) 1232 { 1233 int ret; 1234 1235 ci_thermal_initialize(rdev); 1236 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 1237 if (ret) 1238 return ret; 1239 ret = ci_thermal_enable_alert(rdev, true); 1240 if (ret) 1241 return ret; 1242 if (rdev->pm.dpm.fan.ucode_fan_control) { 1243 ret = ci_thermal_setup_fan_table(rdev); 1244 if (ret) 1245 return ret; 1246 ci_thermal_start_smc_fan_control(rdev); 1247 } 1248 1249 return 0; 1250 } 1251 1252 static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev) 1253 { 1254 if (!rdev->pm.no_fan) 1255 ci_fan_ctrl_set_default_mode(rdev); 1256 } 1257 1258 #if 0 1259 static int ci_read_smc_soft_register(struct radeon_device *rdev, 1260 u16 reg_offset, u32 *value) 1261 { 1262 struct ci_power_info *pi = ci_get_pi(rdev); 1263 1264 return ci_read_smc_sram_dword(rdev, 1265 pi->soft_regs_start + reg_offset, 1266 value, pi->sram_end); 1267 } 1268 #endif 1269 1270 static int ci_write_smc_soft_register(struct radeon_device *rdev, 1271 u16 reg_offset, u32 value) 1272 { 1273 struct ci_power_info *pi = ci_get_pi(rdev); 1274 1275 return ci_write_smc_sram_dword(rdev, 1276 pi->soft_regs_start + reg_offset, 1277 value, pi->sram_end); 1278 } 1279 1280 static void ci_init_fps_limits(struct radeon_device *rdev) 1281 { 1282 struct ci_power_info *pi = ci_get_pi(rdev); 1283 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 1284 1285 if (pi->caps_fps) { 1286 u16 tmp; 1287 1288 tmp = 45; 1289 table->FpsHighT = cpu_to_be16(tmp); 1290 1291 tmp = 30; 1292 table->FpsLowT = cpu_to_be16(tmp); 1293 } 1294 } 1295 1296 static int ci_update_sclk_t(struct radeon_device *rdev) 1297 { 1298 struct ci_power_info *pi = ci_get_pi(rdev); 1299 int ret = 0; 1300 u32 low_sclk_interrupt_t = 0; 1301 1302 if (pi->caps_sclk_throttle_low_notification) { 1303 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); 1304 1305 ret = ci_copy_bytes_to_smc(rdev, 1306 pi->dpm_table_start + 1307 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), 1308 (u8 *)&low_sclk_interrupt_t, 1309 sizeof(u32), pi->sram_end); 1310 1311 } 1312 1313 return ret; 1314 } 1315 1316 static void ci_get_leakage_voltages(struct radeon_device *rdev) 1317 { 1318 struct ci_power_info *pi = ci_get_pi(rdev); 1319 u16 leakage_id, virtual_voltage_id; 1320 u16 vddc, vddci; 1321 int i; 1322 1323 pi->vddc_leakage.count = 0; 1324 pi->vddci_leakage.count = 0; 1325 1326 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 1327 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 1328 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1329 if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0) 1330 continue; 1331 if (vddc != 0 && vddc != virtual_voltage_id) { 1332 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; 1333 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; 1334 pi->vddc_leakage.count++; 1335 } 1336 } 1337 } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { 1338 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { 1339 virtual_voltage_id = 
ATOM_VIRTUAL_VOLTAGE_ID0 + i; 1340 if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, 1341 virtual_voltage_id, 1342 leakage_id) == 0) { 1343 if (vddc != 0 && vddc != virtual_voltage_id) { 1344 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; 1345 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; 1346 pi->vddc_leakage.count++; 1347 } 1348 if (vddci != 0 && vddci != virtual_voltage_id) { 1349 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci; 1350 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id; 1351 pi->vddci_leakage.count++; 1352 } 1353 } 1354 } 1355 } 1356 } 1357 1358 static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources) 1359 { 1360 struct ci_power_info *pi = ci_get_pi(rdev); 1361 bool want_thermal_protection; 1362 enum radeon_dpm_event_src dpm_event_src; 1363 u32 tmp; 1364 1365 switch (sources) { 1366 case 0: 1367 default: 1368 want_thermal_protection = false; 1369 break; 1370 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): 1371 want_thermal_protection = true; 1372 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; 1373 break; 1374 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): 1375 want_thermal_protection = true; 1376 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; 1377 break; 1378 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | 1379 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): 1380 want_thermal_protection = true; 1381 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; 1382 break; 1383 } 1384 1385 if (want_thermal_protection) { 1386 #if 0 1387 /* XXX: need to figure out how to handle this properly */ 1388 tmp = RREG32_SMC(CG_THERMAL_CTRL); 1389 tmp &= DPM_EVENT_SRC_MASK; 1390 tmp |= DPM_EVENT_SRC(dpm_event_src); 1391 WREG32_SMC(CG_THERMAL_CTRL, tmp); 1392 #endif 1393 1394 tmp = RREG32_SMC(GENERAL_PWRMGT); 1395 if (pi->thermal_protection) 1396 tmp &= ~THERMAL_PROTECTION_DIS; 1397 else 1398 tmp |= THERMAL_PROTECTION_DIS; 1399 WREG32_SMC(GENERAL_PWRMGT, tmp); 1400 } else { 1401 tmp = RREG32_SMC(GENERAL_PWRMGT); 1402 tmp |= THERMAL_PROTECTION_DIS; 1403 WREG32_SMC(GENERAL_PWRMGT, tmp); 1404 } 1405 } 1406 1407 static void ci_enable_auto_throttle_source(struct radeon_device *rdev, 1408 enum radeon_dpm_auto_throttle_src source, 1409 bool enable) 1410 { 1411 struct ci_power_info *pi = ci_get_pi(rdev); 1412 1413 if (enable) { 1414 if (!(pi->active_auto_throttle_sources & (1 << source))) { 1415 pi->active_auto_throttle_sources |= 1 << source; 1416 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1417 } 1418 } else { 1419 if (pi->active_auto_throttle_sources & (1 << source)) { 1420 pi->active_auto_throttle_sources &= ~(1 << source); 1421 ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources); 1422 } 1423 } 1424 } 1425 1426 static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev) 1427 { 1428 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) 1429 ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt); 1430 } 1431 1432 static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev) 1433 { 1434 struct ci_power_info *pi = ci_get_pi(rdev); 1435 PPSMC_Result smc_result; 1436 1437 if (!pi->need_update_smu7_dpm_table) 1438 return 0; 1439 1440 if ((!pi->sclk_dpm_key_disabled) && 1441 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { 1442 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 1443 if (smc_result != PPSMC_Result_OK) 1444 return 
-EINVAL; 1445 } 1446 1447 if ((!pi->mclk_dpm_key_disabled) && 1448 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 1449 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 1450 if (smc_result != PPSMC_Result_OK) 1451 return -EINVAL; 1452 } 1453 1454 pi->need_update_smu7_dpm_table = 0; 1455 return 0; 1456 } 1457 1458 static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable) 1459 { 1460 struct ci_power_info *pi = ci_get_pi(rdev); 1461 PPSMC_Result smc_result; 1462 1463 if (enable) { 1464 if (!pi->sclk_dpm_key_disabled) { 1465 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable); 1466 if (smc_result != PPSMC_Result_OK) 1467 return -EINVAL; 1468 } 1469 1470 if (!pi->mclk_dpm_key_disabled) { 1471 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable); 1472 if (smc_result != PPSMC_Result_OK) 1473 return -EINVAL; 1474 1475 WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN); 1476 1477 WREG32_SMC(LCAC_MC0_CNTL, 0x05); 1478 WREG32_SMC(LCAC_MC1_CNTL, 0x05); 1479 WREG32_SMC(LCAC_CPL_CNTL, 0x100005); 1480 1481 udelay(10); 1482 1483 WREG32_SMC(LCAC_MC0_CNTL, 0x400005); 1484 WREG32_SMC(LCAC_MC1_CNTL, 0x400005); 1485 WREG32_SMC(LCAC_CPL_CNTL, 0x500005); 1486 } 1487 } else { 1488 if (!pi->sclk_dpm_key_disabled) { 1489 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable); 1490 if (smc_result != PPSMC_Result_OK) 1491 return -EINVAL; 1492 } 1493 1494 if (!pi->mclk_dpm_key_disabled) { 1495 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable); 1496 if (smc_result != PPSMC_Result_OK) 1497 return -EINVAL; 1498 } 1499 } 1500 1501 return 0; 1502 } 1503 1504 static int ci_start_dpm(struct radeon_device *rdev) 1505 { 1506 struct ci_power_info *pi = ci_get_pi(rdev); 1507 PPSMC_Result smc_result; 1508 int ret; 1509 u32 tmp; 1510 1511 tmp = RREG32_SMC(GENERAL_PWRMGT); 1512 tmp |= GLOBAL_PWRMGT_EN; 1513 WREG32_SMC(GENERAL_PWRMGT, tmp); 1514 1515 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1516 tmp |= DYNAMIC_PM_EN; 1517 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1518 1519 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000); 1520 1521 WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN); 1522 1523 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable); 1524 if (smc_result != PPSMC_Result_OK) 1525 return -EINVAL; 1526 1527 ret = ci_enable_sclk_mclk_dpm(rdev, true); 1528 if (ret) 1529 return ret; 1530 1531 if (!pi->pcie_dpm_key_disabled) { 1532 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable); 1533 if (smc_result != PPSMC_Result_OK) 1534 return -EINVAL; 1535 } 1536 1537 return 0; 1538 } 1539 1540 static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev) 1541 { 1542 struct ci_power_info *pi = ci_get_pi(rdev); 1543 PPSMC_Result smc_result; 1544 1545 if (!pi->need_update_smu7_dpm_table) 1546 return 0; 1547 1548 if ((!pi->sclk_dpm_key_disabled) && 1549 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { 1550 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel); 1551 if (smc_result != PPSMC_Result_OK) 1552 return -EINVAL; 1553 } 1554 1555 if ((!pi->mclk_dpm_key_disabled) && 1556 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 1557 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel); 1558 if (smc_result != PPSMC_Result_OK) 1559 return -EINVAL; 1560 } 1561 1562 return 0; 1563 } 1564 1565 static int ci_stop_dpm(struct radeon_device *rdev) 1566 { 1567 struct ci_power_info *pi = ci_get_pi(rdev); 1568 PPSMC_Result smc_result; 
1569 int ret; 1570 u32 tmp; 1571 1572 tmp = RREG32_SMC(GENERAL_PWRMGT); 1573 tmp &= ~GLOBAL_PWRMGT_EN; 1574 WREG32_SMC(GENERAL_PWRMGT, tmp); 1575 1576 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1577 tmp &= ~DYNAMIC_PM_EN; 1578 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1579 1580 if (!pi->pcie_dpm_key_disabled) { 1581 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable); 1582 if (smc_result != PPSMC_Result_OK) 1583 return -EINVAL; 1584 } 1585 1586 ret = ci_enable_sclk_mclk_dpm(rdev, false); 1587 if (ret) 1588 return ret; 1589 1590 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable); 1591 if (smc_result != PPSMC_Result_OK) 1592 return -EINVAL; 1593 1594 return 0; 1595 } 1596 1597 static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable) 1598 { 1599 u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 1600 1601 if (enable) 1602 tmp &= ~SCLK_PWRMGT_OFF; 1603 else 1604 tmp |= SCLK_PWRMGT_OFF; 1605 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1606 } 1607 1608 #if 0 1609 static int ci_notify_hw_of_power_source(struct radeon_device *rdev, 1610 bool ac_power) 1611 { 1612 struct ci_power_info *pi = ci_get_pi(rdev); 1613 struct radeon_cac_tdp_table *cac_tdp_table = 1614 rdev->pm.dpm.dyn_state.cac_tdp_table; 1615 u32 power_limit; 1616 1617 if (ac_power) 1618 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); 1619 else 1620 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); 1621 1622 ci_set_power_limit(rdev, power_limit); 1623 1624 if (pi->caps_automatic_dc_transition) { 1625 if (ac_power) 1626 ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC); 1627 else 1628 ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp); 1629 } 1630 1631 return 0; 1632 } 1633 #endif 1634 1635 static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, 1636 PPSMC_Msg msg, u32 parameter) 1637 { 1638 WREG32(SMC_MSG_ARG_0, parameter); 1639 return ci_send_msg_to_smc(rdev, msg); 1640 } 1641 1642 static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev, 1643 PPSMC_Msg msg, u32 *parameter) 1644 { 1645 PPSMC_Result smc_result; 1646 1647 smc_result = ci_send_msg_to_smc(rdev, msg); 1648 1649 if ((smc_result == PPSMC_Result_OK) && parameter) 1650 *parameter = RREG32(SMC_MSG_ARG_0); 1651 1652 return smc_result; 1653 } 1654 1655 static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n) 1656 { 1657 struct ci_power_info *pi = ci_get_pi(rdev); 1658 1659 if (!pi->sclk_dpm_key_disabled) { 1660 PPSMC_Result smc_result = 1661 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); 1662 if (smc_result != PPSMC_Result_OK) 1663 return -EINVAL; 1664 } 1665 1666 return 0; 1667 } 1668 1669 static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n) 1670 { 1671 struct ci_power_info *pi = ci_get_pi(rdev); 1672 1673 if (!pi->mclk_dpm_key_disabled) { 1674 PPSMC_Result smc_result = 1675 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); 1676 if (smc_result != PPSMC_Result_OK) 1677 return -EINVAL; 1678 } 1679 1680 return 0; 1681 } 1682 1683 static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n) 1684 { 1685 struct ci_power_info *pi = ci_get_pi(rdev); 1686 1687 if (!pi->pcie_dpm_key_disabled) { 1688 PPSMC_Result smc_result = 1689 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n); 1690 if (smc_result != PPSMC_Result_OK) 1691 return -EINVAL; 1692 } 1693 1694 return 0; 1695 } 1696 1697 static int ci_set_power_limit(struct radeon_device *rdev, u32 n) 1698 { 1699 
struct ci_power_info *pi = ci_get_pi(rdev); 1700 1701 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { 1702 PPSMC_Result smc_result = 1703 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n); 1704 if (smc_result != PPSMC_Result_OK) 1705 return -EINVAL; 1706 } 1707 1708 return 0; 1709 } 1710 1711 static int ci_set_overdrive_target_tdp(struct radeon_device *rdev, 1712 u32 target_tdp) 1713 { 1714 PPSMC_Result smc_result = 1715 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); 1716 if (smc_result != PPSMC_Result_OK) 1717 return -EINVAL; 1718 return 0; 1719 } 1720 1721 #if 0 1722 static int ci_set_boot_state(struct radeon_device *rdev) 1723 { 1724 return ci_enable_sclk_mclk_dpm(rdev, false); 1725 } 1726 #endif 1727 1728 static u32 ci_get_average_sclk_freq(struct radeon_device *rdev) 1729 { 1730 u32 sclk_freq; 1731 PPSMC_Result smc_result = 1732 ci_send_msg_to_smc_return_parameter(rdev, 1733 PPSMC_MSG_API_GetSclkFrequency, 1734 &sclk_freq); 1735 if (smc_result != PPSMC_Result_OK) 1736 sclk_freq = 0; 1737 1738 return sclk_freq; 1739 } 1740 1741 static u32 ci_get_average_mclk_freq(struct radeon_device *rdev) 1742 { 1743 u32 mclk_freq; 1744 PPSMC_Result smc_result = 1745 ci_send_msg_to_smc_return_parameter(rdev, 1746 PPSMC_MSG_API_GetMclkFrequency, 1747 &mclk_freq); 1748 if (smc_result != PPSMC_Result_OK) 1749 mclk_freq = 0; 1750 1751 return mclk_freq; 1752 } 1753 1754 static void ci_dpm_start_smc(struct radeon_device *rdev) 1755 { 1756 int i; 1757 1758 ci_program_jump_on_start(rdev); 1759 ci_start_smc_clock(rdev); 1760 ci_start_smc(rdev); 1761 for (i = 0; i < rdev->usec_timeout; i++) { 1762 if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED) 1763 break; 1764 } 1765 } 1766 1767 static void ci_dpm_stop_smc(struct radeon_device *rdev) 1768 { 1769 ci_reset_smc(rdev); 1770 ci_stop_smc_clock(rdev); 1771 } 1772 1773 static int ci_process_firmware_header(struct radeon_device *rdev) 1774 { 1775 struct ci_power_info *pi = ci_get_pi(rdev); 1776 u32 tmp; 1777 int ret; 1778 1779 ret = ci_read_smc_sram_dword(rdev, 1780 SMU7_FIRMWARE_HEADER_LOCATION + 1781 offsetof(SMU7_Firmware_Header, DpmTable), 1782 &tmp, pi->sram_end); 1783 if (ret) 1784 return ret; 1785 1786 pi->dpm_table_start = tmp; 1787 1788 ret = ci_read_smc_sram_dword(rdev, 1789 SMU7_FIRMWARE_HEADER_LOCATION + 1790 offsetof(SMU7_Firmware_Header, SoftRegisters), 1791 &tmp, pi->sram_end); 1792 if (ret) 1793 return ret; 1794 1795 pi->soft_regs_start = tmp; 1796 1797 ret = ci_read_smc_sram_dword(rdev, 1798 SMU7_FIRMWARE_HEADER_LOCATION + 1799 offsetof(SMU7_Firmware_Header, mcRegisterTable), 1800 &tmp, pi->sram_end); 1801 if (ret) 1802 return ret; 1803 1804 pi->mc_reg_table_start = tmp; 1805 1806 ret = ci_read_smc_sram_dword(rdev, 1807 SMU7_FIRMWARE_HEADER_LOCATION + 1808 offsetof(SMU7_Firmware_Header, FanTable), 1809 &tmp, pi->sram_end); 1810 if (ret) 1811 return ret; 1812 1813 pi->fan_table_start = tmp; 1814 1815 ret = ci_read_smc_sram_dword(rdev, 1816 SMU7_FIRMWARE_HEADER_LOCATION + 1817 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), 1818 &tmp, pi->sram_end); 1819 if (ret) 1820 return ret; 1821 1822 pi->arb_table_start = tmp; 1823 1824 return 0; 1825 } 1826 1827 static void ci_read_clock_registers(struct radeon_device *rdev) 1828 { 1829 struct ci_power_info *pi = ci_get_pi(rdev); 1830 1831 pi->clock_registers.cg_spll_func_cntl = 1832 RREG32_SMC(CG_SPLL_FUNC_CNTL); 1833 pi->clock_registers.cg_spll_func_cntl_2 = 1834 RREG32_SMC(CG_SPLL_FUNC_CNTL_2); 1835 
pi->clock_registers.cg_spll_func_cntl_3 = 1836 RREG32_SMC(CG_SPLL_FUNC_CNTL_3); 1837 pi->clock_registers.cg_spll_func_cntl_4 = 1838 RREG32_SMC(CG_SPLL_FUNC_CNTL_4); 1839 pi->clock_registers.cg_spll_spread_spectrum = 1840 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 1841 pi->clock_registers.cg_spll_spread_spectrum_2 = 1842 RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2); 1843 pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); 1844 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); 1845 pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); 1846 pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); 1847 pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); 1848 pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); 1849 pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); 1850 pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); 1851 pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); 1852 } 1853 1854 static void ci_init_sclk_t(struct radeon_device *rdev) 1855 { 1856 struct ci_power_info *pi = ci_get_pi(rdev); 1857 1858 pi->low_sclk_interrupt_t = 0; 1859 } 1860 1861 static void ci_enable_thermal_protection(struct radeon_device *rdev, 1862 bool enable) 1863 { 1864 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1865 1866 if (enable) 1867 tmp &= ~THERMAL_PROTECTION_DIS; 1868 else 1869 tmp |= THERMAL_PROTECTION_DIS; 1870 WREG32_SMC(GENERAL_PWRMGT, tmp); 1871 } 1872 1873 static void ci_enable_acpi_power_management(struct radeon_device *rdev) 1874 { 1875 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 1876 1877 tmp |= STATIC_PM_EN; 1878 1879 WREG32_SMC(GENERAL_PWRMGT, tmp); 1880 } 1881 1882 #if 0 1883 static int ci_enter_ulp_state(struct radeon_device *rdev) 1884 { 1885 1886 WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); 1887 1888 udelay(25000); 1889 1890 return 0; 1891 } 1892 1893 static int ci_exit_ulp_state(struct radeon_device *rdev) 1894 { 1895 int i; 1896 1897 WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); 1898 1899 udelay(7000); 1900 1901 for (i = 0; i < rdev->usec_timeout; i++) { 1902 if (RREG32(SMC_RESP_0) == 1) 1903 break; 1904 udelay(1000); 1905 } 1906 1907 return 0; 1908 } 1909 #endif 1910 1911 static int ci_notify_smc_display_change(struct radeon_device *rdev, 1912 bool has_display) 1913 { 1914 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; 1915 1916 return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 
0 : -EINVAL; 1917 } 1918 1919 static int ci_enable_ds_master_switch(struct radeon_device *rdev, 1920 bool enable) 1921 { 1922 struct ci_power_info *pi = ci_get_pi(rdev); 1923 1924 if (enable) { 1925 if (pi->caps_sclk_ds) { 1926 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) 1927 return -EINVAL; 1928 } else { 1929 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1930 return -EINVAL; 1931 } 1932 } else { 1933 if (pi->caps_sclk_ds) { 1934 if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 1935 return -EINVAL; 1936 } 1937 } 1938 1939 return 0; 1940 } 1941 1942 static void ci_program_display_gap(struct radeon_device *rdev) 1943 { 1944 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 1945 u32 pre_vbi_time_in_us; 1946 u32 frame_time_in_us; 1947 u32 ref_clock = rdev->clock.spll.reference_freq; 1948 u32 refresh_rate = r600_dpm_get_vrefresh(rdev); 1949 u32 vblank_time = r600_dpm_get_vblank_time(rdev); 1950 1951 tmp &= ~DISP_GAP_MASK; 1952 if (rdev->pm.dpm.new_active_crtc_count > 0) 1953 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); 1954 else 1955 tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE); 1956 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 1957 1958 if (refresh_rate == 0) 1959 refresh_rate = 60; 1960 if (vblank_time == 0xffffffff) 1961 vblank_time = 500; 1962 frame_time_in_us = 1000000 / refresh_rate; 1963 pre_vbi_time_in_us = 1964 frame_time_in_us - 200 - vblank_time; 1965 tmp = pre_vbi_time_in_us * (ref_clock / 100); 1966 1967 WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp); 1968 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); 1969 ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 1970 1971 1972 ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1)); 1973 1974 } 1975 1976 static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable) 1977 { 1978 struct ci_power_info *pi = ci_get_pi(rdev); 1979 u32 tmp; 1980 1981 if (enable) { 1982 if (pi->caps_sclk_ss_support) { 1983 tmp = RREG32_SMC(GENERAL_PWRMGT); 1984 tmp |= DYN_SPREAD_SPECTRUM_EN; 1985 WREG32_SMC(GENERAL_PWRMGT, tmp); 1986 } 1987 } else { 1988 tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM); 1989 tmp &= ~SSEN; 1990 WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp); 1991 1992 tmp = RREG32_SMC(GENERAL_PWRMGT); 1993 tmp &= ~DYN_SPREAD_SPECTRUM_EN; 1994 WREG32_SMC(GENERAL_PWRMGT, tmp); 1995 } 1996 } 1997 1998 static void ci_program_sstp(struct radeon_device *rdev) 1999 { 2000 WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); 2001 } 2002 2003 static void ci_enable_display_gap(struct radeon_device *rdev) 2004 { 2005 u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); 2006 2007 tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); 2008 tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | 2009 DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); 2010 2011 WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); 2012 } 2013 2014 static void ci_program_vc(struct radeon_device *rdev) 2015 { 2016 u32 tmp; 2017 2018 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 2019 tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT); 2020 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 2021 2022 WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0); 2023 WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1); 2024 WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2); 2025 WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3); 2026 WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4); 2027 WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5); 2028 WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6); 2029 
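	/* CG_FTV_0..CG_FTV_7 hold the voltage-controller frequency thresholds
	 * (the CISLANDS_VRC_DFLT* per-ASIC defaults); ci_clear_vc() below
	 * zeroes the same register set when the controller is torn down.
	 */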
WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7); 2030 } 2031 2032 static void ci_clear_vc(struct radeon_device *rdev) 2033 { 2034 u32 tmp; 2035 2036 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); 2037 tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT); 2038 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 2039 2040 WREG32_SMC(CG_FTV_0, 0); 2041 WREG32_SMC(CG_FTV_1, 0); 2042 WREG32_SMC(CG_FTV_2, 0); 2043 WREG32_SMC(CG_FTV_3, 0); 2044 WREG32_SMC(CG_FTV_4, 0); 2045 WREG32_SMC(CG_FTV_5, 0); 2046 WREG32_SMC(CG_FTV_6, 0); 2047 WREG32_SMC(CG_FTV_7, 0); 2048 } 2049 2050 static int ci_upload_firmware(struct radeon_device *rdev) 2051 { 2052 struct ci_power_info *pi = ci_get_pi(rdev); 2053 int i, ret; 2054 2055 for (i = 0; i < rdev->usec_timeout; i++) { 2056 if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE) 2057 break; 2058 } 2059 WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1); 2060 2061 ci_stop_smc_clock(rdev); 2062 ci_reset_smc(rdev); 2063 2064 ret = ci_load_smc_ucode(rdev, pi->sram_end); 2065 2066 return ret; 2067 2068 } 2069 2070 static int ci_get_svi2_voltage_table(struct radeon_device *rdev, 2071 struct radeon_clock_voltage_dependency_table *voltage_dependency_table, 2072 struct atom_voltage_table *voltage_table) 2073 { 2074 u32 i; 2075 2076 if (voltage_dependency_table == NULL) 2077 return -EINVAL; 2078 2079 voltage_table->mask_low = 0; 2080 voltage_table->phase_delay = 0; 2081 2082 voltage_table->count = voltage_dependency_table->count; 2083 for (i = 0; i < voltage_table->count; i++) { 2084 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; 2085 voltage_table->entries[i].smio_low = 0; 2086 } 2087 2088 return 0; 2089 } 2090 2091 static int ci_construct_voltage_tables(struct radeon_device *rdev) 2092 { 2093 struct ci_power_info *pi = ci_get_pi(rdev); 2094 int ret; 2095 2096 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2097 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC, 2098 VOLTAGE_OBJ_GPIO_LUT, 2099 &pi->vddc_voltage_table); 2100 if (ret) 2101 return ret; 2102 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2103 ret = ci_get_svi2_voltage_table(rdev, 2104 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2105 &pi->vddc_voltage_table); 2106 if (ret) 2107 return ret; 2108 } 2109 2110 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) 2111 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC, 2112 &pi->vddc_voltage_table); 2113 2114 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2115 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI, 2116 VOLTAGE_OBJ_GPIO_LUT, 2117 &pi->vddci_voltage_table); 2118 if (ret) 2119 return ret; 2120 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2121 ret = ci_get_svi2_voltage_table(rdev, 2122 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2123 &pi->vddci_voltage_table); 2124 if (ret) 2125 return ret; 2126 } 2127 2128 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) 2129 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI, 2130 &pi->vddci_voltage_table); 2131 2132 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2133 ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC, 2134 VOLTAGE_OBJ_GPIO_LUT, 2135 &pi->mvdd_voltage_table); 2136 if (ret) 2137 return ret; 2138 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2139 ret = ci_get_svi2_voltage_table(rdev, 2140 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2141 &pi->mvdd_voltage_table); 2142 if (ret) 2143 return ret; 2144 } 2145 2146 if (pi->mvdd_voltage_table.count > 
SMU7_MAX_LEVELS_MVDD) 2147 si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD, 2148 &pi->mvdd_voltage_table); 2149 2150 return 0; 2151 } 2152 2153 static void ci_populate_smc_voltage_table(struct radeon_device *rdev, 2154 struct atom_voltage_table_entry *voltage_table, 2155 SMU7_Discrete_VoltageLevel *smc_voltage_table) 2156 { 2157 int ret; 2158 2159 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table, 2160 &smc_voltage_table->StdVoltageHiSidd, 2161 &smc_voltage_table->StdVoltageLoSidd); 2162 2163 if (ret) { 2164 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; 2165 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; 2166 } 2167 2168 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); 2169 smc_voltage_table->StdVoltageHiSidd = 2170 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); 2171 smc_voltage_table->StdVoltageLoSidd = 2172 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); 2173 } 2174 2175 static int ci_populate_smc_vddc_table(struct radeon_device *rdev, 2176 SMU7_Discrete_DpmTable *table) 2177 { 2178 struct ci_power_info *pi = ci_get_pi(rdev); 2179 unsigned int count; 2180 2181 table->VddcLevelCount = pi->vddc_voltage_table.count; 2182 for (count = 0; count < table->VddcLevelCount; count++) { 2183 ci_populate_smc_voltage_table(rdev, 2184 &pi->vddc_voltage_table.entries[count], 2185 &table->VddcLevel[count]); 2186 2187 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2188 table->VddcLevel[count].Smio |= 2189 pi->vddc_voltage_table.entries[count].smio_low; 2190 else 2191 table->VddcLevel[count].Smio = 0; 2192 } 2193 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); 2194 2195 return 0; 2196 } 2197 2198 static int ci_populate_smc_vddci_table(struct radeon_device *rdev, 2199 SMU7_Discrete_DpmTable *table) 2200 { 2201 unsigned int count; 2202 struct ci_power_info *pi = ci_get_pi(rdev); 2203 2204 table->VddciLevelCount = pi->vddci_voltage_table.count; 2205 for (count = 0; count < table->VddciLevelCount; count++) { 2206 ci_populate_smc_voltage_table(rdev, 2207 &pi->vddci_voltage_table.entries[count], 2208 &table->VddciLevel[count]); 2209 2210 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2211 table->VddciLevel[count].Smio |= 2212 pi->vddci_voltage_table.entries[count].smio_low; 2213 else 2214 table->VddciLevel[count].Smio = 0; 2215 } 2216 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); 2217 2218 return 0; 2219 } 2220 2221 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev, 2222 SMU7_Discrete_DpmTable *table) 2223 { 2224 struct ci_power_info *pi = ci_get_pi(rdev); 2225 unsigned int count; 2226 2227 table->MvddLevelCount = pi->mvdd_voltage_table.count; 2228 for (count = 0; count < table->MvddLevelCount; count++) { 2229 ci_populate_smc_voltage_table(rdev, 2230 &pi->mvdd_voltage_table.entries[count], 2231 &table->MvddLevel[count]); 2232 2233 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2234 table->MvddLevel[count].Smio |= 2235 pi->mvdd_voltage_table.entries[count].smio_low; 2236 else 2237 table->MvddLevel[count].Smio = 0; 2238 } 2239 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); 2240 2241 return 0; 2242 } 2243 2244 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev, 2245 SMU7_Discrete_DpmTable *table) 2246 { 2247 int ret; 2248 2249 ret = ci_populate_smc_vddc_table(rdev, table); 2250 if (ret) 2251 return ret; 2252 2253 ret = ci_populate_smc_vddci_table(rdev, table); 2254 if (ret) 2255 return 
ret;

	ret = ci_populate_smc_mvdd_table(rdev, table);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;

		/* a matching level was found and *voltage is valid */
		return 0;
	}

	/* MVDD not under driver control: callers fall back to MinMvdd = 0 */
	return -EINVAL;
}

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;

	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk < limits->entries[i].sclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
						  const struct radeon_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk < limits->entries[i].mclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static int ci_init_arb_table_index(struct
radeon_device *rdev) 2371 { 2372 struct ci_power_info *pi = ci_get_pi(rdev); 2373 u32 tmp; 2374 int ret; 2375 2376 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start, 2377 &tmp, pi->sram_end); 2378 if (ret) 2379 return ret; 2380 2381 tmp &= 0x00FFFFFF; 2382 tmp |= MC_CG_ARB_FREQ_F1 << 24; 2383 2384 return ci_write_smc_sram_dword(rdev, pi->arb_table_start, 2385 tmp, pi->sram_end); 2386 } 2387 2388 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev, 2389 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table, 2390 u32 clock, u32 *voltage) 2391 { 2392 u32 i = 0; 2393 2394 if (allowed_clock_voltage_table->count == 0) 2395 return -EINVAL; 2396 2397 for (i = 0; i < allowed_clock_voltage_table->count; i++) { 2398 if (allowed_clock_voltage_table->entries[i].clk >= clock) { 2399 *voltage = allowed_clock_voltage_table->entries[i].v; 2400 return 0; 2401 } 2402 } 2403 2404 *voltage = allowed_clock_voltage_table->entries[i-1].v; 2405 2406 return 0; 2407 } 2408 2409 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev, 2410 u32 sclk, u32 min_sclk_in_sr) 2411 { 2412 u32 i; 2413 u32 tmp; 2414 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 2415 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; 2416 2417 if (sclk < min) 2418 return 0; 2419 2420 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { 2421 tmp = sclk / (1 << i); 2422 if (tmp >= min || i == 0) 2423 break; 2424 } 2425 2426 return (u8)i; 2427 } 2428 2429 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev) 2430 { 2431 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 2432 } 2433 2434 static int ci_reset_to_default(struct radeon_device *rdev) 2435 { 2436 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? 2437 0 : -EINVAL; 2438 } 2439 2440 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev) 2441 { 2442 u32 tmp; 2443 2444 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8; 2445 2446 if (tmp == MC_CG_ARB_FREQ_F0) 2447 return 0; 2448 2449 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); 2450 } 2451 2452 static void ci_register_patching_mc_arb(struct radeon_device *rdev, 2453 const u32 engine_clock, 2454 const u32 memory_clock, 2455 u32 *dram_timimg2) 2456 { 2457 bool patch; 2458 u32 tmp, tmp2; 2459 2460 tmp = RREG32(MC_SEQ_MISC0); 2461 patch = ((tmp & 0x0000f00) == 0x300) ? 
true : false; 2462 2463 if (patch && 2464 ((rdev->pdev->device == 0x67B0) || 2465 (rdev->pdev->device == 0x67B1))) { 2466 if ((memory_clock > 100000) && (memory_clock <= 125000)) { 2467 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; 2468 *dram_timimg2 &= ~0x00ff0000; 2469 *dram_timimg2 |= tmp2 << 16; 2470 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { 2471 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; 2472 *dram_timimg2 &= ~0x00ff0000; 2473 *dram_timimg2 |= tmp2 << 16; 2474 } 2475 } 2476 } 2477 2478 2479 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, 2480 u32 sclk, 2481 u32 mclk, 2482 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) 2483 { 2484 u32 dram_timing; 2485 u32 dram_timing2; 2486 u32 burst_time; 2487 2488 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk); 2489 2490 dram_timing = RREG32(MC_ARB_DRAM_TIMING); 2491 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 2492 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; 2493 2494 ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2); 2495 2496 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); 2497 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); 2498 arb_regs->McArbBurstTime = (u8)burst_time; 2499 2500 return 0; 2501 } 2502 2503 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev) 2504 { 2505 struct ci_power_info *pi = ci_get_pi(rdev); 2506 SMU7_Discrete_MCArbDramTimingTable arb_regs; 2507 u32 i, j; 2508 int ret = 0; 2509 2510 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); 2511 2512 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { 2513 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { 2514 ret = ci_populate_memory_timing_parameters(rdev, 2515 pi->dpm_table.sclk_table.dpm_levels[i].value, 2516 pi->dpm_table.mclk_table.dpm_levels[j].value, 2517 &arb_regs.entries[i][j]); 2518 if (ret) 2519 break; 2520 } 2521 } 2522 2523 if (ret == 0) 2524 ret = ci_copy_bytes_to_smc(rdev, 2525 pi->arb_table_start, 2526 (u8 *)&arb_regs, 2527 sizeof(SMU7_Discrete_MCArbDramTimingTable), 2528 pi->sram_end); 2529 2530 return ret; 2531 } 2532 2533 static int ci_program_memory_timing_parameters(struct radeon_device *rdev) 2534 { 2535 struct ci_power_info *pi = ci_get_pi(rdev); 2536 2537 if (pi->need_update_smu7_dpm_table == 0) 2538 return 0; 2539 2540 return ci_do_program_memory_timing_parameters(rdev); 2541 } 2542 2543 static void ci_populate_smc_initial_state(struct radeon_device *rdev, 2544 struct radeon_ps *radeon_boot_state) 2545 { 2546 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state); 2547 struct ci_power_info *pi = ci_get_pi(rdev); 2548 u32 level = 0; 2549 2550 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { 2551 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= 2552 boot_state->performance_levels[0].sclk) { 2553 pi->smc_state_table.GraphicsBootLevel = level; 2554 break; 2555 } 2556 } 2557 2558 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { 2559 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= 2560 boot_state->performance_levels[0].mclk) { 2561 pi->smc_state_table.MemoryBootLevel = level; 2562 break; 2563 } 2564 } 2565 } 2566 2567 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) 2568 { 2569 u32 i; 2570 u32 mask_value = 0; 2571 2572 for (i = dpm_table->count; i > 0; i--) { 2573 mask_value = mask_value << 1; 2574 if (dpm_table->dpm_levels[i-1].enabled) 
)
			mask_value |= 0x1;
		else
			mask_value &= 0xFFFFFFFE;
	}

	return mask_value;
}

static void ci_populate_smc_link_level(struct radeon_device *rdev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}

static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}

static int ci_populate_smc_vce_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_acp_level(struct radeon_device *rdev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_populate_smc_samu_level(struct radeon_device *rdev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = radeon_atom_get_clock_dividers(rdev,
						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
						     table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}

static int ci_calculate_mclk_params(struct radeon_device *rdev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
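	/* Fold the VBIOS-computed MPLL parameters into the cached register
	 * images: each bitfield is cleared first, then OR'd with the new
	 * value from mpll_param.
	 */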
2769 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); 2770 2771 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); 2772 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | 2773 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); 2774 2775 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; 2776 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); 2777 2778 if (pi->mem_gddr5) { 2779 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); 2780 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | 2781 YCLK_POST_DIV(mpll_param.post_div); 2782 } 2783 2784 if (pi->caps_mclk_ss_support) { 2785 struct radeon_atom_ss ss; 2786 u32 freq_nom; 2787 u32 tmp; 2788 u32 reference_clock = rdev->clock.mpll.reference_freq; 2789 2790 if (mpll_param.qdr == 1) 2791 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); 2792 else 2793 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); 2794 2795 tmp = (freq_nom / reference_clock); 2796 tmp = tmp * tmp; 2797 if (radeon_atombios_get_asic_ss_info(rdev, &ss, 2798 ASIC_INTERNAL_MEMORY_SS, freq_nom)) { 2799 u32 clks = reference_clock * 5 / ss.rate; 2800 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); 2801 2802 mpll_ss1 &= ~CLKV_MASK; 2803 mpll_ss1 |= CLKV(clkv); 2804 2805 mpll_ss2 &= ~CLKS_MASK; 2806 mpll_ss2 |= CLKS(clks); 2807 } 2808 } 2809 2810 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; 2811 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); 2812 2813 if (dll_state_on) 2814 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; 2815 else 2816 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); 2817 2818 mclk->MclkFrequency = memory_clock; 2819 mclk->MpllFuncCntl = mpll_func_cntl; 2820 mclk->MpllFuncCntl_1 = mpll_func_cntl_1; 2821 mclk->MpllFuncCntl_2 = mpll_func_cntl_2; 2822 mclk->MpllAdFuncCntl = mpll_ad_func_cntl; 2823 mclk->MpllDqFuncCntl = mpll_dq_func_cntl; 2824 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; 2825 mclk->DllCntl = dll_cntl; 2826 mclk->MpllSs1 = mpll_ss1; 2827 mclk->MpllSs2 = mpll_ss2; 2828 2829 return 0; 2830 } 2831 2832 static int ci_populate_single_memory_level(struct radeon_device *rdev, 2833 u32 memory_clock, 2834 SMU7_Discrete_MemoryLevel *memory_level) 2835 { 2836 struct ci_power_info *pi = ci_get_pi(rdev); 2837 int ret; 2838 bool dll_state_on; 2839 2840 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { 2841 ret = ci_get_dependency_volt_by_clk(rdev, 2842 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2843 memory_clock, &memory_level->MinVddc); 2844 if (ret) 2845 return ret; 2846 } 2847 2848 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { 2849 ret = ci_get_dependency_volt_by_clk(rdev, 2850 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2851 memory_clock, &memory_level->MinVddci); 2852 if (ret) 2853 return ret; 2854 } 2855 2856 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { 2857 ret = ci_get_dependency_volt_by_clk(rdev, 2858 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2859 memory_clock, &memory_level->MinMvdd); 2860 if (ret) 2861 return ret; 2862 } 2863 2864 memory_level->MinVddcPhases = 1; 2865 2866 if (pi->vddc_phase_shed_control) 2867 ci_populate_phase_value_based_on_mclk(rdev, 2868 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table, 2869 memory_clock, 2870 &memory_level->MinVddcPhases); 2871 2872 memory_level->EnabledForThrottle = 1; 2873 memory_level->UpH = 0; 2874 memory_level->DownH = 100; 2875 memory_level->VoltageDownH = 0; 2876 memory_level->ActivityLevel = (u16)pi->mclk_activity_target; 2877 2878 memory_level->StutterEnable = false; 2879 
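	/* Default the optional memory-level features off; the threshold
	 * checks below selectively re-enable stutter, strobe and EDC modes.
	 */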
memory_level->StrobeEnable = false; 2880 memory_level->EdcReadEnable = false; 2881 memory_level->EdcWriteEnable = false; 2882 memory_level->RttEnable = false; 2883 2884 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 2885 2886 if (pi->mclk_stutter_mode_threshold && 2887 (memory_clock <= pi->mclk_stutter_mode_threshold) && 2888 (pi->uvd_enabled == false) && 2889 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && 2890 (rdev->pm.dpm.new_active_crtc_count <= 2)) 2891 memory_level->StutterEnable = true; 2892 2893 if (pi->mclk_strobe_mode_threshold && 2894 (memory_clock <= pi->mclk_strobe_mode_threshold)) 2895 memory_level->StrobeEnable = 1; 2896 2897 if (pi->mem_gddr5) { 2898 memory_level->StrobeRatio = 2899 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); 2900 if (pi->mclk_edc_enable_threshold && 2901 (memory_clock > pi->mclk_edc_enable_threshold)) 2902 memory_level->EdcReadEnable = true; 2903 2904 if (pi->mclk_edc_wr_enable_threshold && 2905 (memory_clock > pi->mclk_edc_wr_enable_threshold)) 2906 memory_level->EdcWriteEnable = true; 2907 2908 if (memory_level->StrobeEnable) { 2909 if (si_get_mclk_frequency_ratio(memory_clock, true) >= 2910 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) 2911 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2912 else 2913 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; 2914 } else { 2915 dll_state_on = pi->dll_default_on; 2916 } 2917 } else { 2918 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock); 2919 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 2920 } 2921 2922 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); 2923 if (ret) 2924 return ret; 2925 2926 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); 2927 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); 2928 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); 2929 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); 2930 2931 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); 2932 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); 2933 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); 2934 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); 2935 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); 2936 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); 2937 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); 2938 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); 2939 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); 2940 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); 2941 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); 2942 2943 return 0; 2944 } 2945 2946 static int ci_populate_smc_acpi_level(struct radeon_device *rdev, 2947 SMU7_Discrete_DpmTable *table) 2948 { 2949 struct ci_power_info *pi = ci_get_pi(rdev); 2950 struct atom_clock_dividers dividers; 2951 SMU7_Discrete_VoltageLevel voltage_level; 2952 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; 2953 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; 2954 u32 dll_cntl = pi->clock_registers.dll_cntl; 2955 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2956 int ret; 2957 2958 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; 2959 2960 if 
(pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl &= ~SPLL_PWRON;
	spll_func_cntl |= SPLL_RESET;

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
cpu_to_be32(pi->clock_registers.mpll_func_cntl); 3037 table->MemoryACPILevel.MpllFuncCntl_1 = 3038 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); 3039 table->MemoryACPILevel.MpllFuncCntl_2 = 3040 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); 3041 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); 3042 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); 3043 3044 table->MemoryACPILevel.EnabledForThrottle = 0; 3045 table->MemoryACPILevel.EnabledForActivity = 0; 3046 table->MemoryACPILevel.UpH = 0; 3047 table->MemoryACPILevel.DownH = 100; 3048 table->MemoryACPILevel.VoltageDownH = 0; 3049 table->MemoryACPILevel.ActivityLevel = 3050 cpu_to_be16((u16)pi->mclk_activity_target); 3051 3052 table->MemoryACPILevel.StutterEnable = false; 3053 table->MemoryACPILevel.StrobeEnable = false; 3054 table->MemoryACPILevel.EdcReadEnable = false; 3055 table->MemoryACPILevel.EdcWriteEnable = false; 3056 table->MemoryACPILevel.RttEnable = false; 3057 3058 return 0; 3059 } 3060 3061 3062 static int ci_enable_ulv(struct radeon_device *rdev, bool enable) 3063 { 3064 struct ci_power_info *pi = ci_get_pi(rdev); 3065 struct ci_ulv_parm *ulv = &pi->ulv; 3066 3067 if (ulv->supported) { 3068 if (enable) 3069 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? 3070 0 : -EINVAL; 3071 else 3072 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? 3073 0 : -EINVAL; 3074 } 3075 3076 return 0; 3077 } 3078 3079 static int ci_populate_ulv_level(struct radeon_device *rdev, 3080 SMU7_Discrete_Ulv *state) 3081 { 3082 struct ci_power_info *pi = ci_get_pi(rdev); 3083 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time; 3084 3085 state->CcPwrDynRm = 0; 3086 state->CcPwrDynRm1 = 0; 3087 3088 if (ulv_voltage == 0) { 3089 pi->ulv.supported = false; 3090 return 0; 3091 } 3092 3093 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 3094 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3095 state->VddcOffset = 0; 3096 else 3097 state->VddcOffset = 3098 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; 3099 } else { 3100 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3101 state->VddcOffsetVid = 0; 3102 else 3103 state->VddcOffsetVid = (u8) 3104 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * 3105 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); 3106 } 3107 state->VddcPhase = pi->vddc_phase_shed_control ? 
0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}

static int ci_calculate_sclk_params(struct radeon_device *rdev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = radeon_atom_get_clock_dividers(rdev,
					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
					     engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->caps_sclk_ss_support) {
		struct radeon_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}

static int ci_populate_single_graphic_level(struct radeon_device *rdev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(rdev,
					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(rdev,
						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
										   engine_clock,
CISLAND_MINIMUM_ENGINE_CLOCK); 3215 3216 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3217 3218 graphic_level->Flags = cpu_to_be32(graphic_level->Flags); 3219 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); 3220 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); 3221 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); 3222 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); 3223 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); 3224 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); 3225 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); 3226 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); 3227 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); 3228 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); 3229 3230 return 0; 3231 } 3232 3233 static int ci_populate_all_graphic_levels(struct radeon_device *rdev) 3234 { 3235 struct ci_power_info *pi = ci_get_pi(rdev); 3236 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3237 u32 level_array_address = pi->dpm_table_start + 3238 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); 3239 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * 3240 SMU7_MAX_LEVELS_GRAPHICS; 3241 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; 3242 u32 i, ret; 3243 3244 memset(levels, 0, level_array_size); 3245 3246 for (i = 0; i < dpm_table->sclk_table.count; i++) { 3247 ret = ci_populate_single_graphic_level(rdev, 3248 dpm_table->sclk_table.dpm_levels[i].value, 3249 (u16)pi->activity_target[i], 3250 &pi->smc_state_table.GraphicsLevel[i]); 3251 if (ret) 3252 return ret; 3253 if (i > 1) 3254 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; 3255 if (i == (dpm_table->sclk_table.count - 1)) 3256 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = 3257 PPSMC_DISPLAY_WATERMARK_HIGH; 3258 } 3259 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; 3260 3261 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; 3262 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 3263 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); 3264 3265 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 3266 (u8 *)levels, level_array_size, 3267 pi->sram_end); 3268 if (ret) 3269 return ret; 3270 3271 return 0; 3272 } 3273 3274 static int ci_populate_ulv_state(struct radeon_device *rdev, 3275 SMU7_Discrete_Ulv *ulv_level) 3276 { 3277 return ci_populate_ulv_level(rdev, ulv_level); 3278 } 3279 3280 static int ci_populate_all_memory_levels(struct radeon_device *rdev) 3281 { 3282 struct ci_power_info *pi = ci_get_pi(rdev); 3283 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3284 u32 level_array_address = pi->dpm_table_start + 3285 offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 3286 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * 3287 SMU7_MAX_LEVELS_MEMORY; 3288 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; 3289 u32 i, ret; 3290 3291 memset(levels, 0, level_array_size); 3292 3293 for (i = 0; i < dpm_table->mclk_table.count; i++) { 3294 if (dpm_table->mclk_table.dpm_levels[i].value == 0) 3295 return -EINVAL; 3296 ret = ci_populate_single_memory_level(rdev, 3297 dpm_table->mclk_table.dpm_levels[i].value, 3298 &pi->smc_state_table.MemoryLevel[i]); 3299 if (ret) 3300 return ret; 3301 } 3302 3303 
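	/* Illustrative example (not from the source): for a 4-entry mclk table
	 * with levels 0..2 enabled, ci_get_dpm_level_enable_mask_value() below
	 * scans from the top level down and returns 0b0111 = 0x7.
	 */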
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; 3304 3305 if ((dpm_table->mclk_table.count >= 2) && 3306 ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) { 3307 pi->smc_state_table.MemoryLevel[1].MinVddc = 3308 pi->smc_state_table.MemoryLevel[0].MinVddc; 3309 pi->smc_state_table.MemoryLevel[1].MinVddcPhases = 3310 pi->smc_state_table.MemoryLevel[0].MinVddcPhases; 3311 } 3312 3313 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); 3314 3315 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; 3316 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 3317 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); 3318 3319 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = 3320 PPSMC_DISPLAY_WATERMARK_HIGH; 3321 3322 ret = ci_copy_bytes_to_smc(rdev, level_array_address, 3323 (u8 *)levels, level_array_size, 3324 pi->sram_end); 3325 if (ret) 3326 return ret; 3327 3328 return 0; 3329 } 3330 3331 static void ci_reset_single_dpm_table(struct radeon_device *rdev, 3332 struct ci_single_dpm_table* dpm_table, 3333 u32 count) 3334 { 3335 u32 i; 3336 3337 dpm_table->count = count; 3338 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) 3339 dpm_table->dpm_levels[i].enabled = false; 3340 } 3341 3342 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, 3343 u32 index, u32 pcie_gen, u32 pcie_lanes) 3344 { 3345 dpm_table->dpm_levels[index].value = pcie_gen; 3346 dpm_table->dpm_levels[index].param1 = pcie_lanes; 3347 dpm_table->dpm_levels[index].enabled = true; 3348 } 3349 3350 static int ci_setup_default_pcie_tables(struct radeon_device *rdev) 3351 { 3352 struct ci_power_info *pi = ci_get_pi(rdev); 3353 3354 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) 3355 return -EINVAL; 3356 3357 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { 3358 pi->pcie_gen_powersaving = pi->pcie_gen_performance; 3359 pi->pcie_lane_powersaving = pi->pcie_lane_performance; 3360 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { 3361 pi->pcie_gen_performance = pi->pcie_gen_powersaving; 3362 pi->pcie_lane_performance = pi->pcie_lane_powersaving; 3363 } 3364 3365 ci_reset_single_dpm_table(rdev, 3366 &pi->dpm_table.pcie_speed_table, 3367 SMU7_MAX_LEVELS_LINK); 3368 3369 if (rdev->family == CHIP_BONAIRE) 3370 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3371 pi->pcie_gen_powersaving.min, 3372 pi->pcie_lane_powersaving.max); 3373 else 3374 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3375 pi->pcie_gen_powersaving.min, 3376 pi->pcie_lane_powersaving.min); 3377 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, 3378 pi->pcie_gen_performance.min, 3379 pi->pcie_lane_performance.min); 3380 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, 3381 pi->pcie_gen_powersaving.min, 3382 pi->pcie_lane_powersaving.max); 3383 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, 3384 pi->pcie_gen_performance.min, 3385 pi->pcie_lane_performance.max); 3386 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, 3387 pi->pcie_gen_powersaving.max, 3388 pi->pcie_lane_powersaving.max); 3389 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, 3390 pi->pcie_gen_performance.max, 3391 pi->pcie_lane_performance.max); 3392 3393 pi->dpm_table.pcie_speed_table.count = 6; 3394 3395 return 0; 3396 } 3397 3398 static int ci_setup_default_dpm_tables(struct radeon_device *rdev) 3399 { 3400 
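	/* Build the default DPM tables from the ATOM dependency tables:
	 * consecutive duplicate clocks are collapsed for the sclk/mclk tables,
	 * then the vddc/vddci/mvdd and PCIe tables are derived below.
	 */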
struct ci_power_info *pi = ci_get_pi(rdev); 3401 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 3402 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 3403 struct radeon_clock_voltage_dependency_table *allowed_mclk_table = 3404 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 3405 struct radeon_cac_leakage_table *std_voltage_table = 3406 &rdev->pm.dpm.dyn_state.cac_leakage_table; 3407 u32 i; 3408 3409 if (allowed_sclk_vddc_table == NULL) 3410 return -EINVAL; 3411 if (allowed_sclk_vddc_table->count < 1) 3412 return -EINVAL; 3413 if (allowed_mclk_table == NULL) 3414 return -EINVAL; 3415 if (allowed_mclk_table->count < 1) 3416 return -EINVAL; 3417 3418 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table)); 3419 3420 ci_reset_single_dpm_table(rdev, 3421 &pi->dpm_table.sclk_table, 3422 SMU7_MAX_LEVELS_GRAPHICS); 3423 ci_reset_single_dpm_table(rdev, 3424 &pi->dpm_table.mclk_table, 3425 SMU7_MAX_LEVELS_MEMORY); 3426 ci_reset_single_dpm_table(rdev, 3427 &pi->dpm_table.vddc_table, 3428 SMU7_MAX_LEVELS_VDDC); 3429 ci_reset_single_dpm_table(rdev, 3430 &pi->dpm_table.vddci_table, 3431 SMU7_MAX_LEVELS_VDDCI); 3432 ci_reset_single_dpm_table(rdev, 3433 &pi->dpm_table.mvdd_table, 3434 SMU7_MAX_LEVELS_MVDD); 3435 3436 pi->dpm_table.sclk_table.count = 0; 3437 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3438 if ((i == 0) || 3439 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != 3440 allowed_sclk_vddc_table->entries[i].clk)) { 3441 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = 3442 allowed_sclk_vddc_table->entries[i].clk; 3443 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = 3444 (i == 0) ? true : false; 3445 pi->dpm_table.sclk_table.count++; 3446 } 3447 } 3448 3449 pi->dpm_table.mclk_table.count = 0; 3450 for (i = 0; i < allowed_mclk_table->count; i++) { 3451 if ((i == 0) || 3452 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != 3453 allowed_mclk_table->entries[i].clk)) { 3454 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = 3455 allowed_mclk_table->entries[i].clk; 3456 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = 3457 (i == 0) ? 
true : false; 3458 pi->dpm_table.mclk_table.count++; 3459 } 3460 } 3461 3462 for (i = 0; i < allowed_sclk_vddc_table->count; i++) { 3463 pi->dpm_table.vddc_table.dpm_levels[i].value = 3464 allowed_sclk_vddc_table->entries[i].v; 3465 pi->dpm_table.vddc_table.dpm_levels[i].param1 = 3466 std_voltage_table->entries[i].leakage; 3467 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; 3468 } 3469 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; 3470 3471 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 3472 if (allowed_mclk_table) { 3473 for (i = 0; i < allowed_mclk_table->count; i++) { 3474 pi->dpm_table.vddci_table.dpm_levels[i].value = 3475 allowed_mclk_table->entries[i].v; 3476 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; 3477 } 3478 pi->dpm_table.vddci_table.count = allowed_mclk_table->count; 3479 } 3480 3481 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; 3482 if (allowed_mclk_table) { 3483 for (i = 0; i < allowed_mclk_table->count; i++) { 3484 pi->dpm_table.mvdd_table.dpm_levels[i].value = 3485 allowed_mclk_table->entries[i].v; 3486 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; 3487 } 3488 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; 3489 } 3490 3491 ci_setup_default_pcie_tables(rdev); 3492 3493 return 0; 3494 } 3495 3496 static int ci_find_boot_level(struct ci_single_dpm_table *table, 3497 u32 value, u32 *boot_level) 3498 { 3499 u32 i; 3500 int ret = -EINVAL; 3501 3502 for(i = 0; i < table->count; i++) { 3503 if (value == table->dpm_levels[i].value) { 3504 *boot_level = i; 3505 ret = 0; 3506 } 3507 } 3508 3509 return ret; 3510 } 3511 3512 static int ci_init_smc_table(struct radeon_device *rdev) 3513 { 3514 struct ci_power_info *pi = ci_get_pi(rdev); 3515 struct ci_ulv_parm *ulv = &pi->ulv; 3516 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps; 3517 SMU7_Discrete_DpmTable *table = &pi->smc_state_table; 3518 int ret; 3519 3520 ret = ci_setup_default_dpm_tables(rdev); 3521 if (ret) 3522 return ret; 3523 3524 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) 3525 ci_populate_smc_voltage_tables(rdev, table); 3526 3527 ci_init_fps_limits(rdev); 3528 3529 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) 3530 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; 3531 3532 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) 3533 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; 3534 3535 if (pi->mem_gddr5) 3536 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; 3537 3538 if (ulv->supported) { 3539 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv); 3540 if (ret) 3541 return ret; 3542 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); 3543 } 3544 3545 ret = ci_populate_all_graphic_levels(rdev); 3546 if (ret) 3547 return ret; 3548 3549 ret = ci_populate_all_memory_levels(rdev); 3550 if (ret) 3551 return ret; 3552 3553 ci_populate_smc_link_level(rdev, table); 3554 3555 ret = ci_populate_smc_acpi_level(rdev, table); 3556 if (ret) 3557 return ret; 3558 3559 ret = ci_populate_smc_vce_level(rdev, table); 3560 if (ret) 3561 return ret; 3562 3563 ret = ci_populate_smc_acp_level(rdev, table); 3564 if (ret) 3565 return ret; 3566 3567 ret = ci_populate_smc_samu_level(rdev, table); 3568 if (ret) 3569 return ret; 3570 3571 ret = ci_do_program_memory_timing_parameters(rdev); 3572 if (ret) 3573 return ret; 3574 3575 ret = ci_populate_smc_uvd_level(rdev, table); 3576 if (ret) 3577 return ret; 3578 3579 table->UvdBootLevel = 0; 3580 table->VceBootLevel = 0; 3581 
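	/* The boot levels default to 0; ci_find_boot_level() below retargets
	 * the graphics and memory boot levels to the VBIOS bootup clocks when
	 * a matching table entry exists.
	 */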
table->AcpBootLevel = 0; 3582 table->SamuBootLevel = 0; 3583 table->GraphicsBootLevel = 0; 3584 table->MemoryBootLevel = 0; 3585 3586 ret = ci_find_boot_level(&pi->dpm_table.sclk_table, 3587 pi->vbios_boot_state.sclk_bootup_value, 3588 (u32 *)&pi->smc_state_table.GraphicsBootLevel); 3589 3590 ret = ci_find_boot_level(&pi->dpm_table.mclk_table, 3591 pi->vbios_boot_state.mclk_bootup_value, 3592 (u32 *)&pi->smc_state_table.MemoryBootLevel); 3593 3594 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; 3595 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; 3596 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; 3597 3598 ci_populate_smc_initial_state(rdev, radeon_boot_state); 3599 3600 ret = ci_populate_bapm_parameters_in_dpm_table(rdev); 3601 if (ret) 3602 return ret; 3603 3604 table->UVDInterval = 1; 3605 table->VCEInterval = 1; 3606 table->ACPInterval = 1; 3607 table->SAMUInterval = 1; 3608 table->GraphicsVoltageChangeEnable = 1; 3609 table->GraphicsThermThrottleEnable = 1; 3610 table->GraphicsInterval = 1; 3611 table->VoltageInterval = 1; 3612 table->ThermalInterval = 1; 3613 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * 3614 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); 3615 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * 3616 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); 3617 table->MemoryVoltageChangeEnable = 1; 3618 table->MemoryInterval = 1; 3619 table->VoltageResponseTime = 0; 3620 table->VddcVddciDelta = 4000; 3621 table->PhaseResponseTime = 0; 3622 table->MemoryThermThrottleEnable = 1; 3623 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; 3624 table->PCIeGenInterval = 1; 3625 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) 3626 table->SVI2Enable = 1; 3627 else 3628 table->SVI2Enable = 0; 3629 3630 table->ThermGpio = 17; 3631 table->SclkStepSize = 0x4000; 3632 3633 table->SystemFlags = cpu_to_be32(table->SystemFlags); 3634 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); 3635 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); 3636 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); 3637 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); 3638 table->SclkStepSize = cpu_to_be32(table->SclkStepSize); 3639 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); 3640 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); 3641 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta); 3642 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); 3643 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); 3644 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); 3645 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); 3646 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); 3647 3648 ret = ci_copy_bytes_to_smc(rdev, 3649 pi->dpm_table_start + 3650 offsetof(SMU7_Discrete_DpmTable, SystemFlags), 3651 (u8 *)&table->SystemFlags, 3652 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), 3653 pi->sram_end); 3654 if (ret) 3655 return ret; 3656 3657 return 0; 3658 } 3659 3660 static void ci_trim_single_dpm_states(struct radeon_device *rdev, 3661 struct ci_single_dpm_table *dpm_table, 3662 u32 low_limit, u32 high_limit) 3663 { 3664 u32 i; 3665 3666 for (i = 0; i < dpm_table->count; i++) { 3667 if ((dpm_table->dpm_levels[i].value < low_limit) || 3668 (dpm_table->dpm_levels[i].value > high_limit)) 3669 
dpm_table->dpm_levels[i].enabled = false; 3670 else 3671 dpm_table->dpm_levels[i].enabled = true; 3672 } 3673 } 3674 3675 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev, 3676 u32 speed_low, u32 lanes_low, 3677 u32 speed_high, u32 lanes_high) 3678 { 3679 struct ci_power_info *pi = ci_get_pi(rdev); 3680 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; 3681 u32 i, j; 3682 3683 for (i = 0; i < pcie_table->count; i++) { 3684 if ((pcie_table->dpm_levels[i].value < speed_low) || 3685 (pcie_table->dpm_levels[i].param1 < lanes_low) || 3686 (pcie_table->dpm_levels[i].value > speed_high) || 3687 (pcie_table->dpm_levels[i].param1 > lanes_high)) 3688 pcie_table->dpm_levels[i].enabled = false; 3689 else 3690 pcie_table->dpm_levels[i].enabled = true; 3691 } 3692 3693 for (i = 0; i < pcie_table->count; i++) { 3694 if (pcie_table->dpm_levels[i].enabled) { 3695 for (j = i + 1; j < pcie_table->count; j++) { 3696 if (pcie_table->dpm_levels[j].enabled) { 3697 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) && 3698 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1)) 3699 pcie_table->dpm_levels[j].enabled = false; 3700 } 3701 } 3702 } 3703 } 3704 } 3705 3706 static int ci_trim_dpm_states(struct radeon_device *rdev, 3707 struct radeon_ps *radeon_state) 3708 { 3709 struct ci_ps *state = ci_get_ps(radeon_state); 3710 struct ci_power_info *pi = ci_get_pi(rdev); 3711 u32 high_limit_count; 3712 3713 if (state->performance_level_count < 1) 3714 return -EINVAL; 3715 3716 if (state->performance_level_count == 1) 3717 high_limit_count = 0; 3718 else 3719 high_limit_count = 1; 3720 3721 ci_trim_single_dpm_states(rdev, 3722 &pi->dpm_table.sclk_table, 3723 state->performance_levels[0].sclk, 3724 state->performance_levels[high_limit_count].sclk); 3725 3726 ci_trim_single_dpm_states(rdev, 3727 &pi->dpm_table.mclk_table, 3728 state->performance_levels[0].mclk, 3729 state->performance_levels[high_limit_count].mclk); 3730 3731 ci_trim_pcie_dpm_states(rdev, 3732 state->performance_levels[0].pcie_gen, 3733 state->performance_levels[0].pcie_lane, 3734 state->performance_levels[high_limit_count].pcie_gen, 3735 state->performance_levels[high_limit_count].pcie_lane); 3736 3737 return 0; 3738 } 3739 3740 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev) 3741 { 3742 struct radeon_clock_voltage_dependency_table *disp_voltage_table = 3743 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk; 3744 struct radeon_clock_voltage_dependency_table *vddc_table = 3745 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 3746 u32 requested_voltage = 0; 3747 u32 i; 3748 3749 if (disp_voltage_table == NULL) 3750 return -EINVAL; 3751 if (!disp_voltage_table->count) 3752 return -EINVAL; 3753 3754 for (i = 0; i < disp_voltage_table->count; i++) { 3755 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk) 3756 requested_voltage = disp_voltage_table->entries[i].v; 3757 } 3758 3759 for (i = 0; i < vddc_table->count; i++) { 3760 if (requested_voltage <= vddc_table->entries[i].v) { 3761 requested_voltage = vddc_table->entries[i].v; 3762 return (ci_send_msg_to_smc_with_parameter(rdev, 3763 PPSMC_MSG_VddC_Request, 3764 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ? 
3765 0 : -EINVAL; 3766 } 3767 } 3768 3769 return -EINVAL; 3770 } 3771 3772 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev) 3773 { 3774 struct ci_power_info *pi = ci_get_pi(rdev); 3775 PPSMC_Result result; 3776 3777 ci_apply_disp_minimum_voltage_request(rdev); 3778 3779 if (!pi->sclk_dpm_key_disabled) { 3780 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3781 result = ci_send_msg_to_smc_with_parameter(rdev, 3782 PPSMC_MSG_SCLKDPM_SetEnabledMask, 3783 pi->dpm_level_enable_mask.sclk_dpm_enable_mask); 3784 if (result != PPSMC_Result_OK) 3785 return -EINVAL; 3786 } 3787 } 3788 3789 if (!pi->mclk_dpm_key_disabled) { 3790 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3791 result = ci_send_msg_to_smc_with_parameter(rdev, 3792 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3793 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3794 if (result != PPSMC_Result_OK) 3795 return -EINVAL; 3796 } 3797 } 3798 #if 0 3799 if (!pi->pcie_dpm_key_disabled) { 3800 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3801 result = ci_send_msg_to_smc_with_parameter(rdev, 3802 PPSMC_MSG_PCIeDPM_SetEnabledMask, 3803 pi->dpm_level_enable_mask.pcie_dpm_enable_mask); 3804 if (result != PPSMC_Result_OK) 3805 return -EINVAL; 3806 } 3807 } 3808 #endif 3809 return 0; 3810 } 3811 3812 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, 3813 struct radeon_ps *radeon_state) 3814 { 3815 struct ci_power_info *pi = ci_get_pi(rdev); 3816 struct ci_ps *state = ci_get_ps(radeon_state); 3817 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; 3818 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3819 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; 3820 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3821 u32 i; 3822 3823 pi->need_update_smu7_dpm_table = 0; 3824 3825 for (i = 0; i < sclk_table->count; i++) { 3826 if (sclk == sclk_table->dpm_levels[i].value) 3827 break; 3828 } 3829 3830 if (i >= sclk_table->count) { 3831 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3832 } else { 3833 /* XXX check display min clock requirements */ 3834 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) 3835 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3836 } 3837 3838 for (i = 0; i < mclk_table->count; i++) { 3839 if (mclk == mclk_table->dpm_levels[i].value) 3840 break; 3841 } 3842 3843 if (i >= mclk_table->count) 3844 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; 3845 3846 if (rdev->pm.dpm.current_active_crtc_count != 3847 rdev->pm.dpm.new_active_crtc_count) 3848 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; 3849 } 3850 3851 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev, 3852 struct radeon_ps *radeon_state) 3853 { 3854 struct ci_power_info *pi = ci_get_pi(rdev); 3855 struct ci_ps *state = ci_get_ps(radeon_state); 3856 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; 3857 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; 3858 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3859 int ret; 3860 3861 if (!pi->need_update_smu7_dpm_table) 3862 return 0; 3863 3864 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) 3865 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; 3866 3867 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) 3868 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; 3869 
3870 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { 3871 ret = ci_populate_all_graphic_levels(rdev); 3872 if (ret) 3873 return ret; 3874 } 3875 3876 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { 3877 ret = ci_populate_all_memory_levels(rdev); 3878 if (ret) 3879 return ret; 3880 } 3881 3882 return 0; 3883 } 3884 3885 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable) 3886 { 3887 struct ci_power_info *pi = ci_get_pi(rdev); 3888 const struct radeon_clock_and_voltage_limits *max_limits; 3889 int i; 3890 3891 if (rdev->pm.dpm.ac_power) 3892 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3893 else 3894 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3895 3896 if (enable) { 3897 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; 3898 3899 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3900 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3901 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; 3902 3903 if (!pi->caps_uvd_dpm) 3904 break; 3905 } 3906 } 3907 3908 ci_send_msg_to_smc_with_parameter(rdev, 3909 PPSMC_MSG_UVDDPM_SetEnabledMask, 3910 pi->dpm_level_enable_mask.uvd_dpm_enable_mask); 3911 3912 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3913 pi->uvd_enabled = true; 3914 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 3915 ci_send_msg_to_smc_with_parameter(rdev, 3916 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3917 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3918 } 3919 } else { 3920 if (pi->last_mclk_dpm_enable_mask & 0x1) { 3921 pi->uvd_enabled = false; 3922 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; 3923 ci_send_msg_to_smc_with_parameter(rdev, 3924 PPSMC_MSG_MCLKDPM_SetEnabledMask, 3925 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 3926 } 3927 } 3928 3929 return (ci_send_msg_to_smc(rdev, enable ? 3930 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? 3931 0 : -EINVAL; 3932 } 3933 3934 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable) 3935 { 3936 struct ci_power_info *pi = ci_get_pi(rdev); 3937 const struct radeon_clock_and_voltage_limits *max_limits; 3938 int i; 3939 3940 if (rdev->pm.dpm.ac_power) 3941 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3942 else 3943 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3944 3945 if (enable) { 3946 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; 3947 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3948 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3949 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; 3950 3951 if (!pi->caps_vce_dpm) 3952 break; 3953 } 3954 } 3955 3956 ci_send_msg_to_smc_with_parameter(rdev, 3957 PPSMC_MSG_VCEDPM_SetEnabledMask, 3958 pi->dpm_level_enable_mask.vce_dpm_enable_mask); 3959 } 3960 3961 return (ci_send_msg_to_smc(rdev, enable ? 3962 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? 
3963 0 : -EINVAL; 3964 } 3965 3966 #if 0 3967 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable) 3968 { 3969 struct ci_power_info *pi = ci_get_pi(rdev); 3970 const struct radeon_clock_and_voltage_limits *max_limits; 3971 int i; 3972 3973 if (rdev->pm.dpm.ac_power) 3974 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 3975 else 3976 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 3977 3978 if (enable) { 3979 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; 3980 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 3981 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 3982 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; 3983 3984 if (!pi->caps_samu_dpm) 3985 break; 3986 } 3987 } 3988 3989 ci_send_msg_to_smc_with_parameter(rdev, 3990 PPSMC_MSG_SAMUDPM_SetEnabledMask, 3991 pi->dpm_level_enable_mask.samu_dpm_enable_mask); 3992 } 3993 return (ci_send_msg_to_smc(rdev, enable ? 3994 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? 3995 0 : -EINVAL; 3996 } 3997 3998 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable) 3999 { 4000 struct ci_power_info *pi = ci_get_pi(rdev); 4001 const struct radeon_clock_and_voltage_limits *max_limits; 4002 int i; 4003 4004 if (rdev->pm.dpm.ac_power) 4005 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 4006 else 4007 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc; 4008 4009 if (enable) { 4010 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; 4011 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { 4012 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { 4013 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; 4014 4015 if (!pi->caps_acp_dpm) 4016 break; 4017 } 4018 } 4019 4020 ci_send_msg_to_smc_with_parameter(rdev, 4021 PPSMC_MSG_ACPDPM_SetEnabledMask, 4022 pi->dpm_level_enable_mask.acp_dpm_enable_mask); 4023 } 4024 4025 return (ci_send_msg_to_smc(rdev, enable ? 4026 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? 4027 0 : -EINVAL; 4028 } 4029 #endif 4030 4031 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate) 4032 { 4033 struct ci_power_info *pi = ci_get_pi(rdev); 4034 u32 tmp; 4035 4036 if (!gate) { 4037 if (pi->caps_uvd_dpm || 4038 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) 4039 pi->smc_state_table.UvdBootLevel = 0; 4040 else 4041 pi->smc_state_table.UvdBootLevel = 4042 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; 4043 4044 tmp = RREG32_SMC(DPM_TABLE_475); 4045 tmp &= ~UvdBootLevel_MASK; 4046 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel); 4047 WREG32_SMC(DPM_TABLE_475, tmp); 4048 } 4049 4050 return ci_enable_uvd_dpm(rdev, !gate); 4051 } 4052 4053 static u8 ci_get_vce_boot_level(struct radeon_device *rdev) 4054 { 4055 u8 i; 4056 u32 min_evclk = 30000; /* ??? 
*/ 4057 struct radeon_vce_clock_voltage_dependency_table *table = 4058 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 4059 4060 for (i = 0; i < table->count; i++) { 4061 if (table->entries[i].evclk >= min_evclk) 4062 return i; 4063 } 4064 4065 return table->count - 1; 4066 } 4067 4068 static int ci_update_vce_dpm(struct radeon_device *rdev, 4069 struct radeon_ps *radeon_new_state, 4070 struct radeon_ps *radeon_current_state) 4071 { 4072 struct ci_power_info *pi = ci_get_pi(rdev); 4073 int ret = 0; 4074 u32 tmp; 4075 4076 if (radeon_current_state->evclk != radeon_new_state->evclk) { 4077 if (radeon_new_state->evclk) { 4078 /* turn the clocks on when encoding */ 4079 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false); 4080 4081 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev); 4082 tmp = RREG32_SMC(DPM_TABLE_475); 4083 tmp &= ~VceBootLevel_MASK; 4084 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel); 4085 WREG32_SMC(DPM_TABLE_475, tmp); 4086 4087 ret = ci_enable_vce_dpm(rdev, true); 4088 } else { 4089 /* turn the clocks off when not encoding */ 4090 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true); 4091 4092 ret = ci_enable_vce_dpm(rdev, false); 4093 } 4094 } 4095 return ret; 4096 } 4097 4098 #if 0 4099 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate) 4100 { 4101 return ci_enable_samu_dpm(rdev, gate); 4102 } 4103 4104 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate) 4105 { 4106 struct ci_power_info *pi = ci_get_pi(rdev); 4107 u32 tmp; 4108 4109 if (!gate) { 4110 pi->smc_state_table.AcpBootLevel = 0; 4111 4112 tmp = RREG32_SMC(DPM_TABLE_475); 4113 tmp &= ~AcpBootLevel_MASK; 4114 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel); 4115 WREG32_SMC(DPM_TABLE_475, tmp); 4116 } 4117 4118 return ci_enable_acp_dpm(rdev, !gate); 4119 } 4120 #endif 4121 4122 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev, 4123 struct radeon_ps *radeon_state) 4124 { 4125 struct ci_power_info *pi = ci_get_pi(rdev); 4126 int ret; 4127 4128 ret = ci_trim_dpm_states(rdev, radeon_state); 4129 if (ret) 4130 return ret; 4131 4132 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 4133 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table); 4134 pi->dpm_level_enable_mask.mclk_dpm_enable_mask = 4135 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table); 4136 pi->last_mclk_dpm_enable_mask = 4137 pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 4138 if (pi->uvd_enabled) { 4139 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) 4140 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; 4141 } 4142 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 4143 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table); 4144 4145 return 0; 4146 } 4147 4148 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev, 4149 u32 level_mask) 4150 { 4151 u32 level = 0; 4152 4153 while ((level_mask & (1 << level)) == 0) 4154 level++; 4155 4156 return level; 4157 } 4158 4159 4160 int ci_dpm_force_performance_level(struct radeon_device *rdev, 4161 enum radeon_dpm_forced_level level) 4162 { 4163 struct ci_power_info *pi = ci_get_pi(rdev); 4164 u32 tmp, levels, i; 4165 int ret; 4166 4167 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 4168 if ((!pi->pcie_dpm_key_disabled) && 4169 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 4170 levels = 0; 4171 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; 4172 while (tmp >>= 1) 4173 levels++; 4174 if (levels) { 4175 ret = ci_dpm_force_state_pcie(rdev, level); 4176 if (ret) 
4177 return ret; 4178 for (i = 0; i < rdev->usec_timeout; i++) { 4179 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 4180 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 4181 if (tmp == levels) 4182 break; 4183 udelay(1); 4184 } 4185 } 4186 } 4187 if ((!pi->sclk_dpm_key_disabled) && 4188 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 4189 levels = 0; 4190 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; 4191 while (tmp >>= 1) 4192 levels++; 4193 if (levels) { 4194 ret = ci_dpm_force_state_sclk(rdev, levels); 4195 if (ret) 4196 return ret; 4197 for (i = 0; i < rdev->usec_timeout; i++) { 4198 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4199 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 4200 if (tmp == levels) 4201 break; 4202 udelay(1); 4203 } 4204 } 4205 } 4206 if ((!pi->mclk_dpm_key_disabled) && 4207 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 4208 levels = 0; 4209 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 4210 while (tmp >>= 1) 4211 levels++; 4212 if (levels) { 4213 ret = ci_dpm_force_state_mclk(rdev, levels); 4214 if (ret) 4215 return ret; 4216 for (i = 0; i < rdev->usec_timeout; i++) { 4217 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4218 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 4219 if (tmp == levels) 4220 break; 4221 udelay(1); 4222 } 4223 } 4224 } 4225 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) { 4226 if ((!pi->sclk_dpm_key_disabled) && 4227 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 4228 levels = ci_get_lowest_enabled_level(rdev, 4229 pi->dpm_level_enable_mask.sclk_dpm_enable_mask); 4230 ret = ci_dpm_force_state_sclk(rdev, levels); 4231 if (ret) 4232 return ret; 4233 for (i = 0; i < rdev->usec_timeout; i++) { 4234 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4235 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 4236 if (tmp == levels) 4237 break; 4238 udelay(1); 4239 } 4240 } 4241 if ((!pi->mclk_dpm_key_disabled) && 4242 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 4243 levels = ci_get_lowest_enabled_level(rdev, 4244 pi->dpm_level_enable_mask.mclk_dpm_enable_mask); 4245 ret = ci_dpm_force_state_mclk(rdev, levels); 4246 if (ret) 4247 return ret; 4248 for (i = 0; i < rdev->usec_timeout; i++) { 4249 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4250 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 4251 if (tmp == levels) 4252 break; 4253 udelay(1); 4254 } 4255 } 4256 if ((!pi->pcie_dpm_key_disabled) && 4257 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 4258 levels = ci_get_lowest_enabled_level(rdev, 4259 pi->dpm_level_enable_mask.pcie_dpm_enable_mask); 4260 ret = ci_dpm_force_state_pcie(rdev, levels); 4261 if (ret) 4262 return ret; 4263 for (i = 0; i < rdev->usec_timeout; i++) { 4264 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 4265 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 4266 if (tmp == levels) 4267 break; 4268 udelay(1); 4269 } 4270 } 4271 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { 4272 if (!pi->pcie_dpm_key_disabled) { 4273 PPSMC_Result smc_result; 4274 4275 smc_result = ci_send_msg_to_smc(rdev, 4276 PPSMC_MSG_PCIeDPM_UnForceLevel); 4277 if (smc_result != PPSMC_Result_OK) 4278 return -EINVAL; 4279 } 4280 ret = ci_upload_dpm_level_enable_mask(rdev); 4281 if (ret) 4282 return ret; 4283 } 4284 4285 rdev->pm.dpm.forced_level = level; 4286 4287 return 0; 4288 } 4289 4290 static int ci_set_mc_special_registers(struct radeon_device *rdev, 4291 struct ci_mc_reg_table *table) 4292 { 4293 struct ci_power_info *pi = ci_get_pi(rdev); 4294 u8 i, j, k; 4295 u32 
temp_reg; 4296 4297 for (i = 0, j = table->last; i < table->last; i++) { 4298 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4299 return -EINVAL; 4300 switch(table->mc_reg_address[i].s1 << 2) { 4301 case MC_SEQ_MISC1: 4302 temp_reg = RREG32(MC_PMG_CMD_EMRS); 4303 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2; 4304 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 4305 for (k = 0; k < table->num_entries; k++) { 4306 table->mc_reg_table_entry[k].mc_data[j] = 4307 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); 4308 } 4309 j++; 4310 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4311 return -EINVAL; 4312 4313 temp_reg = RREG32(MC_PMG_CMD_MRS); 4314 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2; 4315 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2; 4316 for (k = 0; k < table->num_entries; k++) { 4317 table->mc_reg_table_entry[k].mc_data[j] = 4318 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 4319 if (!pi->mem_gddr5) 4320 table->mc_reg_table_entry[k].mc_data[j] |= 0x100; 4321 } 4322 j++; 4323 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4324 return -EINVAL; 4325 4326 if (!pi->mem_gddr5) { 4327 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2; 4328 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2; 4329 for (k = 0; k < table->num_entries; k++) { 4330 table->mc_reg_table_entry[k].mc_data[j] = 4331 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; 4332 } 4333 j++; 4334 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4335 return -EINVAL; 4336 } 4337 break; 4338 case MC_SEQ_RESERVE_M: 4339 temp_reg = RREG32(MC_PMG_CMD_MRS1); 4340 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2; 4341 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 4342 for (k = 0; k < table->num_entries; k++) { 4343 table->mc_reg_table_entry[k].mc_data[j] = 4344 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); 4345 } 4346 j++; 4347 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4348 return -EINVAL; 4349 break; 4350 default: 4351 break; 4352 } 4353 4354 } 4355 4356 table->last = j; 4357 4358 return 0; 4359 } 4360 4361 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) 4362 { 4363 bool result = true; 4364 4365 switch(in_reg) { 4366 case MC_SEQ_RAS_TIMING >> 2: 4367 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2; 4368 break; 4369 case MC_SEQ_DLL_STBY >> 2: 4370 *out_reg = MC_SEQ_DLL_STBY_LP >> 2; 4371 break; 4372 case MC_SEQ_G5PDX_CMD0 >> 2: 4373 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2; 4374 break; 4375 case MC_SEQ_G5PDX_CMD1 >> 2: 4376 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2; 4377 break; 4378 case MC_SEQ_G5PDX_CTRL >> 2: 4379 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2; 4380 break; 4381 case MC_SEQ_CAS_TIMING >> 2: 4382 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2; 4383 break; 4384 case MC_SEQ_MISC_TIMING >> 2: 4385 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2; 4386 break; 4387 case MC_SEQ_MISC_TIMING2 >> 2: 4388 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; 4389 break; 4390 case MC_SEQ_PMG_DVS_CMD >> 2: 4391 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2; 4392 break; 4393 case MC_SEQ_PMG_DVS_CTL >> 2: 4394 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2; 4395 break; 4396 case MC_SEQ_RD_CTL_D0 >> 2: 4397 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; 4398 break; 4399 case MC_SEQ_RD_CTL_D1 >> 2: 4400 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; 4401 break; 4402 case MC_SEQ_WR_CTL_D0 >> 2: 4403 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; 4404 break; 4405 case MC_SEQ_WR_CTL_D1 >> 2: 4406 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; 4407 break; 4408 case 
MC_PMG_CMD_EMRS >> 2: 4409 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; 4410 break; 4411 case MC_PMG_CMD_MRS >> 2: 4412 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; 4413 break; 4414 case MC_PMG_CMD_MRS1 >> 2: 4415 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; 4416 break; 4417 case MC_SEQ_PMG_TIMING >> 2: 4418 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2; 4419 break; 4420 case MC_PMG_CMD_MRS2 >> 2: 4421 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; 4422 break; 4423 case MC_SEQ_WR_CTL_2 >> 2: 4424 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2; 4425 break; 4426 default: 4427 result = false; 4428 break; 4429 } 4430 4431 return result; 4432 } 4433 4434 static void ci_set_valid_flag(struct ci_mc_reg_table *table) 4435 { 4436 u8 i, j; 4437 4438 for (i = 0; i < table->last; i++) { 4439 for (j = 1; j < table->num_entries; j++) { 4440 if (table->mc_reg_table_entry[j-1].mc_data[i] != 4441 table->mc_reg_table_entry[j].mc_data[i]) { 4442 table->valid_flag |= 1 << i; 4443 break; 4444 } 4445 } 4446 } 4447 } 4448 4449 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) 4450 { 4451 u32 i; 4452 u16 address; 4453 4454 for (i = 0; i < table->last; i++) { 4455 table->mc_reg_address[i].s0 = 4456 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 4457 address : table->mc_reg_address[i].s1; 4458 } 4459 } 4460 4461 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, 4462 struct ci_mc_reg_table *ci_table) 4463 { 4464 u8 i, j; 4465 4466 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4467 return -EINVAL; 4468 if (table->num_entries > MAX_AC_TIMING_ENTRIES) 4469 return -EINVAL; 4470 4471 for (i = 0; i < table->last; i++) 4472 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; 4473 4474 ci_table->last = table->last; 4475 4476 for (i = 0; i < table->num_entries; i++) { 4477 ci_table->mc_reg_table_entry[i].mclk_max = 4478 table->mc_reg_table_entry[i].mclk_max; 4479 for (j = 0; j < table->last; j++) 4480 ci_table->mc_reg_table_entry[i].mc_data[j] = 4481 table->mc_reg_table_entry[i].mc_data[j]; 4482 } 4483 ci_table->num_entries = table->num_entries; 4484 4485 return 0; 4486 } 4487 4488 static int ci_register_patching_mc_seq(struct radeon_device *rdev, 4489 struct ci_mc_reg_table *table) 4490 { 4491 u8 i, k; 4492 u32 tmp; 4493 bool patch; 4494 4495 tmp = RREG32(MC_SEQ_MISC0); 4496 patch = ((tmp & 0x0000f00) == 0x300) ? 
true : false; 4497 4498 if (patch && 4499 ((rdev->pdev->device == 0x67B0) || 4500 (rdev->pdev->device == 0x67B1))) { 4501 for (i = 0; i < table->last; i++) { 4502 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4503 return -EINVAL; 4504 switch(table->mc_reg_address[i].s1 >> 2) { 4505 case MC_SEQ_MISC1: 4506 for (k = 0; k < table->num_entries; k++) { 4507 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4508 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4509 table->mc_reg_table_entry[k].mc_data[i] = 4510 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | 4511 0x00000007; 4512 } 4513 break; 4514 case MC_SEQ_WR_CTL_D0: 4515 for (k = 0; k < table->num_entries; k++) { 4516 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4517 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4518 table->mc_reg_table_entry[k].mc_data[i] = 4519 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | 4520 0x0000D0DD; 4521 } 4522 break; 4523 case MC_SEQ_WR_CTL_D1: 4524 for (k = 0; k < table->num_entries; k++) { 4525 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4526 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4527 table->mc_reg_table_entry[k].mc_data[i] = 4528 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | 4529 0x0000D0DD; 4530 } 4531 break; 4532 case MC_SEQ_WR_CTL_2: 4533 for (k = 0; k < table->num_entries; k++) { 4534 if ((table->mc_reg_table_entry[k].mclk_max == 125000) || 4535 (table->mc_reg_table_entry[k].mclk_max == 137500)) 4536 table->mc_reg_table_entry[k].mc_data[i] = 0; 4537 } 4538 break; 4539 case MC_SEQ_CAS_TIMING: 4540 for (k = 0; k < table->num_entries; k++) { 4541 if (table->mc_reg_table_entry[k].mclk_max == 125000) 4542 table->mc_reg_table_entry[k].mc_data[i] = 4543 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | 4544 0x000C0140; 4545 else if (table->mc_reg_table_entry[k].mclk_max == 137500) 4546 table->mc_reg_table_entry[k].mc_data[i] = 4547 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | 4548 0x000C0150; 4549 } 4550 break; 4551 case MC_SEQ_MISC_TIMING: 4552 for (k = 0; k < table->num_entries; k++) { 4553 if (table->mc_reg_table_entry[k].mclk_max == 125000) 4554 table->mc_reg_table_entry[k].mc_data[i] = 4555 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | 4556 0x00000030; 4557 else if (table->mc_reg_table_entry[k].mclk_max == 137500) 4558 table->mc_reg_table_entry[k].mc_data[i] = 4559 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | 4560 0x00000035; 4561 } 4562 break; 4563 default: 4564 break; 4565 } 4566 } 4567 4568 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); 4569 tmp = RREG32(MC_SEQ_IO_DEBUG_DATA); 4570 tmp = (tmp & 0xFFF8FFFF) | (1 << 16); 4571 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); 4572 WREG32(MC_SEQ_IO_DEBUG_DATA, tmp); 4573 } 4574 4575 return 0; 4576 } 4577 4578 static int ci_initialize_mc_reg_table(struct radeon_device *rdev) 4579 { 4580 struct ci_power_info *pi = ci_get_pi(rdev); 4581 struct atom_mc_reg_table *table; 4582 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table; 4583 u8 module_index = rv770_get_memory_module_index(rdev); 4584 int ret; 4585 4586 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); 4587 if (!table) 4588 return -ENOMEM; 4589 4590 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); 4591 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); 4592 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY)); 4593 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0)); 4594 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1)); 4595 WREG32(MC_SEQ_G5PDX_CTRL_LP, 
RREG32(MC_SEQ_G5PDX_CTRL)); 4596 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD)); 4597 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL)); 4598 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); 4599 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); 4600 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); 4601 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); 4602 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); 4603 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); 4604 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); 4605 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); 4606 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); 4607 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); 4608 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); 4609 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); 4610 4611 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); 4612 if (ret) 4613 goto init_mc_done; 4614 4615 ret = ci_copy_vbios_mc_reg_table(table, ci_table); 4616 if (ret) 4617 goto init_mc_done; 4618 4619 ci_set_s0_mc_reg_index(ci_table); 4620 4621 ret = ci_register_patching_mc_seq(rdev, ci_table); 4622 if (ret) 4623 goto init_mc_done; 4624 4625 ret = ci_set_mc_special_registers(rdev, ci_table); 4626 if (ret) 4627 goto init_mc_done; 4628 4629 ci_set_valid_flag(ci_table); 4630 4631 init_mc_done: 4632 kfree(table); 4633 4634 return ret; 4635 } 4636 4637 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev, 4638 SMU7_Discrete_MCRegisters *mc_reg_table) 4639 { 4640 struct ci_power_info *pi = ci_get_pi(rdev); 4641 u32 i, j; 4642 4643 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) { 4644 if (pi->mc_reg_table.valid_flag & (1 << j)) { 4645 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) 4646 return -EINVAL; 4647 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0); 4648 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1); 4649 i++; 4650 } 4651 } 4652 4653 mc_reg_table->last = (u8)i; 4654 4655 return 0; 4656 } 4657 4658 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry, 4659 SMU7_Discrete_MCRegisterSet *data, 4660 u32 num_entries, u32 valid_flag) 4661 { 4662 u32 i, j; 4663 4664 for (i = 0, j = 0; j < num_entries; j++) { 4665 if (valid_flag & (1 << j)) { 4666 data->value[i] = cpu_to_be32(entry->mc_data[j]); 4667 i++; 4668 } 4669 } 4670 } 4671 4672 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev, 4673 const u32 memory_clock, 4674 SMU7_Discrete_MCRegisterSet *mc_reg_table_data) 4675 { 4676 struct ci_power_info *pi = ci_get_pi(rdev); 4677 u32 i = 0; 4678 4679 for(i = 0; i < pi->mc_reg_table.num_entries; i++) { 4680 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) 4681 break; 4682 } 4683 4684 if ((i == pi->mc_reg_table.num_entries) && (i > 0)) 4685 --i; 4686 4687 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i], 4688 mc_reg_table_data, pi->mc_reg_table.last, 4689 pi->mc_reg_table.valid_flag); 4690 } 4691 4692 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev, 4693 SMU7_Discrete_MCRegisters *mc_reg_table) 4694 { 4695 struct ci_power_info *pi = ci_get_pi(rdev); 4696 u32 i; 4697 4698 for (i = 0; i < pi->dpm_table.mclk_table.count; i++) 4699 ci_convert_mc_reg_table_entry_to_smc(rdev, 4700 pi->dpm_table.mclk_table.dpm_levels[i].value, 4701 &mc_reg_table->data[i]); 4702 } 4703 4704 static int ci_populate_initial_mc_reg_table(struct 
radeon_device *rdev) 4705 { 4706 struct ci_power_info *pi = ci_get_pi(rdev); 4707 int ret; 4708 4709 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); 4710 4711 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table); 4712 if (ret) 4713 return ret; 4714 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); 4715 4716 return ci_copy_bytes_to_smc(rdev, 4717 pi->mc_reg_table_start, 4718 (u8 *)&pi->smc_mc_reg_table, 4719 sizeof(SMU7_Discrete_MCRegisters), 4720 pi->sram_end); 4721 } 4722 4723 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev) 4724 { 4725 struct ci_power_info *pi = ci_get_pi(rdev); 4726 4727 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) 4728 return 0; 4729 4730 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); 4731 4732 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table); 4733 4734 return ci_copy_bytes_to_smc(rdev, 4735 pi->mc_reg_table_start + 4736 offsetof(SMU7_Discrete_MCRegisters, data[0]), 4737 (u8 *)&pi->smc_mc_reg_table.data[0], 4738 sizeof(SMU7_Discrete_MCRegisterSet) * 4739 pi->dpm_table.mclk_table.count, 4740 pi->sram_end); 4741 } 4742 4743 static void ci_enable_voltage_control(struct radeon_device *rdev) 4744 { 4745 u32 tmp = RREG32_SMC(GENERAL_PWRMGT); 4746 4747 tmp |= VOLT_PWRMGT_EN; 4748 WREG32_SMC(GENERAL_PWRMGT, tmp); 4749 } 4750 4751 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev, 4752 struct radeon_ps *radeon_state) 4753 { 4754 struct ci_ps *state = ci_get_ps(radeon_state); 4755 int i; 4756 u16 pcie_speed, max_speed = 0; 4757 4758 for (i = 0; i < state->performance_level_count; i++) { 4759 pcie_speed = state->performance_levels[i].pcie_gen; 4760 if (max_speed < pcie_speed) 4761 max_speed = pcie_speed; 4762 } 4763 4764 return max_speed; 4765 } 4766 4767 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev) 4768 { 4769 u32 speed_cntl = 0; 4770 4771 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; 4772 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; 4773 4774 return (u16)speed_cntl; 4775 } 4776 4777 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev) 4778 { 4779 u32 link_width = 0; 4780 4781 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK; 4782 link_width >>= LC_LINK_WIDTH_RD_SHIFT; 4783 4784 switch (link_width) { 4785 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4786 return 1; 4787 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4788 return 2; 4789 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4790 return 4; 4791 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4792 return 8; 4793 case RADEON_PCIE_LC_LINK_WIDTH_X12: 4794 /* not actually supported */ 4795 return 12; 4796 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4797 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4798 default: 4799 return 16; 4800 } 4801 } 4802 4803 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev, 4804 struct radeon_ps *radeon_new_state, 4805 struct radeon_ps *radeon_current_state) 4806 { 4807 struct ci_power_info *pi = ci_get_pi(rdev); 4808 enum radeon_pcie_gen target_link_speed = 4809 ci_get_maximum_link_speed(rdev, radeon_new_state); 4810 enum radeon_pcie_gen current_link_speed; 4811 4812 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID) 4813 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state); 4814 else 4815 current_link_speed = pi->force_pcie_gen; 4816 4817 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID; 4818 pi->pspp_notify_required = false; 4819 if (target_link_speed > 
current_link_speed) { 4820 switch (target_link_speed) { 4821 #ifdef CONFIG_ACPI 4822 case RADEON_PCIE_GEN3: 4823 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) 4824 break; 4825 pi->force_pcie_gen = RADEON_PCIE_GEN2; 4826 if (current_link_speed == RADEON_PCIE_GEN2) 4827 break; 4828 case RADEON_PCIE_GEN2: 4829 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) 4830 break; 4831 #endif 4832 default: 4833 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev); 4834 break; 4835 } 4836 } else { 4837 if (target_link_speed < current_link_speed) 4838 pi->pspp_notify_required = true; 4839 } 4840 } 4841 4842 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev, 4843 struct radeon_ps *radeon_new_state, 4844 struct radeon_ps *radeon_current_state) 4845 { 4846 struct ci_power_info *pi = ci_get_pi(rdev); 4847 enum radeon_pcie_gen target_link_speed = 4848 ci_get_maximum_link_speed(rdev, radeon_new_state); 4849 u8 request; 4850 4851 if (pi->pspp_notify_required) { 4852 if (target_link_speed == RADEON_PCIE_GEN3) 4853 request = PCIE_PERF_REQ_PECI_GEN3; 4854 else if (target_link_speed == RADEON_PCIE_GEN2) 4855 request = PCIE_PERF_REQ_PECI_GEN2; 4856 else 4857 request = PCIE_PERF_REQ_PECI_GEN1; 4858 4859 if ((request == PCIE_PERF_REQ_PECI_GEN1) && 4860 (ci_get_current_pcie_speed(rdev) > 0)) 4861 return; 4862 4863 #ifdef CONFIG_ACPI 4864 radeon_acpi_pcie_performance_request(rdev, request, false); 4865 #endif 4866 } 4867 } 4868 4869 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev) 4870 { 4871 struct ci_power_info *pi = ci_get_pi(rdev); 4872 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table = 4873 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 4874 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table = 4875 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk; 4876 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table = 4877 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk; 4878 4879 if (allowed_sclk_vddc_table == NULL) 4880 return -EINVAL; 4881 if (allowed_sclk_vddc_table->count < 1) 4882 return -EINVAL; 4883 if (allowed_mclk_vddc_table == NULL) 4884 return -EINVAL; 4885 if (allowed_mclk_vddc_table->count < 1) 4886 return -EINVAL; 4887 if (allowed_mclk_vddci_table == NULL) 4888 return -EINVAL; 4889 if (allowed_mclk_vddci_table->count < 1) 4890 return -EINVAL; 4891 4892 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; 4893 pi->max_vddc_in_pp_table = 4894 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 4895 4896 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; 4897 pi->max_vddci_in_pp_table = 4898 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 4899 4900 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = 4901 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 4902 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = 4903 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; 4904 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = 4905 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; 4906 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = 4907 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; 4908 4909 return 0; 4910 } 4911 4912 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc) 4913 { 4914 struct ci_power_info *pi = 
ci_get_pi(rdev); 4915 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage; 4916 u32 leakage_index; 4917 4918 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { 4919 if (leakage_table->leakage_id[leakage_index] == *vddc) { 4920 *vddc = leakage_table->actual_voltage[leakage_index]; 4921 break; 4922 } 4923 } 4924 } 4925 4926 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci) 4927 { 4928 struct ci_power_info *pi = ci_get_pi(rdev); 4929 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage; 4930 u32 leakage_index; 4931 4932 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { 4933 if (leakage_table->leakage_id[leakage_index] == *vddci) { 4934 *vddci = leakage_table->actual_voltage[leakage_index]; 4935 break; 4936 } 4937 } 4938 } 4939 4940 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4941 struct radeon_clock_voltage_dependency_table *table) 4942 { 4943 u32 i; 4944 4945 if (table) { 4946 for (i = 0; i < table->count; i++) 4947 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4948 } 4949 } 4950 4951 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev, 4952 struct radeon_clock_voltage_dependency_table *table) 4953 { 4954 u32 i; 4955 4956 if (table) { 4957 for (i = 0; i < table->count; i++) 4958 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v); 4959 } 4960 } 4961 4962 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4963 struct radeon_vce_clock_voltage_dependency_table *table) 4964 { 4965 u32 i; 4966 4967 if (table) { 4968 for (i = 0; i < table->count; i++) 4969 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4970 } 4971 } 4972 4973 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev, 4974 struct radeon_uvd_clock_voltage_dependency_table *table) 4975 { 4976 u32 i; 4977 4978 if (table) { 4979 for (i = 0; i < table->count; i++) 4980 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); 4981 } 4982 } 4983 4984 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev, 4985 struct radeon_phase_shedding_limits_table *table) 4986 { 4987 u32 i; 4988 4989 if (table) { 4990 for (i = 0; i < table->count; i++) 4991 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage); 4992 } 4993 } 4994 4995 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev, 4996 struct radeon_clock_and_voltage_limits *table) 4997 { 4998 if (table) { 4999 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc); 5000 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci); 5001 } 5002 } 5003 5004 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev, 5005 struct radeon_cac_leakage_table *table) 5006 { 5007 u32 i; 5008 5009 if (table) { 5010 for (i = 0; i < table->count; i++) 5011 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc); 5012 } 5013 } 5014 5015 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev) 5016 { 5017 5018 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5019 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk); 5020 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5021 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk); 5022 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5023 
&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk); 5024 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev, 5025 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk); 5026 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5027 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); 5028 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5029 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); 5030 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5031 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); 5032 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev, 5033 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); 5034 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev, 5035 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table); 5036 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, 5037 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 5038 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev, 5039 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc); 5040 ci_patch_cac_leakage_table_with_vddc_leakage(rdev, 5041 &rdev->pm.dpm.dyn_state.cac_leakage_table); 5042 5043 } 5044 5045 static void ci_get_memory_type(struct radeon_device *rdev) 5046 { 5047 struct ci_power_info *pi = ci_get_pi(rdev); 5048 u32 tmp; 5049 5050 tmp = RREG32(MC_SEQ_MISC0); 5051 5052 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) == 5053 MC_SEQ_MISC0_GDDR5_VALUE) 5054 pi->mem_gddr5 = true; 5055 else 5056 pi->mem_gddr5 = false; 5057 5058 } 5059 5060 static void ci_update_current_ps(struct radeon_device *rdev, 5061 struct radeon_ps *rps) 5062 { 5063 struct ci_ps *new_ps = ci_get_ps(rps); 5064 struct ci_power_info *pi = ci_get_pi(rdev); 5065 5066 pi->current_rps = *rps; 5067 pi->current_ps = *new_ps; 5068 pi->current_rps.ps_priv = &pi->current_ps; 5069 } 5070 5071 static void ci_update_requested_ps(struct radeon_device *rdev, 5072 struct radeon_ps *rps) 5073 { 5074 struct ci_ps *new_ps = ci_get_ps(rps); 5075 struct ci_power_info *pi = ci_get_pi(rdev); 5076 5077 pi->requested_rps = *rps; 5078 pi->requested_ps = *new_ps; 5079 pi->requested_rps.ps_priv = &pi->requested_ps; 5080 } 5081 5082 int ci_dpm_pre_set_power_state(struct radeon_device *rdev) 5083 { 5084 struct ci_power_info *pi = ci_get_pi(rdev); 5085 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps; 5086 struct radeon_ps *new_ps = &requested_ps; 5087 5088 ci_update_requested_ps(rdev, new_ps); 5089 5090 ci_apply_state_adjust_rules(rdev, &pi->requested_rps); 5091 5092 return 0; 5093 } 5094 5095 void ci_dpm_post_set_power_state(struct radeon_device *rdev) 5096 { 5097 struct ci_power_info *pi = ci_get_pi(rdev); 5098 struct radeon_ps *new_ps = &pi->requested_rps; 5099 5100 ci_update_current_ps(rdev, new_ps); 5101 } 5102 5103 5104 void ci_dpm_setup_asic(struct radeon_device *rdev) 5105 { 5106 int r; 5107 5108 r = ci_mc_load_microcode(rdev); 5109 if (r) 5110 DRM_ERROR("Failed to load MC firmware!\n"); 5111 ci_read_clock_registers(rdev); 5112 ci_get_memory_type(rdev); 5113 ci_enable_acpi_power_management(rdev); 5114 ci_init_sclk_t(rdev); 5115 } 5116 5117 int ci_dpm_enable(struct radeon_device *rdev) 5118 { 5119 struct ci_power_info *pi = ci_get_pi(rdev); 5120 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5121 int ret; 5122 5123 if (ci_is_smc_running(rdev)) 5124 return -EINVAL; 5125 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 5126 ci_enable_voltage_control(rdev); 5127 ret = ci_construct_voltage_tables(rdev); 5128 if 
(ret) { 5129 DRM_ERROR("ci_construct_voltage_tables failed\n"); 5130 return ret; 5131 } 5132 } 5133 if (pi->caps_dynamic_ac_timing) { 5134 ret = ci_initialize_mc_reg_table(rdev); 5135 if (ret) 5136 pi->caps_dynamic_ac_timing = false; 5137 } 5138 if (pi->dynamic_ss) 5139 ci_enable_spread_spectrum(rdev, true); 5140 if (pi->thermal_protection) 5141 ci_enable_thermal_protection(rdev, true); 5142 ci_program_sstp(rdev); 5143 ci_enable_display_gap(rdev); 5144 ci_program_vc(rdev); 5145 ret = ci_upload_firmware(rdev); 5146 if (ret) { 5147 DRM_ERROR("ci_upload_firmware failed\n"); 5148 return ret; 5149 } 5150 ret = ci_process_firmware_header(rdev); 5151 if (ret) { 5152 DRM_ERROR("ci_process_firmware_header failed\n"); 5153 return ret; 5154 } 5155 ret = ci_initial_switch_from_arb_f0_to_f1(rdev); 5156 if (ret) { 5157 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n"); 5158 return ret; 5159 } 5160 ret = ci_init_smc_table(rdev); 5161 if (ret) { 5162 DRM_ERROR("ci_init_smc_table failed\n"); 5163 return ret; 5164 } 5165 ret = ci_init_arb_table_index(rdev); 5166 if (ret) { 5167 DRM_ERROR("ci_init_arb_table_index failed\n"); 5168 return ret; 5169 } 5170 if (pi->caps_dynamic_ac_timing) { 5171 ret = ci_populate_initial_mc_reg_table(rdev); 5172 if (ret) { 5173 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n"); 5174 return ret; 5175 } 5176 } 5177 ret = ci_populate_pm_base(rdev); 5178 if (ret) { 5179 DRM_ERROR("ci_populate_pm_base failed\n"); 5180 return ret; 5181 } 5182 ci_dpm_start_smc(rdev); 5183 ci_enable_vr_hot_gpio_interrupt(rdev); 5184 ret = ci_notify_smc_display_change(rdev, false); 5185 if (ret) { 5186 DRM_ERROR("ci_notify_smc_display_change failed\n"); 5187 return ret; 5188 } 5189 ci_enable_sclk_control(rdev, true); 5190 ret = ci_enable_ulv(rdev, true); 5191 if (ret) { 5192 DRM_ERROR("ci_enable_ulv failed\n"); 5193 return ret; 5194 } 5195 ret = ci_enable_ds_master_switch(rdev, true); 5196 if (ret) { 5197 DRM_ERROR("ci_enable_ds_master_switch failed\n"); 5198 return ret; 5199 } 5200 ret = ci_start_dpm(rdev); 5201 if (ret) { 5202 DRM_ERROR("ci_start_dpm failed\n"); 5203 return ret; 5204 } 5205 ret = ci_enable_didt(rdev, true); 5206 if (ret) { 5207 DRM_ERROR("ci_enable_didt failed\n"); 5208 return ret; 5209 } 5210 ret = ci_enable_smc_cac(rdev, true); 5211 if (ret) { 5212 DRM_ERROR("ci_enable_smc_cac failed\n"); 5213 return ret; 5214 } 5215 ret = ci_enable_power_containment(rdev, true); 5216 if (ret) { 5217 DRM_ERROR("ci_enable_power_containment failed\n"); 5218 return ret; 5219 } 5220 5221 ret = ci_power_control_set_level(rdev); 5222 if (ret) { 5223 DRM_ERROR("ci_power_control_set_level failed\n"); 5224 return ret; 5225 } 5226 5227 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 5228 5229 ret = ci_enable_thermal_based_sclk_dpm(rdev, true); 5230 if (ret) { 5231 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); 5232 return ret; 5233 } 5234 5235 ci_thermal_start_thermal_controller(rdev); 5236 5237 ci_update_current_ps(rdev, boot_ps); 5238 5239 return 0; 5240 } 5241 5242 static int ci_set_temperature_range(struct radeon_device *rdev) 5243 { 5244 int ret; 5245 5246 ret = ci_thermal_enable_alert(rdev, false); 5247 if (ret) 5248 return ret; 5249 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 5250 if (ret) 5251 return ret; 5252 ret = ci_thermal_enable_alert(rdev, true); 5253 if (ret) 5254 return ret; 5255 5256 return ret; 5257 } 5258 5259 int ci_dpm_late_enable(struct radeon_device *rdev) 5260 { 5261 int ret; 5262 5263 
ret = ci_set_temperature_range(rdev); 5264 if (ret) 5265 return ret; 5266 5267 ci_dpm_powergate_uvd(rdev, true); 5268 5269 return 0; 5270 } 5271 5272 void ci_dpm_disable(struct radeon_device *rdev) 5273 { 5274 struct ci_power_info *pi = ci_get_pi(rdev); 5275 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps; 5276 5277 ci_dpm_powergate_uvd(rdev, false); 5278 5279 if (!ci_is_smc_running(rdev)) 5280 return; 5281 5282 ci_thermal_stop_thermal_controller(rdev); 5283 5284 if (pi->thermal_protection) 5285 ci_enable_thermal_protection(rdev, false); 5286 ci_enable_power_containment(rdev, false); 5287 ci_enable_smc_cac(rdev, false); 5288 ci_enable_didt(rdev, false); 5289 ci_enable_spread_spectrum(rdev, false); 5290 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); 5291 ci_stop_dpm(rdev); 5292 ci_enable_ds_master_switch(rdev, false); 5293 ci_enable_ulv(rdev, false); 5294 ci_clear_vc(rdev); 5295 ci_reset_to_default(rdev); 5296 ci_dpm_stop_smc(rdev); 5297 ci_force_switch_to_arb_f0(rdev); 5298 ci_enable_thermal_based_sclk_dpm(rdev, false); 5299 5300 ci_update_current_ps(rdev, boot_ps); 5301 } 5302 5303 int ci_dpm_set_power_state(struct radeon_device *rdev) 5304 { 5305 struct ci_power_info *pi = ci_get_pi(rdev); 5306 struct radeon_ps *new_ps = &pi->requested_rps; 5307 struct radeon_ps *old_ps = &pi->current_rps; 5308 int ret; 5309 5310 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps); 5311 if (pi->pcie_performance_request) 5312 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps); 5313 ret = ci_freeze_sclk_mclk_dpm(rdev); 5314 if (ret) { 5315 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); 5316 return ret; 5317 } 5318 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps); 5319 if (ret) { 5320 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); 5321 return ret; 5322 } 5323 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps); 5324 if (ret) { 5325 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); 5326 return ret; 5327 } 5328 5329 ret = ci_update_vce_dpm(rdev, new_ps, old_ps); 5330 if (ret) { 5331 DRM_ERROR("ci_update_vce_dpm failed\n"); 5332 return ret; 5333 } 5334 5335 ret = ci_update_sclk_t(rdev); 5336 if (ret) { 5337 DRM_ERROR("ci_update_sclk_t failed\n"); 5338 return ret; 5339 } 5340 if (pi->caps_dynamic_ac_timing) { 5341 ret = ci_update_and_upload_mc_reg_table(rdev); 5342 if (ret) { 5343 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); 5344 return ret; 5345 } 5346 } 5347 ret = ci_program_memory_timing_parameters(rdev); 5348 if (ret) { 5349 DRM_ERROR("ci_program_memory_timing_parameters failed\n"); 5350 return ret; 5351 } 5352 ret = ci_unfreeze_sclk_mclk_dpm(rdev); 5353 if (ret) { 5354 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); 5355 return ret; 5356 } 5357 ret = ci_upload_dpm_level_enable_mask(rdev); 5358 if (ret) { 5359 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); 5360 return ret; 5361 } 5362 if (pi->pcie_performance_request) 5363 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); 5364 5365 return 0; 5366 } 5367 5368 #if 0 5369 void ci_dpm_reset_asic(struct radeon_device *rdev) 5370 { 5371 ci_set_boot_state(rdev); 5372 } 5373 #endif 5374 5375 void ci_dpm_display_configuration_changed(struct radeon_device *rdev) 5376 { 5377 ci_program_display_gap(rdev); 5378 } 5379 5380 union power_info { 5381 struct _ATOM_POWERPLAY_INFO info; 5382 struct _ATOM_POWERPLAY_INFO_V2 info_2; 5383 struct _ATOM_POWERPLAY_INFO_V3 info_3; 5384 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 5385 struct 
_ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 5386 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 5387 }; 5388 5389 union pplib_clock_info { 5390 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 5391 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 5392 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 5393 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 5394 struct _ATOM_PPLIB_SI_CLOCK_INFO si; 5395 struct _ATOM_PPLIB_CI_CLOCK_INFO ci; 5396 }; 5397 5398 union pplib_power_state { 5399 struct _ATOM_PPLIB_STATE v1; 5400 struct _ATOM_PPLIB_STATE_V2 v2; 5401 }; 5402 5403 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev, 5404 struct radeon_ps *rps, 5405 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 5406 u8 table_rev) 5407 { 5408 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 5409 rps->class = le16_to_cpu(non_clock_info->usClassification); 5410 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 5411 5412 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 5413 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 5414 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 5415 } else { 5416 rps->vclk = 0; 5417 rps->dclk = 0; 5418 } 5419 5420 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 5421 rdev->pm.dpm.boot_ps = rps; 5422 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 5423 rdev->pm.dpm.uvd_ps = rps; 5424 } 5425 5426 static void ci_parse_pplib_clock_info(struct radeon_device *rdev, 5427 struct radeon_ps *rps, int index, 5428 union pplib_clock_info *clock_info) 5429 { 5430 struct ci_power_info *pi = ci_get_pi(rdev); 5431 struct ci_ps *ps = ci_get_ps(rps); 5432 struct ci_pl *pl = &ps->performance_levels[index]; 5433 5434 ps->performance_level_count = index + 1; 5435 5436 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 5437 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; 5438 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 5439 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; 5440 5441 pl->pcie_gen = r600_get_pcie_gen_support(rdev, 5442 pi->sys_pcie_mask, 5443 pi->vbios_boot_state.pcie_gen_bootup_value, 5444 clock_info->ci.ucPCIEGen); 5445 pl->pcie_lane = r600_get_pcie_lane_support(rdev, 5446 pi->vbios_boot_state.pcie_lane_bootup_value, 5447 le16_to_cpu(clock_info->ci.usPCIELane)); 5448 5449 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 5450 pi->acpi_pcie_gen = pl->pcie_gen; 5451 } 5452 5453 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { 5454 pi->ulv.supported = true; 5455 pi->ulv.pl = *pl; 5456 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; 5457 } 5458 5459 /* patch up boot state */ 5460 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 5461 pl->mclk = pi->vbios_boot_state.mclk_bootup_value; 5462 pl->sclk = pi->vbios_boot_state.sclk_bootup_value; 5463 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; 5464 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; 5465 } 5466 5467 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 5468 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 5469 pi->use_pcie_powersaving_levels = true; 5470 if (pi->pcie_gen_powersaving.max < pl->pcie_gen) 5471 pi->pcie_gen_powersaving.max = pl->pcie_gen; 5472 if (pi->pcie_gen_powersaving.min > pl->pcie_gen) 5473 pi->pcie_gen_powersaving.min = pl->pcie_gen; 5474 if (pi->pcie_lane_powersaving.max < pl->pcie_lane) 5475 pi->pcie_lane_powersaving.max = pl->pcie_lane; 5476 if (pi->pcie_lane_powersaving.min > pl->pcie_lane) 5477 pi->pcie_lane_powersaving.min = pl->pcie_lane; 5478 break; 5479 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 5480 
static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
						 pi->sys_pcie_mask,
						 pi->vbios_boot_state.pcie_gen_bootup_value,
						 clock_info->ci.ucPCIEGen);
	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
						   pi->vbios_boot_state.pcie_lane_bootup_value,
						   le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
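/*
 * Walk the PPLib state array from the VBIOS. Each power state carries one
 * non-clock info index and up to ucNumDPMLevels clock info indices; indices
 * pointing past the clock info array are skipped, and at most
 * CISLANDS_MAX_HARDWARE_POWERLEVELS levels are kept per state.
 */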
static int ci_parse_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info)
			return -EINVAL;
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(rdev->pm.dpm.ps);
			return -ENOMEM;
		}
		rdev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(rdev,
						  &rdev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}

static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}
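/*
 * Build the SMU7 power-management state from the VBIOS tables. Note that
 * the PCIe gen/lane min/max trackers are deliberately seeded inverted
 * (max = gen1/0 lanes, min = gen3/16 lanes) so that the first power state
 * parsed out of the table snaps them to real values.
 */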
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
	u32 mask;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret)
		pi->sys_pcie_mask = 0;
	else
		pi->sys_pcie_mask = mask;
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;
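	/*
	 * Probe the optional board GPIOs: the VRHot and AC/DC switch pins
	 * gate the corresponding platform caps, and the position of the
	 * VDDC PCC pin selects the response programmed into CNB_PWRMGT_CNTL
	 * (GNB slow modes, forced NB PS1, etc.).
	 */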
	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}
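	/*
	 * Work out how each voltage rail is controlled: prefer the GPIO
	 * look-up table, fall back to SVI2, and clear the corresponding
	 * platform cap for VDDCI/MVDD when neither method is exposed by
	 * the VBIOS.
	 */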
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
}

void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
{
	u32 sclk = ci_get_average_sclk_freq(rdev);

	return sclk;
}

u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
{
	u32 mclk = ci_get_average_mclk_freq(rdev);

	return mclk;
}

u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}