1 /* $NetBSD: amdgpu_vega20_hwmgr.c,v 1.3 2021/12/19 12:21:30 riastradh Exp $ */
2
3 /*
4 * Copyright 2018 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_vega20_hwmgr.c,v 1.3 2021/12/19 12:21:30 riastradh Exp $");
28
29 #include <linux/delay.h>
30 #include <linux/fb.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33
34 #include "hwmgr.h"
35 #include "amd_powerplay.h"
36 #include "vega20_smumgr.h"
37 #include "hardwaremanager.h"
38 #include "ppatomfwctrl.h"
39 #include "atomfirmware.h"
40 #include "cgs_common.h"
41 #include "vega20_powertune.h"
42 #include "vega20_inc.h"
43 #include "pppcielanes.h"
44 #include "vega20_hwmgr.h"
45 #include "vega20_processpptables.h"
46 #include "vega20_pptable.h"
47 #include "vega20_thermal.h"
48 #include "vega20_ppsmc.h"
49 #include "pp_debug.h"
50 #include "amd_pcie_helpers.h"
51 #include "ppinterrupt.h"
52 #include "pp_overdriver.h"
53 #include "pp_thermal.h"
54 #include "soc15_common.h"
55 #include "vega20_baco.h"
56 #include "smuio/smuio_9_0_offset.h"
57 #include "smuio/smuio_9_0_sh_mask.h"
58 #include "nbio/nbio_7_4_sh_mask.h"
59
60 #include <linux/nbsd-namespace.h>
61
62 #define smnPCIE_LC_SPEED_CNTL 0x11140290
63 #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
64
65 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
66 {
67 struct vega20_hwmgr *data =
68 (struct vega20_hwmgr *)(hwmgr->backend);
69
70 data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
71 data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
72 data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
73 data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
74 data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;
75
76 data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
77 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
78 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
79 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
80 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
81 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
82 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
83 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
84 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
85 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
86 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
87 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
88 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
89
90 /*
91 * Disable the following features for now:
92 * GFXCLK DS
93 * SOCCLK DS
94 * LCLK DS
95 * DCEFCLK DS
96 * FCLK DS
97 * MP1CLK DS
98 * MP0CLK DS
99 */
100 data->registry_data.disallowed_features = 0xE0041C00;
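/* 0xE0041C00 sets bits 10-12, 18 and 29-31 -- seven bits, matching the seven deep-sleep features listed above. */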
101 /* ECC feature should be disabled on old SMUs */
102 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
103 hwmgr->smu_version = smum_get_argument(hwmgr);
104 if (hwmgr->smu_version < 0x282100)
105 data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
106
107 if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
108 data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;
109
110 if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
111 data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;
112
113 if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
114 data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;
115
116 if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
117 data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;
118
119 if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
120 data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;
121
122 if (!(hwmgr->feature_mask & PP_ULV_MASK))
123 data->registry_data.disallowed_features |= FEATURE_ULV_MASK;
124
125 if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
126 data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;
127
128 data->registry_data.od_state_in_dc_support = 0;
129 data->registry_data.thermal_support = 1;
130 data->registry_data.skip_baco_hardware = 0;
131
132 data->registry_data.log_avfs_param = 0;
133 data->registry_data.sclk_throttle_low_notification = 1;
134 data->registry_data.force_dpm_high = 0;
135 data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
136
137 data->registry_data.didt_support = 0;
138 if (data->registry_data.didt_support) {
139 data->registry_data.didt_mode = 6;
140 data->registry_data.sq_ramping_support = 1;
141 data->registry_data.db_ramping_support = 0;
142 data->registry_data.td_ramping_support = 0;
143 data->registry_data.tcp_ramping_support = 0;
144 data->registry_data.dbr_ramping_support = 0;
145 data->registry_data.edc_didt_support = 1;
146 data->registry_data.gc_didt_support = 0;
147 data->registry_data.psm_didt_support = 0;
148 }
149
150 data->registry_data.pcie_lane_override = 0xff;
151 data->registry_data.pcie_speed_override = 0xff;
152 data->registry_data.pcie_clock_override = 0xffffffff;
153 data->registry_data.regulator_hot_gpio_support = 1;
154 data->registry_data.ac_dc_switch_gpio_support = 0;
155 data->registry_data.quick_transition_support = 0;
156 data->registry_data.zrpm_start_temp = 0xffff;
157 data->registry_data.zrpm_stop_temp = 0xffff;
158 data->registry_data.od8_feature_enable = 1;
159 data->registry_data.disable_water_mark = 0;
160 data->registry_data.disable_pp_tuning = 0;
161 data->registry_data.disable_xlpp_tuning = 0;
162 data->registry_data.disable_workload_policy = 0;
163 data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
164 data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
165 data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
166 data->registry_data.force_workload_policy_mask = 0;
167 data->registry_data.disable_3d_fs_detection = 0;
168 data->registry_data.fps_support = 1;
169 data->registry_data.disable_auto_wattman = 1;
170 data->registry_data.auto_wattman_debug = 0;
171 data->registry_data.auto_wattman_sample_period = 100;
172 data->registry_data.fclk_gfxclk_ratio = 0;
173 data->registry_data.auto_wattman_threshold = 50;
174 data->registry_data.gfxoff_controlled_by_driver = 1;
175 data->gfxoff_allowed = false;
176 data->counter_gfxoff = 0;
177 }
178
179 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
180 {
181 struct vega20_hwmgr *data =
182 (struct vega20_hwmgr *)(hwmgr->backend);
183 struct amdgpu_device *adev = hwmgr->adev;
184
185 if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
186 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
187 PHM_PlatformCaps_ControlVDDCI);
188
189 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190 PHM_PlatformCaps_TablelessHardwareInterface);
191
192 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
193 PHM_PlatformCaps_BACO);
194
195 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
196 PHM_PlatformCaps_EnableSMU7ThermalManagement);
197
198 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_UVDPowerGating);
201
202 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_VCEPowerGating);
205
206 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
207 PHM_PlatformCaps_UnTabledHardwareInterface);
208
209 if (data->registry_data.od8_feature_enable)
210 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
211 PHM_PlatformCaps_OD8inACSupport);
212
213 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
214 PHM_PlatformCaps_ActivityReporting);
215 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
216 PHM_PlatformCaps_FanSpeedInTableIsRPM);
217
218 if (data->registry_data.od_state_in_dc_support) {
219 if (data->registry_data.od8_feature_enable)
220 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221 PHM_PlatformCaps_OD8inDCSupport);
222 }
223
224 if (data->registry_data.thermal_support &&
225 data->registry_data.fuzzy_fan_control_support &&
226 hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
227 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_ODFuzzyFanControlSupport);
229
230 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
231 PHM_PlatformCaps_DynamicPowerManagement);
232 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
233 PHM_PlatformCaps_SMC);
234 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
235 PHM_PlatformCaps_ThermalPolicyDelay);
236
237 if (data->registry_data.force_dpm_high)
238 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
240
241 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
242 PHM_PlatformCaps_DynamicUVDState);
243
244 if (data->registry_data.sclk_throttle_low_notification)
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
246 PHM_PlatformCaps_SclkThrottleLowNotification);
247
248 /* power tune caps */
249 /* assume disabled */
250 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_PowerContainment);
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_DiDtSupport);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_SQRamping);
256 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
257 PHM_PlatformCaps_DBRamping);
258 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
259 PHM_PlatformCaps_TDRamping);
260 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
261 PHM_PlatformCaps_TCPRamping);
262 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
263 PHM_PlatformCaps_DBRRamping);
264 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
265 PHM_PlatformCaps_DiDtEDCEnable);
266 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
267 PHM_PlatformCaps_GCEDC);
268 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
269 PHM_PlatformCaps_PSM);
270
271 if (data->registry_data.didt_support) {
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
273 PHM_PlatformCaps_DiDtSupport);
274 if (data->registry_data.sq_ramping_support)
275 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
276 PHM_PlatformCaps_SQRamping);
277 if (data->registry_data.db_ramping_support)
278 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 PHM_PlatformCaps_DBRamping);
280 if (data->registry_data.td_ramping_support)
281 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
282 PHM_PlatformCaps_TDRamping);
283 if (data->registry_data.tcp_ramping_support)
284 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
285 PHM_PlatformCaps_TCPRamping);
286 if (data->registry_data.dbr_ramping_support)
287 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
288 PHM_PlatformCaps_DBRRamping);
289 if (data->registry_data.edc_didt_support)
290 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
291 PHM_PlatformCaps_DiDtEDCEnable);
292 if (data->registry_data.gc_didt_support)
293 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
294 PHM_PlatformCaps_GCEDC);
295 if (data->registry_data.psm_didt_support)
296 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
297 PHM_PlatformCaps_PSM);
298 }
299
300 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
301 PHM_PlatformCaps_RegulatorHot);
302
303 if (data->registry_data.ac_dc_switch_gpio_support) {
304 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
305 PHM_PlatformCaps_AutomaticDCTransition);
306 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
307 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
308 }
309
310 if (data->registry_data.quick_transition_support) {
311 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
312 PHM_PlatformCaps_AutomaticDCTransition);
313 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
314 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
315 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
316 PHM_PlatformCaps_Falcon_QuickTransition);
317 }
318
319 if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
320 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
321 PHM_PlatformCaps_LowestUclkReservedForUlv);
322 if (data->lowest_uclk_reserved_for_ulv == 1)
323 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
324 PHM_PlatformCaps_LowestUclkReservedForUlv);
325 }
326
327 if (data->registry_data.custom_fan_support)
328 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
329 PHM_PlatformCaps_CustomFanControlSupport);
330
331 return 0;
332 }
333
334 static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
335 {
336 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
337 struct amdgpu_device *adev = hwmgr->adev;
338 uint32_t top32, bottom32;
339 int i;
340
341 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
342 FEATURE_DPM_PREFETCHER_BIT;
343 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
344 FEATURE_DPM_GFXCLK_BIT;
345 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
346 FEATURE_DPM_UCLK_BIT;
347 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
348 FEATURE_DPM_SOCCLK_BIT;
349 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
350 FEATURE_DPM_UVD_BIT;
351 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
352 FEATURE_DPM_VCE_BIT;
353 data->smu_features[GNLD_ULV].smu_feature_id =
354 FEATURE_ULV_BIT;
355 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
356 FEATURE_DPM_MP0CLK_BIT;
357 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
358 FEATURE_DPM_LINK_BIT;
359 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
360 FEATURE_DPM_DCEFCLK_BIT;
361 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
362 FEATURE_DS_GFXCLK_BIT;
363 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
364 FEATURE_DS_SOCCLK_BIT;
365 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
366 FEATURE_DS_LCLK_BIT;
367 data->smu_features[GNLD_PPT].smu_feature_id =
368 FEATURE_PPT_BIT;
369 data->smu_features[GNLD_TDC].smu_feature_id =
370 FEATURE_TDC_BIT;
371 data->smu_features[GNLD_THERMAL].smu_feature_id =
372 FEATURE_THERMAL_BIT;
373 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
374 FEATURE_GFX_PER_CU_CG_BIT;
375 data->smu_features[GNLD_RM].smu_feature_id =
376 FEATURE_RM_BIT;
377 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
378 FEATURE_DS_DCEFCLK_BIT;
379 data->smu_features[GNLD_ACDC].smu_feature_id =
380 FEATURE_ACDC_BIT;
381 data->smu_features[GNLD_VR0HOT].smu_feature_id =
382 FEATURE_VR0HOT_BIT;
383 data->smu_features[GNLD_VR1HOT].smu_feature_id =
384 FEATURE_VR1HOT_BIT;
385 data->smu_features[GNLD_FW_CTF].smu_feature_id =
386 FEATURE_FW_CTF_BIT;
387 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
388 FEATURE_LED_DISPLAY_BIT;
389 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
390 FEATURE_FAN_CONTROL_BIT;
391 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
392 data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
393 data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
394 data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
395 data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
396 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
397 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
398 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
399 data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
400
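/*
 * Build each feature's 64-bit bitmap from its SMU feature id and mark it
 * allowed unless bit i of the registry disallowed_features mask is set;
 * vega20_set_allowed_featuresmask() later reports the allowed set to the SMU.
 */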
401 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
402 data->smu_features[i].smu_feature_bitmap =
403 (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
404 data->smu_features[i].allowed =
405 ((data->registry_data.disallowed_features >> i) & 1) ?
406 false : true;
407 }
408
409 /* Get the SN to turn into a Unique ID */
410 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
411 top32 = smum_get_argument(hwmgr);
412 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
413 bottom32 = smum_get_argument(hwmgr);
414
415 adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
416 }
417
418 static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
419 {
420 return 0;
421 }
422
423 static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
424 {
425 kfree(hwmgr->backend);
426 hwmgr->backend = NULL;
427
428 return 0;
429 }
430
431 static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
432 {
433 struct vega20_hwmgr *data;
434 struct amdgpu_device *adev = hwmgr->adev;
435
436 data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
437 if (data == NULL)
438 return -ENOMEM;
439
440 hwmgr->backend = data;
441
442 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
443 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
444 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
445
446 vega20_set_default_registry_data(hwmgr);
447
448 data->disable_dpm_mask = 0xff;
449
450 /* need to set voltage control types before EVV patching */
451 data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
452 data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
453 data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;
454
455 data->water_marks_bitmap = 0;
456 data->avfs_exist = false;
457
458 vega20_set_features_platform_caps(hwmgr);
459
460 vega20_init_dpm_defaults(hwmgr);
461
462 /* Parse pptable data read from VBIOS */
463 vega20_set_private_data_based_on_pptable(hwmgr);
464
465 data->is_tlu_enabled = false;
466
467 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
468 VEGA20_MAX_HARDWARE_POWERLEVELS;
469 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
470 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
471
472 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
473 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
474 hwmgr->platform_descriptor.clockStep.engineClock = 500;
475 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
476
477 data->total_active_cus = adev->gfx.cu_info.number;
478 data->is_custom_profile_set = false;
479
480 return 0;
481 }
482
483 static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
484 {
485 struct vega20_hwmgr *data =
486 (struct vega20_hwmgr *)(hwmgr->backend);
487
488 data->low_sclk_interrupt_threshold = 0;
489
490 return 0;
491 }
492
493 static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
494 {
495 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
496 int ret = 0;
497
498 ret = vega20_init_sclk_threshold(hwmgr);
499 PP_ASSERT_WITH_CODE(!ret,
500 "Failed to init sclk threshold!",
501 return ret);
502
503 if (adev->in_gpu_reset &&
504 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
505
506 ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
507 if (ret)
508 pr_err("Failed to apply vega20 baco workaround!\n");
509 }
510
511 return ret;
512 }
513
514 /*
515 * @fn vega20_init_dpm_state
516 * @brief Initialize all soft/hard minimum levels to 0 and all soft/hard maximum levels to VG20_CLOCK_MAX_DEFAULT.
517 *
518 * @param dpm_state - the address of the DPM Table to initialize.
519 * @return None.
520 */
521 static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
522 {
523 dpm_state->soft_min_level = 0x0;
524 dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
525 dpm_state->hard_min_level = 0x0;
526 dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
527 }
528
529 static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
530 PPCLK_e clk_id, uint32_t *num_of_levels)
531 {
532 int ret = 0;
533
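/* An index of 0xFF asks the SMU for the number of DPM levels of the given clock. */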
534 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
535 PPSMC_MSG_GetDpmFreqByIndex,
536 (clk_id << 16 | 0xFF));
537 PP_ASSERT_WITH_CODE(!ret,
538 "[GetNumOfDpmLevel] failed to get dpm levels!",
539 return ret);
540
541 *num_of_levels = smum_get_argument(hwmgr);
542 PP_ASSERT_WITH_CODE(*num_of_levels > 0,
543 "[GetNumOfDpmLevel] number of clk levels is invalid!",
544 return -EINVAL);
545
546 return ret;
547 }
548
549 static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
550 PPCLK_e clk_id, uint32_t index, uint32_t *clk)
551 {
552 int ret = 0;
553
554 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
555 PPSMC_MSG_GetDpmFreqByIndex,
556 (clk_id << 16 | index));
557 PP_ASSERT_WITH_CODE(!ret,
558 "[GetDpmFreqByIndex] failed to get dpm freq by index!",
559 return ret);
560
561 *clk = smum_get_argument(hwmgr);
562 PP_ASSERT_WITH_CODE(*clk,
563 "[GetDpmFreqByIndex] clk value is invalid!",
564 return -EINVAL);
565
566 return ret;
567 }
568
569 static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
570 struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
571 {
572 int ret = 0;
573 uint32_t i, num_of_levels, clk;
574
575 ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
576 PP_ASSERT_WITH_CODE(!ret,
577 "[SetupSingleDpmTable] failed to get clk levels!",
578 return ret);
579
580 dpm_table->count = num_of_levels;
581
582 for (i = 0; i < num_of_levels; i++) {
583 ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
584 PP_ASSERT_WITH_CODE(!ret,
585 "[SetupSingleDpmTable] failed to get clk of specific level!",
586 return ret);
587 dpm_table->dpm_levels[i].value = clk;
588 dpm_table->dpm_levels[i].enabled = true;
589 }
590
591 return ret;
592 }
593
594 static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
595 {
596 struct vega20_hwmgr *data =
597 (struct vega20_hwmgr *)(hwmgr->backend);
598 struct vega20_single_dpm_table *dpm_table;
599 int ret = 0;
600
601 dpm_table = &(data->dpm_table.gfx_table);
602 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
603 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
604 PP_ASSERT_WITH_CODE(!ret,
605 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
606 return ret);
607 } else {
608 dpm_table->count = 1;
609 dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
610 }
611
612 return ret;
613 }
614
615 static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
616 {
617 struct vega20_hwmgr *data =
618 (struct vega20_hwmgr *)(hwmgr->backend);
619 struct vega20_single_dpm_table *dpm_table;
620 int ret = 0;
621
622 dpm_table = &(data->dpm_table.mem_table);
623 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
624 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
625 PP_ASSERT_WITH_CODE(!ret,
626 "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
627 return ret);
628 } else {
629 dpm_table->count = 1;
630 dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
631 }
632
633 return ret;
634 }
635
636 /*
637 * Initialize all DPM state tables for the SMU
638 * based on the dependency table.
639 * The dynamic state patching function will then trim these
640 * state tables to the allowed range based
641 * on the power policy or on external client requests,
642 * such as UVD requests.
643 */
644 static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
645 {
646 struct vega20_hwmgr *data =
647 (struct vega20_hwmgr *)(hwmgr->backend);
648 struct vega20_single_dpm_table *dpm_table;
649 int ret = 0;
650
651 memset(&data->dpm_table, 0, sizeof(data->dpm_table));
652
653 /* socclk */
654 dpm_table = &(data->dpm_table.soc_table);
655 if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
656 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
657 PP_ASSERT_WITH_CODE(!ret,
658 "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
659 return ret);
660 } else {
661 dpm_table->count = 1;
662 dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
663 }
664 vega20_init_dpm_state(&(dpm_table->dpm_state));
665
666 /* gfxclk */
667 dpm_table = &(data->dpm_table.gfx_table);
668 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
669 if (ret)
670 return ret;
671 vega20_init_dpm_state(&(dpm_table->dpm_state));
672
673 /* memclk */
674 dpm_table = &(data->dpm_table.mem_table);
675 ret = vega20_setup_memclk_dpm_table(hwmgr);
676 if (ret)
677 return ret;
678 vega20_init_dpm_state(&(dpm_table->dpm_state));
679
680 /* eclk */
681 dpm_table = &(data->dpm_table.eclk_table);
682 if (data->smu_features[GNLD_DPM_VCE].enabled) {
683 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
684 PP_ASSERT_WITH_CODE(!ret,
685 "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
686 return ret);
687 } else {
688 dpm_table->count = 1;
689 dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
690 }
691 vega20_init_dpm_state(&(dpm_table->dpm_state));
692
693 /* vclk */
694 dpm_table = &(data->dpm_table.vclk_table);
695 if (data->smu_features[GNLD_DPM_UVD].enabled) {
696 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
697 PP_ASSERT_WITH_CODE(!ret,
698 "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
699 return ret);
700 } else {
701 dpm_table->count = 1;
702 dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
703 }
704 vega20_init_dpm_state(&(dpm_table->dpm_state));
705
706 /* dclk */
707 dpm_table = &(data->dpm_table.dclk_table);
708 if (data->smu_features[GNLD_DPM_UVD].enabled) {
709 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
710 PP_ASSERT_WITH_CODE(!ret,
711 "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
712 return ret);
713 } else {
714 dpm_table->count = 1;
715 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
716 }
717 vega20_init_dpm_state(&(dpm_table->dpm_state));
718
719 /* dcefclk */
720 dpm_table = &(data->dpm_table.dcef_table);
721 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
722 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
723 PP_ASSERT_WITH_CODE(!ret,
724 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
725 return ret);
726 } else {
727 dpm_table->count = 1;
728 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
729 }
730 vega20_init_dpm_state(&(dpm_table->dpm_state));
731
732 /* pixclk */
733 dpm_table = &(data->dpm_table.pixel_table);
734 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
735 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
736 PP_ASSERT_WITH_CODE(!ret,
737 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
738 return ret);
739 } else
740 dpm_table->count = 0;
741 vega20_init_dpm_state(&(dpm_table->dpm_state));
742
743 /* dispclk */
744 dpm_table = &(data->dpm_table.display_table);
745 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
746 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
747 PP_ASSERT_WITH_CODE(!ret,
748 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
749 return ret);
750 } else
751 dpm_table->count = 0;
752 vega20_init_dpm_state(&(dpm_table->dpm_state));
753
754 /* phyclk */
755 dpm_table = &(data->dpm_table.phy_table);
756 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
757 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
758 PP_ASSERT_WITH_CODE(!ret,
759 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
760 return ret);
761 } else
762 dpm_table->count = 0;
763 vega20_init_dpm_state(&(dpm_table->dpm_state));
764
765 /* fclk */
766 dpm_table = &(data->dpm_table.fclk_table);
767 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
768 ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
769 PP_ASSERT_WITH_CODE(!ret,
770 "[SetupDefaultDpmTable] failed to get fclk dpm levels!",
771 return ret);
772 } else {
773 dpm_table->count = 1;
774 dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
775 }
776 vega20_init_dpm_state(&(dpm_table->dpm_state));
777
778 /* save a copy of the default DPM table */
779 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
780 sizeof(struct vega20_dpm_table));
781
782 return 0;
783 }
784
785 /**
786 * Initializes the SMC table and uploads it
787 *
788 * @param hwmgr the address of the powerplay hardware manager.
789 *
790 * @return 0 on success; otherwise a negative error code.
791 */
792 static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
793 {
794 int result;
795 struct vega20_hwmgr *data =
796 (struct vega20_hwmgr *)(hwmgr->backend);
797 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
798 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
799 struct phm_ppt_v3_information *pptable_information =
800 (struct phm_ppt_v3_information *)hwmgr->pptable;
801
802 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
803 PP_ASSERT_WITH_CODE(!result,
804 "[InitSMCTable] Failed to get vbios bootup values!",
805 return result);
806
807 data->vbios_boot_state.vddc = boot_up_values.usVddc;
808 data->vbios_boot_state.vddci = boot_up_values.usVddci;
809 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
810 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
811 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
812 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
813 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
814 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
815 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
816 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
817 data->vbios_boot_state.fclock = boot_up_values.ulFClk;
818 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
819
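/* The boot-up DCEF clock appears to be in 10 kHz units, so dividing by 100 gives the MHz value passed to the SMU. */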
820 smum_send_msg_to_smc_with_parameter(hwmgr,
821 PPSMC_MSG_SetMinDeepSleepDcefclk,
822 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
823
824 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
825
826 result = smum_smc_table_manager(hwmgr,
827 (uint8_t *)pp_table, TABLE_PPTABLE, false);
828 PP_ASSERT_WITH_CODE(!result,
829 "[InitSMCTable] Failed to upload PPtable!",
830 return result);
831
832 return 0;
833 }
834
835 /*
836 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
837 * reflect the ASIC capabilities and not the system capabilities. For example,
838 * with a Vega20 board in a PCIe Gen3 system, when the SMU tries to switch to
839 * DPM1 it fails because the system does not support Gen4.
840 */
841 static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
842 {
843 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
844 struct vega20_hwmgr *data =
845 (struct vega20_hwmgr *)(hwmgr->backend);
846 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
847 int ret;
848
849 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
850 pcie_gen = 3;
851 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
852 pcie_gen = 2;
853 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
854 pcie_gen = 1;
855 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
856 pcie_gen = 0;
857
858 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
859 pcie_width = 6;
860 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
861 pcie_width = 5;
862 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
863 pcie_width = 4;
864 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
865 pcie_width = 3;
866 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
867 pcie_width = 2;
868 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
869 pcie_width = 1;
870
871 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
872 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
873 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
874 */
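/* e.g. DPM1 at Gen4 x16: (1 << 16) | (3 << 8) | 6 = 0x10306 */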
875 smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
876 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
877 PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
878 PP_ASSERT_WITH_CODE(!ret,
879 "[OverridePcieParameters] Attempt to override pcie params failed!",
880 return ret);
881
882 data->pcie_parameters_override = true;
883 data->pcie_gen_level1 = pcie_gen;
884 data->pcie_width_level1 = pcie_width;
885
886 return 0;
887 }
888
889 static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
890 {
891 struct vega20_hwmgr *data =
892 (struct vega20_hwmgr *)(hwmgr->backend);
893 uint32_t allowed_features_low = 0, allowed_features_high = 0;
894 int i;
895 int ret = 0;
896
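/* Feature IDs 0-31 select the low dword of the mask, IDs 32-63 the high dword. */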
897 for (i = 0; i < GNLD_FEATURES_MAX; i++)
898 if (data->smu_features[i].allowed)
899 data->smu_features[i].smu_feature_id > 31 ?
900 (allowed_features_high |=
901 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
902 & 0xFFFFFFFF)) :
903 (allowed_features_low |=
904 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
905 & 0xFFFFFFFF));
906
907 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
908 PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
909 PP_ASSERT_WITH_CODE(!ret,
910 "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
911 return ret);
912
913 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
914 PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
915 PP_ASSERT_WITH_CODE(!ret,
916 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
917 return ret);
918
919 return 0;
920 }
921
922 static int vega20_run_btc(struct pp_hwmgr *hwmgr)
923 {
924 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
925 }
926
927 static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
928 {
929 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
930 }
931
932 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
933 {
934 struct vega20_hwmgr *data =
935 (struct vega20_hwmgr *)(hwmgr->backend);
936 uint64_t features_enabled;
937 int i;
938 bool enabled;
939 int ret = 0;
940
941 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
942 PPSMC_MSG_EnableAllSmuFeatures)) == 0,
943 "[EnableAllSMUFeatures] Failed to enable all smu features!",
944 return ret);
945
946 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
947 PP_ASSERT_WITH_CODE(!ret,
948 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
949 return ret);
950
951 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
952 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
953 true : false;
954 data->smu_features[i].enabled = enabled;
955 data->smu_features[i].supported = enabled;
956
957 #if 0
958 if (data->smu_features[i].allowed && !enabled)
959 pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
960 else if (!data->smu_features[i].allowed && enabled)
961 pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
962 #endif
963 }
964
965 return 0;
966 }
967
968 static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
969 {
970 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
971
972 if (data->smu_features[GNLD_DPM_UCLK].enabled)
973 return smum_send_msg_to_smc_with_parameter(hwmgr,
974 PPSMC_MSG_SetUclkFastSwitch,
975 1);
976
977 return 0;
978 }
979
980 static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
981 {
982 struct vega20_hwmgr *data =
983 (struct vega20_hwmgr *)(hwmgr->backend);
984
985 return smum_send_msg_to_smc_with_parameter(hwmgr,
986 PPSMC_MSG_SetFclkGfxClkRatio,
987 data->registry_data.fclk_gfxclk_ratio);
988 }
989
990 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
991 {
992 struct vega20_hwmgr *data =
993 (struct vega20_hwmgr *)(hwmgr->backend);
994 uint64_t features_enabled;
995 int i;
996 bool enabled;
997 int ret = 0;
998
999 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
1000 PPSMC_MSG_DisableAllSmuFeatures)) == 0,
1001 "[DisableAllSMUFeatures] Failed to disable all smu features!",
1002 return ret);
1003
1004 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
1005 PP_ASSERT_WITH_CODE(!ret,
1006 "[DisableAllSMUFeatures] Failed to get enabled smc features!",
1007 return ret);
1008
1009 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
1010 enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
1011 true : false;
1012 data->smu_features[i].enabled = enabled;
1013 data->smu_features[i].supported = enabled;
1014 }
1015
1016 return 0;
1017 }
1018
1019 static int vega20_od8_set_feature_capabilities(
1020 struct pp_hwmgr *hwmgr)
1021 {
1022 struct phm_ppt_v3_information *pptable_information =
1023 (struct phm_ppt_v3_information *)hwmgr->pptable;
1024 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1025 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1026 struct vega20_od8_settings *od_settings = &(data->od8_settings);
1027
1028 od_settings->overdrive8_capabilities = 0;
1029
1030 if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
1031 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
1032 pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
1033 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
1034 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
1035 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
1036 od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;
1037
1038 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
1039 (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
1040 pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
1041 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
1042 pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
1043 (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
1044 pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
1045 od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
1046 }
1047
1048 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
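/* Use the second-highest default UCLK DPM level as the lower bound for the overdrive UclkFmax setting. */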
1049 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
1050 data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
1051 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
1052 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
1053 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
1054 (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
1055 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
1056 od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
1057 }
1058
1059 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
1060 pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
1061 pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
1062 pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
1063 pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
1064 od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;
1065
1066 if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
1067 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
1068 pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
1069 pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
1070 (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
1071 pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
1072 od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;
1073
1074 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
1075 (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
1076 (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
1077 pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
1078 (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
1079 pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
1080 od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
1081 }
1082
1083 if (data->smu_features[GNLD_THERMAL].enabled) {
1084 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
1085 pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
1086 pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
1087 (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
1088 pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
1089 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;
1090
1091 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
1092 pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
1093 pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
1094 (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
1095 pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
1096 od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
1097 }
1098
1099 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
1100 od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;
1101
1102 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
1103 pp_table->FanZeroRpmEnable)
1104 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
1105
1106 if (!od_settings->overdrive8_capabilities)
1107 hwmgr->od_enabled = false;
1108
1109 return 0;
1110 }
1111
1112 static int vega20_od8_set_feature_id(
1113 struct pp_hwmgr *hwmgr)
1114 {
1115 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1116 struct vega20_od8_settings *od_settings = &(data->od8_settings);
1117
1118 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
1119 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
1120 OD8_GFXCLK_LIMITS;
1121 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
1122 OD8_GFXCLK_LIMITS;
1123 } else {
1124 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
1125 0;
1126 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
1127 0;
1128 }
1129
1130 if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1131 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
1132 OD8_GFXCLK_CURVE;
1133 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
1134 OD8_GFXCLK_CURVE;
1135 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
1136 OD8_GFXCLK_CURVE;
1137 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
1138 OD8_GFXCLK_CURVE;
1139 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
1140 OD8_GFXCLK_CURVE;
1141 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
1142 OD8_GFXCLK_CURVE;
1143 } else {
1144 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
1145 0;
1146 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
1147 0;
1148 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
1149 0;
1150 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
1151 0;
1152 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
1153 0;
1154 od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
1155 0;
1156 }
1157
1158 if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1159 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
1160 else
1161 od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;
1162
1163 if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1164 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
1165 else
1166 od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;
1167
1168 if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1169 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
1170 OD8_ACOUSTIC_LIMIT_SCLK;
1171 else
1172 od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
1173 0;
1174
1175 if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1176 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
1177 OD8_FAN_SPEED_MIN;
1178 else
1179 od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
1180 0;
1181
1182 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1183 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
1184 OD8_TEMPERATURE_FAN;
1185 else
1186 od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
1187 0;
1188
1189 if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1190 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
1191 OD8_TEMPERATURE_SYSTEM;
1192 else
1193 od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
1194 0;
1195
1196 return 0;
1197 }
1198
1199 static int vega20_od8_get_gfx_clock_base_voltage(
1200 struct pp_hwmgr *hwmgr,
1201 uint32_t *voltage,
1202 uint32_t freq)
1203 {
1204 int ret = 0;
1205
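/*
 * The message argument packs the AVFS curve selector into bits 31:24, the
 * hot-curve temperature selector into bits 23:16 and the requested
 * frequency into the remaining low bits.
 */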
1206 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1207 PPSMC_MSG_GetAVFSVoltageByDpm,
1208 ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
1209 PP_ASSERT_WITH_CODE(!ret,
1210 "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
1211 return ret);
1212
1213 *voltage = smum_get_argument(hwmgr);
1214 *voltage = *voltage / VOLTAGE_SCALE;
1215
1216 return 0;
1217 }
1218
1219 static int vega20_od8_initialize_default_settings(
1220 struct pp_hwmgr *hwmgr)
1221 {
1222 struct phm_ppt_v3_information *pptable_information =
1223 (struct phm_ppt_v3_information *)hwmgr->pptable;
1224 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1225 struct vega20_od8_settings *od8_settings = &(data->od8_settings);
1226 OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
1227 int i, ret = 0;
1228
1229 /* Set Feature Capabilities */
1230 vega20_od8_set_feature_capabilities(hwmgr);
1231
1232 /* Map FeatureID to individual settings */
1233 vega20_od8_set_feature_id(hwmgr);
1234
1235 /* Set default values */
1236 ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
1237 PP_ASSERT_WITH_CODE(!ret,
1238 "Failed to export over drive table!",
1239 return ret);
1240
1241 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
1242 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1243 od_table->GfxclkFmin;
1244 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1245 od_table->GfxclkFmax;
1246 } else {
1247 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
1248 0;
1249 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
1250 0;
1251 }
1252
1253 if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
1254 od_table->GfxclkFreq1 = od_table->GfxclkFmin;
1255 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1256 od_table->GfxclkFreq1;
1257
1258 od_table->GfxclkFreq3 = od_table->GfxclkFmax;
1259 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1260 od_table->GfxclkFreq3;
1261
1262 od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
1263 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1264 od_table->GfxclkFreq2;
1265
1266 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1267 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
1268 od_table->GfxclkFreq1),
1269 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1270 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
1271 od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
1272 * VOLTAGE_SCALE;
1273
1274 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1275 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
1276 od_table->GfxclkFreq2),
1277 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1278 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
1279 od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
1280 * VOLTAGE_SCALE;
1281
1282 PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
1283 &(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
1284 od_table->GfxclkFreq3),
1285 "[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
1286 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
1287 od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
1288 * VOLTAGE_SCALE;
1289 } else {
1290 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
1291 0;
1292 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
1293 0;
1294 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
1295 0;
1296 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
1297 0;
1298 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
1299 0;
1300 od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
1301 0;
1302 }
1303
1304 if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
1305 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1306 od_table->UclkFmax;
1307 else
1308 od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
1309 0;
1310
1311 if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
1312 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1313 od_table->OverDrivePct;
1314 else
1315 od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
1316 0;
1317
1318 if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
1319 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1320 od_table->FanMaximumRpm;
1321 else
1322 od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
1323 0;
1324
1325 if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
1326 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1327 od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
1328 else
1329 od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
1330 0;
1331
1332 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
1333 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1334 od_table->FanTargetTemperature;
1335 else
1336 od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
1337 0;
1338
1339 if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
1340 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1341 od_table->MaxOpTemp;
1342 else
1343 od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
1344 0;
1345
1346 for (i = 0; i < OD8_SETTING_COUNT; i++) {
1347 if (od8_settings->od8_settings_array[i].feature_id) {
1348 od8_settings->od8_settings_array[i].min_value =
1349 pptable_information->od_settings_min[i];
1350 od8_settings->od8_settings_array[i].max_value =
1351 pptable_information->od_settings_max[i];
1352 od8_settings->od8_settings_array[i].current_value =
1353 od8_settings->od8_settings_array[i].default_value;
1354 } else {
1355 od8_settings->od8_settings_array[i].min_value =
1356 0;
1357 od8_settings->od8_settings_array[i].max_value =
1358 0;
1359 od8_settings->od8_settings_array[i].current_value =
1360 0;
1361 }
1362 }
1363
1364 ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
1365 PP_ASSERT_WITH_CODE(!ret,
1366 "Failed to import over drive table!",
1367 return ret);
1368
1369 return 0;
1370 }
1371
1372 static int vega20_od8_set_settings(
1373 struct pp_hwmgr *hwmgr,
1374 uint32_t index,
1375 uint32_t value)
1376 {
1377 OverDriveTable_t od_table;
1378 int ret = 0;
1379 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1380 struct vega20_od8_single_setting *od8_settings =
1381 data->od8_settings.od8_settings_array;
1382
1383 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
1384 PP_ASSERT_WITH_CODE(!ret,
1385 "Failed to export over drive table!",
1386 return ret);
1387
1388 switch(index) {
1389 case OD8_SETTING_GFXCLK_FMIN:
1390 od_table.GfxclkFmin = (uint16_t)value;
1391 break;
1392 case OD8_SETTING_GFXCLK_FMAX:
1393 if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
1394 value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
1395 return -EINVAL;
1396
1397 od_table.GfxclkFmax = (uint16_t)value;
1398 break;
1399 case OD8_SETTING_GFXCLK_FREQ1:
1400 od_table.GfxclkFreq1 = (uint16_t)value;
1401 break;
1402 case OD8_SETTING_GFXCLK_VOLTAGE1:
1403 od_table.GfxclkVolt1 = (uint16_t)value;
1404 break;
1405 case OD8_SETTING_GFXCLK_FREQ2:
1406 od_table.GfxclkFreq2 = (uint16_t)value;
1407 break;
1408 case OD8_SETTING_GFXCLK_VOLTAGE2:
1409 od_table.GfxclkVolt2 = (uint16_t)value;
1410 break;
1411 case OD8_SETTING_GFXCLK_FREQ3:
1412 od_table.GfxclkFreq3 = (uint16_t)value;
1413 break;
1414 case OD8_SETTING_GFXCLK_VOLTAGE3:
1415 od_table.GfxclkVolt3 = (uint16_t)value;
1416 break;
1417 case OD8_SETTING_UCLK_FMAX:
1418 if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
1419 value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
1420 return -EINVAL;
1421 od_table.UclkFmax = (uint16_t)value;
1422 break;
1423 case OD8_SETTING_POWER_PERCENTAGE:
1424 od_table.OverDrivePct = (int16_t)value;
1425 break;
1426 case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
1427 od_table.FanMaximumRpm = (uint16_t)value;
1428 break;
1429 case OD8_SETTING_FAN_MIN_SPEED:
1430 od_table.FanMinimumPwm = (uint16_t)value;
1431 break;
1432 case OD8_SETTING_FAN_TARGET_TEMP:
1433 od_table.FanTargetTemperature = (uint16_t)value;
1434 break;
1435 case OD8_SETTING_OPERATING_TEMP_MAX:
1436 od_table.MaxOpTemp = (uint16_t)value;
1437 break;
1438 }
1439
1440 ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
1441 PP_ASSERT_WITH_CODE(!ret,
1442 "Failed to import over drive table!",
1443 return ret);
1444
1445 return 0;
1446 }
1447
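/*
 * Report the current gfxclk overdrive as a percentage above the highest
 * level of the golden (default) gfxclk DPM table.
 */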
1448 static int vega20_get_sclk_od(
1449 struct pp_hwmgr *hwmgr)
1450 {
1451 struct vega20_hwmgr *data = hwmgr->backend;
1452 struct vega20_single_dpm_table *sclk_table =
1453 &(data->dpm_table.gfx_table);
1454 struct vega20_single_dpm_table *golden_sclk_table =
1455 &(data->golden_dpm_table.gfx_table);
1456 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
1457 int golden_value = golden_sclk_table->dpm_levels
1458 [golden_sclk_table->count - 1].value;
1459
1460 /* od percentage */
1461 value -= golden_value;
1462 value = DIV_ROUND_UP(value * 100, golden_value);
1463
1464 return value;
1465 }
1466
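/*
 * Apply a gfxclk overdrive of @value percent: scale the golden top level,
 * program it as the new GfxclkFmax and rebuild the gfxclk DPM table.
 */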
1467 static int vega20_set_sclk_od(
1468 struct pp_hwmgr *hwmgr, uint32_t value)
1469 {
1470 struct vega20_hwmgr *data = hwmgr->backend;
1471 struct vega20_single_dpm_table *golden_sclk_table =
1472 &(data->golden_dpm_table.gfx_table);
1473 uint32_t od_sclk;
1474 int ret = 0;
1475
1476 od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
1477 od_sclk /= 100;
1478 od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
1479
1480 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
1481 PP_ASSERT_WITH_CODE(!ret,
1482 "[SetSclkOD] failed to set od gfxclk!",
1483 return ret);
1484
1485 /* retrieve updated gfxclk table */
1486 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
1487 PP_ASSERT_WITH_CODE(!ret,
1488 "[SetSclkOD] failed to refresh gfxclk table!",
1489 return ret);
1490
1491 return 0;
1492 }
1493
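/* Memory clock (UclkFmax) counterparts of the two gfxclk helpers above. */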
1494 static int vega20_get_mclk_od(
1495 struct pp_hwmgr *hwmgr)
1496 {
1497 struct vega20_hwmgr *data = hwmgr->backend;
1498 struct vega20_single_dpm_table *mclk_table =
1499 &(data->dpm_table.mem_table);
1500 struct vega20_single_dpm_table *golden_mclk_table =
1501 &(data->golden_dpm_table.mem_table);
1502 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
1503 int golden_value = golden_mclk_table->dpm_levels
1504 [golden_mclk_table->count - 1].value;
1505
1506 /* od percentage */
1507 value -= golden_value;
1508 value = DIV_ROUND_UP(value * 100, golden_value);
1509
1510 return value;
1511 }
1512
1513 static int vega20_set_mclk_od(
1514 struct pp_hwmgr *hwmgr, uint32_t value)
1515 {
1516 struct vega20_hwmgr *data = hwmgr->backend;
1517 struct vega20_single_dpm_table *golden_mclk_table =
1518 &(data->golden_dpm_table.mem_table);
1519 uint32_t od_mclk;
1520 int ret = 0;
1521
1522 od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
1523 od_mclk /= 100;
1524 od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
1525
1526 ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
1527 PP_ASSERT_WITH_CODE(!ret,
1528 "[SetMclkOD] failed to set od memclk!",
1529 return ret);
1530
1531 /* retrieve updated memclk table */
1532 ret = vega20_setup_memclk_dpm_table(hwmgr);
1533 PP_ASSERT_WITH_CODE(!ret,
1534 "[SetMclkOD] failed to refresh memclk table!",
1535 return ret);
1536
1537 return 0;
1538 }
1539
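/*
 * Pick the UMD pstate (profiling) clocks from the gfx and memory DPM
 * tables, falling back to level 0 when the tables are too short, and
 * scale the values by 100 for reporting.
 */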
1540 static int vega20_populate_umdpstate_clocks(
1541 struct pp_hwmgr *hwmgr)
1542 {
1543 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
1544 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
1545 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
1546
1547 hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
1548 hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
1549
1550 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
1551 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
1552 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
1553 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
1554 }
1555
1556 hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
1557 hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
1558
1559 return 0;
1560 }
1561
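/*
 * Ask the SMU for the DC-mode maximum DPM frequency of @clock_select;
 * when the DC limit is reported as zero, fall back to the AC limit.
 */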
1562 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
1563 PP_Clock *clock, PPCLK_e clock_select)
1564 {
1565 int ret = 0;
1566
1567 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1568 PPSMC_MSG_GetDcModeMaxDpmFreq,
1569 (clock_select << 16))) == 0,
1570 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
1571 return ret);
1572 *clock = smum_get_argument(hwmgr);
1573
1574 /* if DC limit is zero, return AC limit */
1575 if (*clock == 0) {
1576 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
1577 PPSMC_MSG_GetMaxDpmFreq,
1578 (clock_select << 16))) == 0,
1579 "[GetMaxSustainableClock] failed to get max AC clock from SMC!",
1580 return ret);
1581 *clock = smum_get_argument(hwmgr);
1582 }
1583
1584 return 0;
1585 }
1586
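/*
 * Seed the max sustainable clocks from the VBIOS boot state, refine them
 * via the SMU for each enabled DPM feature, and finally clamp uclock so
 * it never exceeds soc_clock.
 */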
1587 static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
1588 {
1589 struct vega20_hwmgr *data =
1590 (struct vega20_hwmgr *)(hwmgr->backend);
1591 struct vega20_max_sustainable_clocks *max_sustainable_clocks =
1592 &(data->max_sustainable_clocks);
1593 int ret = 0;
1594
1595 max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
1596 max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
1597 max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
1598 max_sustainable_clocks->display_clock = 0xFFFFFFFF;
1599 max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
1600 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
1601
1602 if (data->smu_features[GNLD_DPM_UCLK].enabled)
1603 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1604 &(max_sustainable_clocks->uclock),
1605 PPCLK_UCLK)) == 0,
1606 "[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
1607 return ret);
1608
1609 if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
1610 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1611 &(max_sustainable_clocks->soc_clock),
1612 PPCLK_SOCCLK)) == 0,
1613 "[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
1614 return ret);
1615
1616 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1617 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1618 &(max_sustainable_clocks->dcef_clock),
1619 PPCLK_DCEFCLK)) == 0,
1620 "[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
1621 return ret);
1622 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1623 &(max_sustainable_clocks->display_clock),
1624 PPCLK_DISPCLK)) == 0,
1625 "[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
1626 return ret);
1627 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1628 &(max_sustainable_clocks->phy_clock),
1629 PPCLK_PHYCLK)) == 0,
1630 "[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
1631 return ret);
1632 PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
1633 &(max_sustainable_clocks->pixel_clock),
1634 PPCLK_PIXCLK)) == 0,
1635 "[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
1636 return ret);
1637 }
1638
1639 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
1640 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
1641
1642 return 0;
1643 }
1644
1645 static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
1646 {
1647 int result;
1648
1649 result = smum_send_msg_to_smc(hwmgr,
1650 PPSMC_MSG_SetMGpuFanBoostLimitRpm);
1651 PP_ASSERT_WITH_CODE(!result,
1652 "[EnableMgpuFan] Failed to enable mgpu fan boost!",
1653 return result);
1654
1655 return 0;
1656 }
1657
1658 static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1659 {
1660 struct vega20_hwmgr *data =
1661 (struct vega20_hwmgr *)(hwmgr->backend);
1662
1663 data->uvd_power_gated = true;
1664 data->vce_power_gated = true;
1665
1666 if (data->smu_features[GNLD_DPM_UVD].enabled)
1667 data->uvd_power_gated = false;
1668
1669 if (data->smu_features[GNLD_DPM_VCE].enabled)
1670 data->vce_power_gated = false;
1671 }
1672
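/*
 * Main DPM bring-up sequence: program the allowed feature mask and the SMC
 * table, run BTC, enable all SMU features, override PCIe parameters, build
 * the default DPM tables and max sustainable clocks, apply the OD8 defaults
 * and cache the default AC PPT limit.
 */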
1673 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1674 {
1675 int result = 0;
1676
1677 smum_send_msg_to_smc_with_parameter(hwmgr,
1678 PPSMC_MSG_NumOfDisplays, 0);
1679
1680 result = vega20_set_allowed_featuresmask(hwmgr);
1681 PP_ASSERT_WITH_CODE(!result,
1682 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
1683 return result);
1684
1685 result = vega20_init_smc_table(hwmgr);
1686 PP_ASSERT_WITH_CODE(!result,
1687 "[EnableDPMTasks] Failed to initialize SMC table!",
1688 return result);
1689
1690 result = vega20_run_btc(hwmgr);
1691 PP_ASSERT_WITH_CODE(!result,
1692 "[EnableDPMTasks] Failed to run btc!",
1693 return result);
1694
1695 result = vega20_run_btc_afll(hwmgr);
1696 PP_ASSERT_WITH_CODE(!result,
1697 "[EnableDPMTasks] Failed to run btc afll!",
1698 return result);
1699
1700 result = vega20_enable_all_smu_features(hwmgr);
1701 PP_ASSERT_WITH_CODE(!result,
1702 "[EnableDPMTasks] Failed to enable all smu features!",
1703 return result);
1704
1705 result = vega20_override_pcie_parameters(hwmgr);
1706 PP_ASSERT_WITH_CODE(!result,
1707 "[EnableDPMTasks] Failed to override pcie parameters!",
1708 return result);
1709
1710 result = vega20_notify_smc_display_change(hwmgr);
1711 PP_ASSERT_WITH_CODE(!result,
1712 "[EnableDPMTasks] Failed to notify smc display change!",
1713 return result);
1714
1715 result = vega20_send_clock_ratio(hwmgr);
1716 PP_ASSERT_WITH_CODE(!result,
1717 "[EnableDPMTasks] Failed to send clock ratio!",
1718 return result);
1719
1720 /* Initialize UVD/VCE powergating state */
1721 vega20_init_powergate_state(hwmgr);
1722
1723 result = vega20_setup_default_dpm_tables(hwmgr);
1724 PP_ASSERT_WITH_CODE(!result,
1725 "[EnableDPMTasks] Failed to setup default DPM tables!",
1726 return result);
1727
1728 result = vega20_init_max_sustainable_clocks(hwmgr);
1729 PP_ASSERT_WITH_CODE(!result,
1730 "[EnableDPMTasks] Failed to get maximum sustainable clocks!",
1731 return result);
1732
1733 result = vega20_power_control_set_level(hwmgr);
1734 PP_ASSERT_WITH_CODE(!result,
1735 "[EnableDPMTasks] Failed to power control set level!",
1736 return result);
1737
1738 result = vega20_od8_initialize_default_settings(hwmgr);
1739 PP_ASSERT_WITH_CODE(!result,
1740 "[EnableDPMTasks] Failed to initialize odn settings!",
1741 return result);
1742
1743 result = vega20_populate_umdpstate_clocks(hwmgr);
1744 PP_ASSERT_WITH_CODE(!result,
1745 "[EnableDPMTasks] Failed to populate umdpstate clocks!",
1746 return result);
1747
1748 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
1749 POWER_SOURCE_AC << 16);
1750 PP_ASSERT_WITH_CODE(!result,
1751 "[GetPptLimit] get default PPT limit failed!",
1752 return result);
1753 hwmgr->power_limit =
1754 hwmgr->default_power_limit = smum_get_argument(hwmgr);
1755
1756 return 0;
1757 }
1758
1759 static uint32_t vega20_find_lowest_dpm_level(
1760 struct vega20_single_dpm_table *table)
1761 {
1762 uint32_t i;
1763
1764 for (i = 0; i < table->count; i++) {
1765 if (table->dpm_levels[i].enabled)
1766 break;
1767 }
1768 if (i >= table->count) {
1769 i = 0;
1770 table->dpm_levels[i].enabled = true;
1771 }
1772
1773 return i;
1774 }
1775
1776 static uint32_t vega20_find_highest_dpm_level(
1777 struct vega20_single_dpm_table *table)
1778 {
1779 int i = 0;
1780
1781 PP_ASSERT_WITH_CODE(table != NULL,
1782 "[FindHighestDPMLevel] DPM Table does not exist!",
1783 return 0);
1784 PP_ASSERT_WITH_CODE(table->count > 0,
1785 "[FindHighestDPMLevel] DPM Table has no entry!",
1786 return 0);
1787 PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
1788 "[FindHighestDPMLevel] DPM Table has too many entries!",
1789 return MAX_REGULAR_DPM_NUMBER - 1);
1790
1791 for (i = table->count - 1; i >= 0; i--) {
1792 if (table->dpm_levels[i].enabled)
1793 break;
1794 }
1795 if (i < 0) {
1796 i = 0;
1797 table->dpm_levels[i].enabled = true;
1798 }
1799
1800 return i;
1801 }
1802
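/*
 * Push the cached soft minimum frequencies (hard minimum for DCEFCLK) to
 * the SMU for every DPM feature selected in @feature_mask.  The message
 * argument packs the clock id in the upper 16 bits and the frequency in
 * the lower 16 bits.
 */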
1803 static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1804 {
1805 struct vega20_hwmgr *data =
1806 (struct vega20_hwmgr *)(hwmgr->backend);
1807 uint32_t min_freq;
1808 int ret = 0;
1809
1810 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1811 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1812 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
1813 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1814 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1815 (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
1816 "Failed to set soft min gfxclk !",
1817 return ret);
1818 }
1819
1820 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1821 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1822 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
1823 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1824 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1825 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1826 "Failed to set soft min memclk !",
1827 return ret);
1828 }
1829
1830 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1831 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1832 min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
1833
1834 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1835 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1836 (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
1837 "Failed to set soft min vclk!",
1838 return ret);
1839
1840 min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
1841
1842 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1843 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1844 (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
1845 "Failed to set soft min dclk!",
1846 return ret);
1847 }
1848
1849 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1850 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1851 min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
1852
1853 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1854 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1855 (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
1856 "Failed to set soft min eclk!",
1857 return ret);
1858 }
1859
1860 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1861 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1862 min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
1863
1864 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1865 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1866 (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
1867 "Failed to set soft min socclk!",
1868 return ret);
1869 }
1870
1871 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1872 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1873 min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
1874
1875 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1876 hwmgr, PPSMC_MSG_SetSoftMinByFreq,
1877 (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
1878 "Failed to set soft min fclk!",
1879 return ret);
1880 }
1881
1882 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
1883 (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
1884 min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
1885
1886 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1887 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1888 (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
1889 "Failed to set hard min dcefclk!",
1890 return ret);
1891 }
1892
1893 return ret;
1894 }
1895
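/* Counterpart of vega20_upload_dpm_min_level() for the soft maximum frequencies. */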
1896 static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
1897 {
1898 struct vega20_hwmgr *data =
1899 (struct vega20_hwmgr *)(hwmgr->backend);
1900 uint32_t max_freq;
1901 int ret = 0;
1902
1903 if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
1904 (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
1905 max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
1906
1907 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1908 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1909 (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
1910 "Failed to set soft max gfxclk!",
1911 return ret);
1912 }
1913
1914 if (data->smu_features[GNLD_DPM_UCLK].enabled &&
1915 (feature_mask & FEATURE_DPM_UCLK_MASK)) {
1916 max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
1917
1918 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1919 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1920 (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
1921 "Failed to set soft max memclk!",
1922 return ret);
1923 }
1924
1925 if (data->smu_features[GNLD_DPM_UVD].enabled &&
1926 (feature_mask & FEATURE_DPM_UVD_MASK)) {
1927 max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
1928
1929 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1930 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1931 (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
1932 "Failed to set soft max vclk!",
1933 return ret);
1934
1935 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
1936 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1937 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1938 (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
1939 "Failed to set soft max dclk!",
1940 return ret);
1941 }
1942
1943 if (data->smu_features[GNLD_DPM_VCE].enabled &&
1944 (feature_mask & FEATURE_DPM_VCE_MASK)) {
1945 max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
1946
1947 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1948 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1949 (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
1950 "Failed to set soft max eclk!",
1951 return ret);
1952 }
1953
1954 if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
1955 (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
1956 max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
1957
1958 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1959 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1960 (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
1961 "Failed to set soft max socclk!",
1962 return ret);
1963 }
1964
1965 if (data->smu_features[GNLD_DPM_FCLK].enabled &&
1966 (feature_mask & FEATURE_DPM_FCLK_MASK)) {
1967 max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
1968
1969 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1970 hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
1971 (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
1972 "Failed to set soft max fclk!",
1973 return ret);
1974 }
1975
1976 return ret;
1977 }
1978
1979 int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1980 {
1981 struct vega20_hwmgr *data =
1982 (struct vega20_hwmgr *)(hwmgr->backend);
1983 int ret = 0;
1984
1985 if (data->smu_features[GNLD_DPM_VCE].supported) {
1986 if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
1987 if (enable)
1988 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
1989 else
1990 PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
1991 }
1992
1993 ret = vega20_enable_smc_features(hwmgr,
1994 enable,
1995 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
1996 PP_ASSERT_WITH_CODE(!ret,
1997 "Attempt to Enable/Disable DPM VCE Failed!",
1998 return ret);
1999 data->smu_features[GNLD_DPM_VCE].enabled = enable;
2000 }
2001
2002 return 0;
2003 }
2004
2005 static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
2006 uint32_t *clock,
2007 PPCLK_e clock_select,
2008 bool max)
2009 {
2010 int ret;
2011 *clock = 0;
2012
2013 if (max) {
2014 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2015 PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
2016 "[GetClockRanges] Failed to get max clock from SMC!",
2017 return ret);
2018 *clock = smum_get_argument(hwmgr);
2019 } else {
2020 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2021 PPSMC_MSG_GetMinDpmFreq,
2022 (clock_select << 16))) == 0,
2023 "[GetClockRanges] Failed to get min clock from SMC!",
2024 return ret);
2025 *clock = smum_get_argument(hwmgr);
2026 }
2027
2028 return 0;
2029 }
2030
2031 static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
2032 {
2033 struct vega20_hwmgr *data =
2034 (struct vega20_hwmgr *)(hwmgr->backend);
2035 uint32_t gfx_clk;
2036 int ret = 0;
2037
2038 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
2039 "[GetSclks]: gfxclk dpm not enabled!\n",
2040 return -EPERM);
2041
2042 if (low) {
2043 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
2044 PP_ASSERT_WITH_CODE(!ret,
2045 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
2046 return ret);
2047 } else {
2048 ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
2049 PP_ASSERT_WITH_CODE(!ret,
2050 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
2051 return ret);
2052 }
2053
2054 return (gfx_clk * 100);
2055 }
2056
2057 static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
2058 {
2059 struct vega20_hwmgr *data =
2060 (struct vega20_hwmgr *)(hwmgr->backend);
2061 uint32_t mem_clk;
2062 int ret = 0;
2063
2064 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
2065 "[MemMclks]: memclk dpm not enabled!\n",
2066 return -EPERM);
2067
2068 if (low) {
2069 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
2070 PP_ASSERT_WITH_CODE(!ret,
2071 "[GetMclks]: fail to get min PPCLK_UCLK\n",
2072 return ret);
2073 } else {
2074 ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
2075 PP_ASSERT_WITH_CODE(!ret,
2076 "[GetMclks]: fail to get max PPCLK_UCLK\n",
2077 return ret);
2078 }
2079
2080 return (mem_clk * 100);
2081 }
2082
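/*
 * Return the SMU metrics table, refreshing the cached copy from the SMU
 * at most once every HZ/2 jiffies (roughly every 500 ms).
 */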
2083 static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
2084 {
2085 struct vega20_hwmgr *data =
2086 (struct vega20_hwmgr *)(hwmgr->backend);
2087 int ret = 0;
2088
2089 if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
2090 ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
2091 TABLE_SMU_METRICS, true);
2092 if (ret) {
2093 pr_info("Failed to export SMU metrics table!\n");
2094 return ret;
2095 }
2096 memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
2097 data->metrics_time = jiffies;
2098 } else
2099 memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));
2100
2101 return ret;
2102 }
2103
2104 static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2105 uint32_t *query)
2106 {
2107 int ret = 0;
2108 SmuMetrics_t metrics_table;
2109
2110 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
2111 if (ret)
2112 return ret;
2113
2114 /* SMU firmware 40.46 (version 0x282e00) reports the value in AverageSocketPower instead of CurrSocketPower */
2115 if (hwmgr->smu_version == 0x282e00)
2116 *query = metrics_table.AverageSocketPower << 8;
2117 else
2118 *query = metrics_table.CurrSocketPower << 8;
2119
2120 return ret;
2121 }
2122
2123 static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
2124 PPCLK_e clk_id, uint32_t *clk_freq)
2125 {
2126 int ret = 0;
2127
2128 *clk_freq = 0;
2129
2130 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2131 PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
2132 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
2133 return ret);
2134 *clk_freq = smum_get_argument(hwmgr);
2135
2136 *clk_freq = *clk_freq * 100;
2137
2138 return 0;
2139 }
2140
2141 static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
2142 int idx,
2143 uint32_t *activity_percent)
2144 {
2145 int ret = 0;
2146 SmuMetrics_t metrics_table;
2147
2148 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
2149 if (ret)
2150 return ret;
2151
2152 switch (idx) {
2153 case AMDGPU_PP_SENSOR_GPU_LOAD:
2154 *activity_percent = metrics_table.AverageGfxActivity;
2155 break;
2156 case AMDGPU_PP_SENSOR_MEM_LOAD:
2157 *activity_percent = metrics_table.AverageUclkActivity;
2158 break;
2159 default:
2160 pr_err("Invalid index for retrieving clock activity\n");
2161 return -EINVAL;
2162 }
2163
2164 return ret;
2165 }
2166
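/*
 * Backend for the AMDGPU_PP_SENSOR_* queries: clocks, loads and
 * temperatures come from the SMU (mostly via the cached metrics table),
 * while VDDGFX is decoded from the SVI0 telemetry register.
 */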
2167 static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
2168 void *value, int *size)
2169 {
2170 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2171 struct amdgpu_device *adev = hwmgr->adev;
2172 SmuMetrics_t metrics_table;
2173 uint32_t val_vid;
2174 int ret = 0;
2175
2176 switch (idx) {
2177 case AMDGPU_PP_SENSOR_GFX_SCLK:
2178 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
2179 if (ret)
2180 return ret;
2181
2182 *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
2183 *size = 4;
2184 break;
2185 case AMDGPU_PP_SENSOR_GFX_MCLK:
2186 ret = vega20_get_current_clk_freq(hwmgr,
2187 PPCLK_UCLK,
2188 (uint32_t *)value);
2189 if (!ret)
2190 *size = 4;
2191 break;
2192 case AMDGPU_PP_SENSOR_GPU_LOAD:
2193 case AMDGPU_PP_SENSOR_MEM_LOAD:
2194 ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
2195 if (!ret)
2196 *size = 4;
2197 break;
2198 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
2199 *((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
2200 *size = 4;
2201 break;
2202 case AMDGPU_PP_SENSOR_EDGE_TEMP:
2203 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
2204 if (ret)
2205 return ret;
2206
2207 *((uint32_t *)value) = metrics_table.TemperatureEdge *
2208 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2209 *size = 4;
2210 break;
2211 case AMDGPU_PP_SENSOR_MEM_TEMP:
2212 ret = vega20_get_metrics_table(hwmgr, &metrics_table);
2213 if (ret)
2214 return ret;
2215
2216 *((uint32_t *)value) = metrics_table.TemperatureHBM *
2217 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2218 *size = 4;
2219 break;
2220 case AMDGPU_PP_SENSOR_UVD_POWER:
2221 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
2222 *size = 4;
2223 break;
2224 case AMDGPU_PP_SENSOR_VCE_POWER:
2225 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
2226 *size = 4;
2227 break;
2228 case AMDGPU_PP_SENSOR_GPU_POWER:
2229 *size = 16;
2230 ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
2231 break;
2232 case AMDGPU_PP_SENSOR_VDDGFX:
2233 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
2234 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
2235 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
2236 *((uint32_t *)value) =
2237 (uint32_t)convert_to_vddc((uint8_t)val_vid);
2238 break;
2239 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2240 ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
2241 if (!ret)
2242 *size = 8;
2243 break;
2244 default:
2245 ret = -EINVAL;
2246 break;
2247 }
2248 return ret;
2249 }
2250
2251 int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
2252 struct pp_display_clock_request *clock_req)
2253 {
2254 int result = 0;
2255 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2256 enum amd_pp_clock_type clk_type = clock_req->clock_type;
2257 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
2258 PPCLK_e clk_select = 0;
2259 uint32_t clk_request = 0;
2260
2261 if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
2262 switch (clk_type) {
2263 case amd_pp_dcef_clock:
2264 clk_select = PPCLK_DCEFCLK;
2265 break;
2266 case amd_pp_disp_clock:
2267 clk_select = PPCLK_DISPCLK;
2268 break;
2269 case amd_pp_pixel_clock:
2270 clk_select = PPCLK_PIXCLK;
2271 break;
2272 case amd_pp_phy_clock:
2273 clk_select = PPCLK_PHYCLK;
2274 break;
2275 default:
2276 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
2277 result = -EINVAL;
2278 break;
2279 }
2280
2281 if (!result) {
2282 clk_request = (clk_select << 16) | clk_freq;
2283 result = smum_send_msg_to_smc_with_parameter(hwmgr,
2284 PPSMC_MSG_SetHardMinByFreq,
2285 clk_request);
2286 }
2287 }
2288
2289 return result;
2290 }
2291
2292 static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
2293 PHM_PerformanceLevelDesignation designation, uint32_t index,
2294 PHM_PerformanceLevel *level)
2295 {
2296 return 0;
2297 }
2298
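/*
 * Propagate the current display configuration to the SMU: request a hard
 * minimum DCEFCLK (plus a deep-sleep DCEFCLK floor when supported) and a
 * hard minimum UCLK sized for the displays.
 */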
2299 static int vega20_notify_smc_display_config_after_ps_adjustment(
2300 struct pp_hwmgr *hwmgr)
2301 {
2302 struct vega20_hwmgr *data =
2303 (struct vega20_hwmgr *)(hwmgr->backend);
2304 struct vega20_single_dpm_table *dpm_table =
2305 &data->dpm_table.mem_table;
2306 struct PP_Clocks min_clocks = {0};
2307 struct pp_display_clock_request clock_req;
2308 int ret = 0;
2309
2310 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
2311 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
2312 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2313
2314 if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
2315 clock_req.clock_type = amd_pp_dcef_clock;
2316 clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
2317 if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
2318 if (data->smu_features[GNLD_DS_DCEFCLK].supported)
2319 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
2320 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
2321 min_clocks.dcefClockInSR / 100)) == 0,
2322 "Attempt to set divider for DCEFCLK Failed!",
2323 return ret);
2324 } else {
2325 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
2326 }
2327 }
2328
2329 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
2330 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
2331 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2332 PPSMC_MSG_SetHardMinByFreq,
2333 (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
2334 "[SetHardMinFreq] Set hard min uclk failed!",
2335 return ret);
2336 }
2337
2338 return 0;
2339 }
2340
2341 static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2342 {
2343 struct vega20_hwmgr *data =
2344 (struct vega20_hwmgr *)(hwmgr->backend);
2345 uint32_t soft_level;
2346 int ret = 0;
2347
2348 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2349
2350 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2351 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2352 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2353
2354 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2355
2356 data->dpm_table.mem_table.dpm_state.soft_min_level =
2357 data->dpm_table.mem_table.dpm_state.soft_max_level =
2358 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2359
2360 soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2361
2362 data->dpm_table.soc_table.dpm_state.soft_min_level =
2363 data->dpm_table.soc_table.dpm_state.soft_max_level =
2364 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2365
2366 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2367 FEATURE_DPM_UCLK_MASK |
2368 FEATURE_DPM_SOCCLK_MASK);
2369 PP_ASSERT_WITH_CODE(!ret,
2370 "Failed to upload boot level to highest!",
2371 return ret);
2372
2373 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2374 FEATURE_DPM_UCLK_MASK |
2375 FEATURE_DPM_SOCCLK_MASK);
2376 PP_ASSERT_WITH_CODE(!ret,
2377 "Failed to upload dpm max level to highest!",
2378 return ret);
2379
2380 return 0;
2381 }
2382
2383 static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2384 {
2385 struct vega20_hwmgr *data =
2386 (struct vega20_hwmgr *)(hwmgr->backend);
2387 uint32_t soft_level;
2388 int ret = 0;
2389
2390 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2391
2392 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2393 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2394 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
2395
2396 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2397
2398 data->dpm_table.mem_table.dpm_state.soft_min_level =
2399 data->dpm_table.mem_table.dpm_state.soft_max_level =
2400 data->dpm_table.mem_table.dpm_levels[soft_level].value;
2401
2402 soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2403
2404 data->dpm_table.soc_table.dpm_state.soft_min_level =
2405 data->dpm_table.soc_table.dpm_state.soft_max_level =
2406 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2407
2408 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2409 FEATURE_DPM_UCLK_MASK |
2410 FEATURE_DPM_SOCCLK_MASK);
2411 PP_ASSERT_WITH_CODE(!ret,
2412 "Failed to upload boot level to highest!",
2413 return ret);
2414
2415 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2416 FEATURE_DPM_UCLK_MASK |
2417 FEATURE_DPM_SOCCLK_MASK);
2418 PP_ASSERT_WITH_CODE(!ret,
2419 "Failed to upload dpm max level to highest!",
2420 return ret);
2421
2422 return 0;
2423
2424 }
2425
2426 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2427 {
2428 struct vega20_hwmgr *data =
2429 (struct vega20_hwmgr *)(hwmgr->backend);
2430 uint32_t soft_min_level, soft_max_level;
2431 int ret = 0;
2432
2433 /* gfxclk soft min/max settings */
2434 soft_min_level =
2435 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2436 soft_max_level =
2437 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2438
2439 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2440 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2441 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2442 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2443
2444 /* uclk soft min/max settings */
2445 soft_min_level =
2446 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2447 soft_max_level =
2448 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2449
2450 data->dpm_table.mem_table.dpm_state.soft_min_level =
2451 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2452 data->dpm_table.mem_table.dpm_state.soft_max_level =
2453 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2454
2455 /* socclk soft min/max settings */
2456 soft_min_level =
2457 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2458 soft_max_level =
2459 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2460
2461 data->dpm_table.soc_table.dpm_state.soft_min_level =
2462 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2463 data->dpm_table.soc_table.dpm_state.soft_max_level =
2464 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2465
2466 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2467 FEATURE_DPM_UCLK_MASK |
2468 FEATURE_DPM_SOCCLK_MASK);
2469 PP_ASSERT_WITH_CODE(!ret,
2470 "Failed to upload DPM Bootup Levels!",
2471 return ret);
2472
2473 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2474 FEATURE_DPM_UCLK_MASK |
2475 FEATURE_DPM_SOCCLK_MASK);
2476 PP_ASSERT_WITH_CODE(!ret,
2477 "Failed to upload DPM Max Levels!",
2478 return ret);
2479
2480 return 0;
2481 }
2482
2483 static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2484 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
2485 {
2486 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2487 struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
2488 struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
2489 struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
2490
2491 *sclk_mask = 0;
2492 *mclk_mask = 0;
2493 *soc_mask = 0;
2494
2495 if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
2496 mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
2497 soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
2498 *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
2499 *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
2500 *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
2501 }
2502
2503 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2504 *sclk_mask = 0;
2505 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
2506 *mclk_mask = 0;
2507 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
2508 *sclk_mask = gfx_dpm_table->count - 1;
2509 *mclk_mask = mem_dpm_table->count - 1;
2510 *soc_mask = soc_dpm_table->count - 1;
2511 }
2512
2513 return 0;
2514 }
2515
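/*
 * Restrict a clock domain to the DPM levels selected in @mask: the lowest
 * set bit becomes the soft minimum and the highest set bit the soft
 * maximum (hard minimum only for DCEFCLK, link level index for PCIE).
 */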
2516 static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2517 enum pp_clock_type type, uint32_t mask)
2518 {
2519 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2520 uint32_t soft_min_level, soft_max_level, hard_min_level;
2521 int ret = 0;
2522
2523 switch (type) {
2524 case PP_SCLK:
2525 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2526 soft_max_level = mask ? (fls(mask) - 1) : 0;
2527
2528 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2529 pr_err("Clock level specified %d is over max allowed %d\n",
2530 soft_max_level,
2531 data->dpm_table.gfx_table.count - 1);
2532 return -EINVAL;
2533 }
2534
2535 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2536 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2537 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2538 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2539
2540 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2541 PP_ASSERT_WITH_CODE(!ret,
2542 "Failed to upload boot level to lowest!",
2543 return ret);
2544
2545 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
2546 PP_ASSERT_WITH_CODE(!ret,
2547 "Failed to upload dpm max level to highest!",
2548 return ret);
2549 break;
2550
2551 case PP_MCLK:
2552 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2553 soft_max_level = mask ? (fls(mask) - 1) : 0;
2554
2555 if (soft_max_level >= data->dpm_table.mem_table.count) {
2556 pr_err("Clock level specified %d is over max allowed %d\n",
2557 soft_max_level,
2558 data->dpm_table.mem_table.count - 1);
2559 return -EINVAL;
2560 }
2561
2562 data->dpm_table.mem_table.dpm_state.soft_min_level =
2563 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2564 data->dpm_table.mem_table.dpm_state.soft_max_level =
2565 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2566
2567 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2568 PP_ASSERT_WITH_CODE(!ret,
2569 "Failed to upload boot level to lowest!",
2570 return ret);
2571
2572 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
2573 PP_ASSERT_WITH_CODE(!ret,
2574 "Failed to upload dpm max level to highest!",
2575 return ret);
2576
2577 break;
2578
2579 case PP_SOCCLK:
2580 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2581 soft_max_level = mask ? (fls(mask) - 1) : 0;
2582
2583 if (soft_max_level >= data->dpm_table.soc_table.count) {
2584 pr_err("Clock level specified %d is over max allowed %d\n",
2585 soft_max_level,
2586 data->dpm_table.soc_table.count - 1);
2587 return -EINVAL;
2588 }
2589
2590 data->dpm_table.soc_table.dpm_state.soft_min_level =
2591 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2592 data->dpm_table.soc_table.dpm_state.soft_max_level =
2593 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2594
2595 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2596 PP_ASSERT_WITH_CODE(!ret,
2597 "Failed to upload boot level to lowest!",
2598 return ret);
2599
2600 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
2601 PP_ASSERT_WITH_CODE(!ret,
2602 "Failed to upload dpm max level to highest!",
2603 return ret);
2604
2605 break;
2606
2607 case PP_FCLK:
2608 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2609 soft_max_level = mask ? (fls(mask) - 1) : 0;
2610
2611 if (soft_max_level >= data->dpm_table.fclk_table.count) {
2612 pr_err("Clock level specified %d is over max allowed %d\n",
2613 soft_max_level,
2614 data->dpm_table.fclk_table.count - 1);
2615 return -EINVAL;
2616 }
2617
2618 data->dpm_table.fclk_table.dpm_state.soft_min_level =
2619 data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
2620 data->dpm_table.fclk_table.dpm_state.soft_max_level =
2621 data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;
2622
2623 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2624 PP_ASSERT_WITH_CODE(!ret,
2625 "Failed to upload boot level to lowest!",
2626 return ret);
2627
2628 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
2629 PP_ASSERT_WITH_CODE(!ret,
2630 "Failed to upload dpm max level to highest!",
2631 return ret);
2632
2633 break;
2634
2635 case PP_DCEFCLK:
2636 hard_min_level = mask ? (ffs(mask) - 1) : 0;
2637
2638 if (hard_min_level >= data->dpm_table.dcef_table.count) {
2639 pr_err("Clock level specified %d is over max allowed %d\n",
2640 hard_min_level,
2641 data->dpm_table.dcef_table.count - 1);
2642 return -EINVAL;
2643 }
2644
2645 data->dpm_table.dcef_table.dpm_state.hard_min_level =
2646 data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;
2647
2648 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
2649 PP_ASSERT_WITH_CODE(!ret,
2650 "Failed to upload boot level to lowest!",
2651 return ret);
2652
2653 //TODO: Setting DCEFCLK max dpm level is not supported
2654
2655 break;
2656
2657 case PP_PCIE:
2658 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2659 soft_max_level = mask ? (fls(mask) - 1) : 0;
2660 if (soft_min_level >= NUM_LINK_LEVELS ||
2661 soft_max_level >= NUM_LINK_LEVELS)
2662 return -EINVAL;
2663
2664 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
2665 PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
2666 PP_ASSERT_WITH_CODE(!ret,
2667 "Failed to set min link dpm level!",
2668 return ret);
2669
2670 break;
2671
2672 default:
2673 break;
2674 }
2675
2676 return 0;
2677 }
2678
2679 static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
2680 enum amd_dpm_forced_level level)
2681 {
2682 int ret = 0;
2683 uint32_t sclk_mask, mclk_mask, soc_mask;
2684
2685 switch (level) {
2686 case AMD_DPM_FORCED_LEVEL_HIGH:
2687 ret = vega20_force_dpm_highest(hwmgr);
2688 break;
2689
2690 case AMD_DPM_FORCED_LEVEL_LOW:
2691 ret = vega20_force_dpm_lowest(hwmgr);
2692 break;
2693
2694 case AMD_DPM_FORCED_LEVEL_AUTO:
2695 ret = vega20_unforce_dpm_levels(hwmgr);
2696 break;
2697
2698 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2699 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2700 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2701 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2702 ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
2703 if (ret)
2704 return ret;
2705 vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
2706 vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
2707 vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
2708 break;
2709
2710 case AMD_DPM_FORCED_LEVEL_MANUAL:
2711 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2712 default:
2713 break;
2714 }
2715
2716 return ret;
2717 }
2718
2719 static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
2720 {
2721 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2722
2723 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
2724 return AMD_FAN_CTRL_MANUAL;
2725 else
2726 return AMD_FAN_CTRL_AUTO;
2727 }
2728
2729 static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
2730 {
2731 switch (mode) {
2732 case AMD_FAN_CTRL_NONE:
2733 vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2734 break;
2735 case AMD_FAN_CTRL_MANUAL:
2736 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2737 vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
2738 break;
2739 case AMD_FAN_CTRL_AUTO:
2740 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
2741 vega20_fan_ctrl_start_smc_fan_control(hwmgr);
2742 break;
2743 default:
2744 break;
2745 }
2746 }
2747
2748 static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
2749 struct amd_pp_simple_clock_info *info)
2750 {
2751 #if 0
2752 struct phm_ppt_v2_information *table_info =
2753 (struct phm_ppt_v2_information *)hwmgr->pptable;
2754 struct phm_clock_and_voltage_limits *max_limits =
2755 &table_info->max_clock_voltage_on_ac;
2756
2757 info->engine_max_clock = max_limits->sclk;
2758 info->memory_max_clock = max_limits->mclk;
2759 #endif
2760 return 0;
2761 }
2762
2763
2764 static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2765 struct pp_clock_levels_with_latency *clocks)
2766 {
2767 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2768 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2769 int i, count;
2770
2771 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
2772 return -1;
2773
2774 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2775 clocks->num_levels = count;
2776
2777 for (i = 0; i < count; i++) {
2778 clocks->data[i].clocks_in_khz =
2779 dpm_table->dpm_levels[i].value * 1000;
2780 clocks->data[i].latency_in_us = 0;
2781 }
2782
2783 return 0;
2784 }
2785
2786 static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
2787 uint32_t clock)
2788 {
2789 return 25;
2790 }
2791
2792 static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2793 struct pp_clock_levels_with_latency *clocks)
2794 {
2795 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2796 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2797 int i, count;
2798
2799 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
2800 return -1;
2801
2802 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2803 clocks->num_levels = data->mclk_latency_table.count = count;
2804
2805 for (i = 0; i < count; i++) {
2806 clocks->data[i].clocks_in_khz =
2807 data->mclk_latency_table.entries[i].frequency =
2808 dpm_table->dpm_levels[i].value * 1000;
2809 clocks->data[i].latency_in_us =
2810 data->mclk_latency_table.entries[i].latency =
2811 vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
2812 }
2813
2814 return 0;
2815 }
2816
2817 static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2818 struct pp_clock_levels_with_latency *clocks)
2819 {
2820 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2821 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2822 int i, count;
2823
2824 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
2825 return -1;
2826
2827 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2828 clocks->num_levels = count;
2829
2830 for (i = 0; i < count; i++) {
2831 clocks->data[i].clocks_in_khz =
2832 dpm_table->dpm_levels[i].value * 1000;
2833 clocks->data[i].latency_in_us = 0;
2834 }
2835
2836 return 0;
2837 }
2838
2839 static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2840 struct pp_clock_levels_with_latency *clocks)
2841 {
2842 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2843 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2844 int i, count;
2845
2846 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
2847 return -1;
2848
2849 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2850 clocks->num_levels = count;
2851
2852 for (i = 0; i < count; i++) {
2853 clocks->data[i].clocks_in_khz =
2854 dpm_table->dpm_levels[i].value * 1000;
2855 clocks->data[i].latency_in_us = 0;
2856 }
2857
2858 return 0;
2859
2860 }
2861
2862 static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
2863 enum amd_pp_clock_type type,
2864 struct pp_clock_levels_with_latency *clocks)
2865 {
2866 int ret;
2867
2868 switch (type) {
2869 case amd_pp_sys_clock:
2870 ret = vega20_get_sclks(hwmgr, clocks);
2871 break;
2872 case amd_pp_mem_clock:
2873 ret = vega20_get_memclocks(hwmgr, clocks);
2874 break;
2875 case amd_pp_dcef_clock:
2876 ret = vega20_get_dcefclocks(hwmgr, clocks);
2877 break;
2878 case amd_pp_soc_clock:
2879 ret = vega20_get_socclocks(hwmgr, clocks);
2880 break;
2881 default:
2882 return -EINVAL;
2883 }
2884
2885 return ret;
2886 }
2887
2888 static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
2889 enum amd_pp_clock_type type,
2890 struct pp_clock_levels_with_voltage *clocks)
2891 {
2892 clocks->num_levels = 0;
2893
2894 return 0;
2895 }
2896
2897 static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
2898 void *clock_ranges)
2899 {
2900 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
2901 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
2902 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
2903
2904 if (!data->registry_data.disable_water_mark &&
2905 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2906 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2907 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
2908 data->water_marks_bitmap |= WaterMarksExist;
2909 data->water_marks_bitmap &= ~WaterMarksLoaded;
2910 }
2911
2912 return 0;
2913 }
2914
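/*
 * Implements the overdrive table edit commands (PP_OD_EDIT_*,
 * PP_OD_RESTORE_DEFAULT_TABLE, PP_OD_COMMIT_DPM_TABLE): edit the sclk
 * min/max, the mclk max or the three-point gfxclk voltage curve, restore
 * the defaults from the SMU, or commit the staged OverDrive table and
 * refresh the affected DPM tables.
 */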
2915 static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2916 enum PP_OD_DPM_TABLE_COMMAND type,
2917 long *input, uint32_t size)
2918 {
2919 struct vega20_hwmgr *data =
2920 (struct vega20_hwmgr *)(hwmgr->backend);
2921 struct vega20_od8_single_setting *od8_settings =
2922 data->od8_settings.od8_settings_array;
2923 OverDriveTable_t *od_table =
2924 &(data->smc_state_table.overdrive_table);
2925 int32_t input_index, input_clk, input_vol, i;
2926 int od8_id;
2927 int ret;
2928
2929 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
2930 return -EINVAL);
2931
2932 switch (type) {
2933 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2934 if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
2935 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
2936 pr_info("Sclk min/max frequency overdrive not supported\n");
2937 return -EOPNOTSUPP;
2938 }
2939
2940 for (i = 0; i < size; i += 2) {
2941 if (i + 2 > size) {
2942 pr_info("invalid number of input parameters %d\n",
2943 size);
2944 return -EINVAL;
2945 }
2946
2947 input_index = input[i];
2948 input_clk = input[i + 1];
2949
2950 if (input_index != 0 && input_index != 1) {
2951 pr_info("Invalid index %d\n", input_index);
2952 pr_info("Support min/max sclk frequency setting only which index by 0/1\n");
2953 return -EINVAL;
2954 }
2955
2956 if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
2957 input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
2958 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
2959 input_clk,
2960 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
2961 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
2962 return -EINVAL;
2963 }
2964
2965 if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
2966 (input_index == 1 && od_table->GfxclkFmax != input_clk))
2967 data->gfxclk_overdrive = true;
2968
2969 if (input_index == 0)
2970 od_table->GfxclkFmin = input_clk;
2971 else
2972 od_table->GfxclkFmax = input_clk;
2973 }
2974
2975 break;
2976
2977 case PP_OD_EDIT_MCLK_VDDC_TABLE:
2978 if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
2979 pr_info("Mclk max frequency overdrive not supported\n");
2980 return -EOPNOTSUPP;
2981 }
2982
2983 for (i = 0; i < size; i += 2) {
2984 if (i + 2 > size) {
2985 pr_info("invalid number of input parameters %d\n",
2986 size);
2987 return -EINVAL;
2988 }
2989
2990 input_index = input[i];
2991 input_clk = input[i + 1];
2992
2993 if (input_index != 1) {
2994 pr_info("Invalid index %d\n", input_index);
2995 pr_info("Support max Mclk frequency setting only which index by 1\n");
2996 return -EINVAL;
2997 }
2998
2999 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
3000 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
3001 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3002 input_clk,
3003 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3004 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3005 return -EINVAL;
3006 }
3007
3008 if (input_index == 1 && od_table->UclkFmax != input_clk)
3009 data->memclk_overdrive = true;
3010
3011 od_table->UclkFmax = input_clk;
3012 }
3013
3014 break;
3015
3016 case PP_OD_EDIT_VDDC_CURVE:
3017 if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3018 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3019 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3020 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3021 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3022 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
3023 pr_info("Voltage curve calibrate not supported\n");
3024 return -EOPNOTSUPP;
3025 }
3026
3027 for (i = 0; i < size; i += 3) {
3028 if (i + 3 > size) {
3029 pr_info("invalid number of input parameters %d\n",
3030 size);
3031 return -EINVAL;
3032 }
3033
3034 input_index = input[i];
3035 input_clk = input[i + 1];
3036 input_vol = input[i + 2];
3037
3038 if (input_index > 2) {
3039 pr_info("Setting for point %d is not supported\n",
3040 input_index + 1);
3041 pr_info("Three supported points index by 0, 1, 2\n");
3042 return -EINVAL;
3043 }
3044
3045 od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
3046 if (input_clk < od8_settings[od8_id].min_value ||
3047 input_clk > od8_settings[od8_id].max_value) {
3048 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
3049 input_clk,
3050 od8_settings[od8_id].min_value,
3051 od8_settings[od8_id].max_value);
3052 return -EINVAL;
3053 }
3054
3055 od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
3056 if (input_vol < od8_settings[od8_id].min_value ||
3057 input_vol > od8_settings[od8_id].max_value) {
3058 pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
3059 input_vol,
3060 od8_settings[od8_id].min_value,
3061 od8_settings[od8_id].max_value);
3062 return -EINVAL;
3063 }
3064
3065 switch (input_index) {
3066 case 0:
3067 od_table->GfxclkFreq1 = input_clk;
3068 od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
3069 break;
3070 case 1:
3071 od_table->GfxclkFreq2 = input_clk;
3072 od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
3073 break;
3074 case 2:
3075 od_table->GfxclkFreq3 = input_clk;
3076 od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
3077 break;
3078 }
3079 }
3080 break;
3081
3082 case PP_OD_RESTORE_DEFAULT_TABLE:
3083 data->gfxclk_overdrive = false;
3084 data->memclk_overdrive = false;
3085
3086 ret = smum_smc_table_manager(hwmgr,
3087 (uint8_t *)od_table,
3088 TABLE_OVERDRIVE, true);
3089 PP_ASSERT_WITH_CODE(!ret,
3090 "Failed to export overdrive table!",
3091 return ret);
3092 break;
3093
3094 case PP_OD_COMMIT_DPM_TABLE:
3095 ret = smum_smc_table_manager(hwmgr,
3096 (uint8_t *)od_table,
3097 TABLE_OVERDRIVE, false);
3098 PP_ASSERT_WITH_CODE(!ret,
3099 "Failed to import overdrive table!",
3100 return ret);
3101
3102 /* retrieve updated gfxclk table */
3103 if (data->gfxclk_overdrive) {
3104 data->gfxclk_overdrive = false;
3105
3106 ret = vega20_setup_gfxclk_dpm_table(hwmgr);
3107 if (ret)
3108 return ret;
3109 }
3110
3111 /* retrieve updated memclk table */
3112 if (data->memclk_overdrive) {
3113 data->memclk_overdrive = false;
3114
3115 ret = vega20_setup_memclk_dpm_table(hwmgr);
3116 if (ret)
3117 return ret;
3118 }
3119 break;
3120
3121 default:
3122 return -EINVAL;
3123 }
3124
3125 return 0;
3126 }
3127
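/*
 * Notify the SMU management processor (MP1) of an imminent shutdown,
 * driver unload or reset by sending the matching PPSMC prepare message.
 * PP_MP1_STATE_NONE (or any unknown state) is treated as a no-op.
 */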
3128 static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
3129 enum pp_mp1_state mp1_state)
3130 {
3131 uint16_t msg;
3132 int ret;
3133
3134 switch (mp1_state) {
3135 case PP_MP1_STATE_SHUTDOWN:
3136 msg = PPSMC_MSG_PrepareMp1ForShutdown;
3137 break;
3138 case PP_MP1_STATE_UNLOAD:
3139 msg = PPSMC_MSG_PrepareMp1ForUnload;
3140 break;
3141 case PP_MP1_STATE_RESET:
3142 msg = PPSMC_MSG_PrepareMp1ForReset;
3143 break;
3144 case PP_MP1_STATE_NONE:
3145 default:
3146 return 0;
3147 }
3148
3149 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
3150 "[PrepareMp1] Failed!",
3151 return ret);
3152
3153 return 0;
3154 }
3155
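/*
 * Print the SMU feature enablement status into 'buf': the overall mask,
 * then one line per GNLD_* feature with its name, bit and whether it is
 * currently enabled.  Returns the number of characters written, or an
 * error code if the feature mask cannot be read from the SMU.
 */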
3156 static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3157 {
3158 static const char *ppfeature_name[] = {
3159 "DPM_PREFETCHER",
3160 "GFXCLK_DPM",
3161 "UCLK_DPM",
3162 "SOCCLK_DPM",
3163 "UVD_DPM",
3164 "VCE_DPM",
3165 "ULV",
3166 "MP0CLK_DPM",
3167 "LINK_DPM",
3168 "DCEFCLK_DPM",
3169 "GFXCLK_DS",
3170 "SOCCLK_DS",
3171 "LCLK_DS",
3172 "PPT",
3173 "TDC",
3174 "THERMAL",
3175 "GFX_PER_CU_CG",
3176 "RM",
3177 "DCEFCLK_DS",
3178 "ACDC",
3179 "VR0HOT",
3180 "VR1HOT",
3181 "FW_CTF",
3182 "LED_DISPLAY",
3183 "FAN_CONTROL",
3184 "GFX_EDC",
3185 "GFXOFF",
3186 "CG",
3187 "FCLK_DPM",
3188 "FCLK_DS",
3189 "MP1CLK_DS",
3190 "MP0CLK_DS",
3191 "XGMI",
3192 "ECC"};
3193 static const char *output_title[] = {
3194 "FEATURES",
3195 "BITMASK",
3196 "ENABLEMENT"};
3197 uint64_t features_enabled;
3198 int i;
3199 int ret = 0;
3200 int size = 0;
3201
3202 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3203 PP_ASSERT_WITH_CODE(!ret,
3204 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
3205 return ret);
3206
3207 size += sprintf(buf + size, "Current ppfeatures: 0x%016"PRIx64"\n", features_enabled);
3208 size += sprintf(buf + size, "%-19s %-22s %s\n",
3209 output_title[0],
3210 output_title[1],
3211 output_title[2]);
3212 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
3213 size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
3214 ppfeature_name[i],
3215 1ULL << i,
3216 (features_enabled & (1ULL << i)) ? "Y" : "N");
3217 }
3218
3219 return size;
3220 }
3221
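/*
 * Apply a new ppfeature mask: disable the features that are currently on
 * but cleared in the new mask, then enable the ones that are off but set.
 * Masks with bits at or above GNLD_FEATURES_MAX are rejected.
 */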
3222 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
3223 {
3224 uint64_t features_enabled;
3225 uint64_t features_to_enable;
3226 uint64_t features_to_disable;
3227 int ret = 0;
3228
3229 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
3230 return -EINVAL;
3231
3232 ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
3233 if (ret)
3234 return ret;
3235
3236 features_to_disable =
3237 features_enabled & ~new_ppfeature_masks;
3238 features_to_enable =
3239 ~features_enabled & new_ppfeature_masks;
3240
3241 pr_debug("features_to_disable 0x%"PRIx64"\n", features_to_disable);
3242 pr_debug("features_to_enable 0x%"PRIx64"\n", features_to_enable);
3243
3244 if (features_to_disable) {
3245 ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
3246 if (ret)
3247 return ret;
3248 }
3249
3250 if (features_to_enable) {
3251 ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
3252 if (ret)
3253 return ret;
3254 }
3255
3256 return 0;
3257 }
3258
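/*
 * Back end for the pp_dpm_<clk> and pp_od_clk_voltage sysfs reads: print
 * the DPM levels, PCIe link levels or overdrive settings/ranges for the
 * requested clock type into 'buf', marking the current level with '*'.
 */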
3259 static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3260 enum pp_clock_type type, char *buf)
3261 {
3262 struct vega20_hwmgr *data =
3263 (struct vega20_hwmgr *)(hwmgr->backend);
3264 struct vega20_od8_single_setting *od8_settings =
3265 data->od8_settings.od8_settings_array;
3266 OverDriveTable_t *od_table =
3267 &(data->smc_state_table.overdrive_table);
3268 struct phm_ppt_v3_information *pptable_information =
3269 (struct phm_ppt_v3_information *)hwmgr->pptable;
3270 PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
3271 struct amdgpu_device *adev = hwmgr->adev;
3272 struct pp_clock_levels_with_latency clocks;
3273 struct vega20_single_dpm_table *fclk_dpm_table =
3274 &(data->dpm_table.fclk_table);
3275 int i, now, size = 0;
3276 int ret = 0;
3277 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
3278
3279 switch (type) {
3280 case PP_SCLK:
3281 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
3282 PP_ASSERT_WITH_CODE(!ret,
3283 "Attempt to get current gfx clk Failed!",
3284 return ret);
3285
3286 if (vega20_get_sclks(hwmgr, &clocks)) {
3287 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3288 now / 100);
3289 break;
3290 }
3291
3292 for (i = 0; i < clocks.num_levels; i++)
3293 size += sprintf(buf + size, "%d: %uMhz %s\n",
3294 i, clocks.data[i].clocks_in_khz / 1000,
3295 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3296 break;
3297
3298 case PP_MCLK:
3299 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
3300 PP_ASSERT_WITH_CODE(!ret,
3301 "Attempt to get current mclk freq Failed!",
3302 return ret);
3303
3304 if (vega20_get_memclocks(hwmgr, &clocks)) {
3305 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3306 now / 100);
3307 break;
3308 }
3309
3310 for (i = 0; i < clocks.num_levels; i++)
3311 size += sprintf(buf + size, "%d: %uMhz %s\n",
3312 i, clocks.data[i].clocks_in_khz / 1000,
3313 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3314 break;
3315
3316 case PP_SOCCLK:
3317 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
3318 PP_ASSERT_WITH_CODE(!ret,
3319 "Attempt to get current socclk freq Failed!",
3320 return ret);
3321
3322 if (vega20_get_socclocks(hwmgr, &clocks)) {
3323 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3324 now / 100);
3325 break;
3326 }
3327
3328 for (i = 0; i < clocks.num_levels; i++)
3329 size += sprintf(buf + size, "%d: %uMhz %s\n",
3330 i, clocks.data[i].clocks_in_khz / 1000,
3331 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3332 break;
3333
3334 case PP_FCLK:
3335 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
3336 PP_ASSERT_WITH_CODE(!ret,
3337 "Attempt to get current fclk freq Failed!",
3338 return ret);
3339
3340 for (i = 0; i < fclk_dpm_table->count; i++)
3341 size += sprintf(buf + size, "%d: %uMhz %s\n",
3342 i, fclk_dpm_table->dpm_levels[i].value,
3343 fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
3344 break;
3345
3346 case PP_DCEFCLK:
3347 ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
3348 PP_ASSERT_WITH_CODE(!ret,
3349 "Attempt to get current dcefclk freq Failed!",
3350 return ret);
3351
3352 if (vega20_get_dcefclocks(hwmgr, &clocks)) {
3353 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3354 now / 100);
3355 break;
3356 }
3357
3358 for (i = 0; i < clocks.num_levels; i++)
3359 size += sprintf(buf + size, "%d: %uMhz %s\n",
3360 i, clocks.data[i].clocks_in_khz / 1000,
3361 (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
3362 break;
3363
3364 case PP_PCIE:
3365 current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
3366 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
3367 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
3368 current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
3369 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
3370 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
3371 for (i = 0; i < NUM_LINK_LEVELS; i++) {
3372 if (i == 1 && data->pcie_parameters_override) {
3373 gen_speed = data->pcie_gen_level1;
3374 lane_width = data->pcie_width_level1;
3375 } else {
3376 gen_speed = pptable->PcieGenSpeed[i];
3377 lane_width = pptable->PcieLaneCount[i];
3378 }
3379 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
3380 (gen_speed == 0) ? "2.5GT/s," :
3381 (gen_speed == 1) ? "5.0GT/s," :
3382 (gen_speed == 2) ? "8.0GT/s," :
3383 (gen_speed == 3) ? "16.0GT/s," : "",
3384 (lane_width == 1) ? "x1" :
3385 (lane_width == 2) ? "x2" :
3386 (lane_width == 3) ? "x4" :
3387 (lane_width == 4) ? "x8" :
3388 (lane_width == 5) ? "x12" :
3389 (lane_width == 6) ? "x16" : "",
3390 pptable->LclkFreq[i],
3391 (current_gen_speed == gen_speed) &&
3392 (current_lane_width == lane_width) ?
3393 "*" : "");
3394 }
3395 break;
3396
3397 case OD_SCLK:
3398 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3399 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3400 size = sprintf(buf, "%s:\n", "OD_SCLK");
3401 size += sprintf(buf + size, "0: %10uMhz\n",
3402 od_table->GfxclkFmin);
3403 size += sprintf(buf + size, "1: %10uMhz\n",
3404 od_table->GfxclkFmax);
3405 }
3406 break;
3407
3408 case OD_MCLK:
3409 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3410 size = sprintf(buf, "%s:\n", "OD_MCLK");
3411 size += sprintf(buf + size, "1: %10uMhz\n",
3412 od_table->UclkFmax);
3413 }
3414
3415 break;
3416
3417 case OD_VDDC_CURVE:
3418 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3419 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3420 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3421 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3422 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3423 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3424 size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
3425 size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
3426 od_table->GfxclkFreq1,
3427 od_table->GfxclkVolt1 / VOLTAGE_SCALE);
3428 size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
3429 od_table->GfxclkFreq2,
3430 od_table->GfxclkVolt2 / VOLTAGE_SCALE);
3431 size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
3432 od_table->GfxclkFreq3,
3433 od_table->GfxclkVolt3 / VOLTAGE_SCALE);
3434 }
3435
3436 break;
3437
3438 case OD_RANGE:
3439 size = sprintf(buf, "%s:\n", "OD_RANGE");
3440
3441 if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
3442 od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
3443 size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
3444 od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
3445 od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
3446 }
3447
3448 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3449 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
3450 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3451 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3452 }
3453
3454 if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
3455 od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
3456 od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
3457 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
3458 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
3459 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
3460 size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
3461 od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
3462 od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
3463 size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
3464 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
3465 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
3466 size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
3467 od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
3468 od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
3469 size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
3470 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
3471 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
3472 size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
3473 od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
3474 od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
3475 size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
3476 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
3477 od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
3478 }
3479
3480 break;
3481 default:
3482 break;
3483 }
3484 return size;
3485 }
3486
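/*
 * Pin UCLK at its highest DPM level by raising the hard minimum to the
 * top entry of the given memory clock table.  Only effective while UCLK
 * DPM is enabled; used around display configuration changes.
 */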
3487 static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
3488 struct vega20_single_dpm_table *dpm_table)
3489 {
3490 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3491 int ret = 0;
3492
3493 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
3494 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3495 				"[SetUclkToHighestDpmLevel] Dpm table has no entry!",
3496 return -EINVAL);
3497 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
3498 				"[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
3499 return -EINVAL);
3500
3501 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3502 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3503 PPSMC_MSG_SetHardMinByFreq,
3504 (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
3505 				"[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
3506 return ret);
3507 }
3508
3509 return ret;
3510 }
3511
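/*
 * Likewise pin FCLK at its highest DPM level, but via the soft minimum,
 * while FCLK DPM is enabled.
 */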
3512 static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
3513 {
3514 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3515 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
3516 int ret = 0;
3517
3518 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
3519 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3520 				"[SetFclkToHighestDpmLevel] Dpm table has no entry!",
3521 return -EINVAL);
3522 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
3523 				"[SetFclkToHighestDpmLevel] Dpm table has too many entries!",
3524 return -EINVAL);
3525
3526 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3527 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3528 PPSMC_MSG_SetSoftMinByFreq,
3529 (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
3530 				"[SetFclkToHighestDpmLevel] Set soft min fclk failed!",
3531 return ret);
3532 }
3533
3534 return ret;
3535 }
3536
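/*
 * Before a display configuration change: report zero active displays to
 * the SMU and force UCLK/FCLK to their highest levels so memory bandwidth
 * stays available while the new configuration is being applied.
 */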
3537 static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3538 {
3539 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3540 int ret = 0;
3541
3542 smum_send_msg_to_smc_with_parameter(hwmgr,
3543 PPSMC_MSG_NumOfDisplays, 0);
3544
3545 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
3546 &data->dpm_table.mem_table);
3547 if (ret)
3548 return ret;
3549
3550 return vega20_set_fclk_to_highest_dpm_level(hwmgr);
3551 }
3552
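/*
 * After the display configuration has changed: upload the watermark table
 * to the SMU if it exists but has not been loaded yet, and report the new
 * display count when DCEFCLK and SOCCLK DPM are supported.
 */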
3553 static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3554 {
3555 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3556 int result = 0;
3557 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
3558
3559 if ((data->water_marks_bitmap & WaterMarksExist) &&
3560 !(data->water_marks_bitmap & WaterMarksLoaded)) {
3561 result = smum_smc_table_manager(hwmgr,
3562 (uint8_t *)wm_table, TABLE_WATERMARKS, false);
3563 PP_ASSERT_WITH_CODE(!result,
3564 "Failed to update WMTABLE!",
3565 return result);
3566 data->water_marks_bitmap |= WaterMarksLoaded;
3567 }
3568
3569 if ((data->water_marks_bitmap & WaterMarksExist) &&
3570 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
3571 data->smu_features[GNLD_DPM_SOCCLK].supported) {
3572 result = smum_send_msg_to_smc_with_parameter(hwmgr,
3573 PPSMC_MSG_NumOfDisplays,
3574 hwmgr->display_config->num_display);
3575 }
3576
3577 return result;
3578 }
3579
3580 int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
3581 {
3582 struct vega20_hwmgr *data =
3583 (struct vega20_hwmgr *)(hwmgr->backend);
3584 int ret = 0;
3585
3586 if (data->smu_features[GNLD_DPM_UVD].supported) {
3587 if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
3588 if (enable)
3589 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
3590 else
3591 PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
3592 }
3593
3594 ret = vega20_enable_smc_features(hwmgr,
3595 enable,
3596 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
3597 PP_ASSERT_WITH_CODE(!ret,
3598 "[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
3599 return ret);
3600 data->smu_features[GNLD_DPM_UVD].enabled = enable;
3601 }
3602
3603 return 0;
3604 }
3605
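/*
 * Gate or ungate the VCE block.  VCE DPM is turned off before gating and
 * back on only after ungating, so the SMU never drives a powered-down
 * block.
 */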
3606 static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
3607 {
3608 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3609
3610 if (data->vce_power_gated == bgate)
3611 		return;
3612
3613 data->vce_power_gated = bgate;
3614 if (bgate) {
3615 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3616 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3617 AMD_IP_BLOCK_TYPE_VCE,
3618 AMD_PG_STATE_GATE);
3619 } else {
3620 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
3621 AMD_IP_BLOCK_TYPE_VCE,
3622 AMD_PG_STATE_UNGATE);
3623 vega20_enable_disable_vce_dpm(hwmgr, !bgate);
3624 }
3625
3626 }
3627
3628 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
3629 {
3630 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3631
3632 if (data->uvd_power_gated == bgate)
3633 		return;
3634
3635 data->uvd_power_gated = bgate;
3636 vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
3637 }
3638
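/*
 * Recompute the soft/hard min/max of every DPM table (gfxclk, memclk,
 * fclk, vclk, dclk, socclk, eclk) for the current request: honour the UMD
 * pstate levels, DAL's minimum memory clock, and keep mclk/fclk pinned
 * high when multiple unsynchronized displays make switching unsafe.
 */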
3639 static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3640 {
3641 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3642 struct vega20_single_dpm_table *dpm_table;
3643 bool vblank_too_short = false;
3644 bool disable_mclk_switching;
3645 bool disable_fclk_switching;
3646 uint32_t i, latency;
3647
3648 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
3649 !hwmgr->display_config->multi_monitor_in_sync) ||
3650 vblank_too_short;
3651 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3652
3653 /* gfxclk */
3654 dpm_table = &(data->dpm_table.gfx_table);
3655 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3656 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3657 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3658 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3659
3660 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3661 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
3662 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3663 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
3664 }
3665
3666 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3667 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3668 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3669 }
3670
3671 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3672 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3673 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3674 }
3675 }
3676
3677 /* memclk */
3678 dpm_table = &(data->dpm_table.mem_table);
3679 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3680 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3681 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3682 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3683
3684 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3685 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
3686 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3687 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
3688 }
3689
3690 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3691 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3692 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
3693 }
3694
3695 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3696 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3697 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3698 }
3699 }
3700
3701 /* honour DAL's UCLK Hardmin */
3702 if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
3703 dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
3704
3705 /* Hardmin is dependent on displayconfig */
3706 if (disable_mclk_switching) {
3707 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3708 for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
3709 if (data->mclk_latency_table.entries[i].latency <= latency) {
3710 if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
3711 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
3712 break;
3713 }
3714 }
3715 }
3716 }
3717
3718 if (hwmgr->display_config->nb_pstate_switch_disable)
3719 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3720
3721 if ((disable_mclk_switching &&
3722 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3723 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3724 disable_fclk_switching = true;
3725 else
3726 disable_fclk_switching = false;
3727
3728 /* fclk */
3729 dpm_table = &(data->dpm_table.fclk_table);
3730 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3731 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3732 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3733 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3734 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3735 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3736
3737 /* vclk */
3738 dpm_table = &(data->dpm_table.vclk_table);
3739 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3740 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3741 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3742 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3743
3744 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3745 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3746 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3747 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3748 }
3749
3750 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3751 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3752 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3753 }
3754 }
3755
3756 /* dclk */
3757 dpm_table = &(data->dpm_table.dclk_table);
3758 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3759 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3760 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3761 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3762
3763 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3764 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
3765 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3766 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
3767 }
3768
3769 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3770 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3771 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3772 }
3773 }
3774
3775 /* socclk */
3776 dpm_table = &(data->dpm_table.soc_table);
3777 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3778 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3779 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3780 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3781
3782 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3783 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
3784 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3785 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
3786 }
3787
3788 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3789 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3790 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3791 }
3792 }
3793
3794 /* eclk */
3795 dpm_table = &(data->dpm_table.eclk_table);
3796 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3797 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3798 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3799 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3800
3801 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3802 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
3803 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3804 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
3805 }
3806
3807 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3808 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3809 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3810 }
3811 }
3812
3813 return 0;
3814 }
3815
3816 static bool
3817 vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
3818 {
3819 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3820 bool is_update_required = false;
3821
3822 if (data->display_timing.num_existing_displays !=
3823 hwmgr->display_config->num_display)
3824 is_update_required = true;
3825
3826 if (data->registry_data.gfx_clk_deep_sleep_support &&
3827 (data->display_timing.min_clock_in_sr !=
3828 hwmgr->display_config->min_core_set_clock_in_sr))
3829 is_update_required = true;
3830
3831 return is_update_required;
3832 }
3833
3834 static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3835 {
3836 int ret = 0;
3837
3838 ret = vega20_disable_all_smu_features(hwmgr);
3839 PP_ASSERT_WITH_CODE(!ret,
3840 "[DisableDpmTasks] Failed to disable all smu features!",
3841 return ret);
3842
3843 return 0;
3844 }
3845
3846 static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
3847 {
3848 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3849 int result;
3850
3851 result = vega20_disable_dpm_tasks(hwmgr);
3852 PP_ASSERT_WITH_CODE((0 == result),
3853 "[PowerOffAsic] Failed to disable DPM!",
3854 );
3855 data->water_marks_bitmap &= ~(WaterMarksLoaded);
3856
3857 return result;
3858 }
3859
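/*
 * Translate a PP_SMC_POWER_PROFILE_* index into the WORKLOAD_PPLIB_*_BIT
 * position the SMU firmware expects in its workload mask.
 */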
3860 static int conv_power_profile_to_pplib_workload(int power_profile)
3861 {
3862 int pplib_workload = 0;
3863
3864 switch (power_profile) {
3865 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3866 pplib_workload = WORKLOAD_DEFAULT_BIT;
3867 break;
3868 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3869 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3870 break;
3871 case PP_SMC_POWER_PROFILE_POWERSAVING:
3872 pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
3873 break;
3874 case PP_SMC_POWER_PROFILE_VIDEO:
3875 pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
3876 break;
3877 case PP_SMC_POWER_PROFILE_VR:
3878 pplib_workload = WORKLOAD_PPLIB_VR_BIT;
3879 break;
3880 case PP_SMC_POWER_PROFILE_COMPUTE:
3881 pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
3882 break;
3883 case PP_SMC_POWER_PROFILE_CUSTOM:
3884 pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
3885 break;
3886 }
3887
3888 return pplib_workload;
3889 }
3890
3891 static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3892 {
3893 DpmActivityMonitorCoeffInt_t activity_monitor;
3894 uint32_t i, size = 0;
3895 uint16_t workload_type = 0;
3896 static const char *profile_name[] = {
3897 "BOOTUP_DEFAULT",
3898 "3D_FULL_SCREEN",
3899 "POWER_SAVING",
3900 "VIDEO",
3901 "VR",
3902 "COMPUTE",
3903 "CUSTOM"};
3904 static const char *title[] = {
3905 "PROFILE_INDEX(NAME)",
3906 "CLOCK_TYPE(NAME)",
3907 "FPS",
3908 "UseRlcBusy",
3909 "MinActiveFreqType",
3910 "MinActiveFreq",
3911 "BoosterFreqType",
3912 "BoosterFreq",
3913 "PD_Data_limit_c",
3914 "PD_Data_error_coeff",
3915 "PD_Data_error_rate_coeff"};
3916 int result = 0;
3917
3918 if (!buf)
3919 return -EINVAL;
3920
3921 size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
3922 title[0], title[1], title[2], title[3], title[4], title[5],
3923 title[6], title[7], title[8], title[9], title[10]);
3924
3925 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
3926 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
3927 workload_type = conv_power_profile_to_pplib_workload(i);
3928 result = vega20_get_activity_monitor_coeff(hwmgr,
3929 (uint8_t *)(&activity_monitor), workload_type);
3930 PP_ASSERT_WITH_CODE(!result,
3931 "[GetPowerProfile] Failed to get activity monitor!",
3932 return result);
3933
3934 size += sprintf(buf + size, "%2d %14s%s:\n",
3935 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");
3936
3937 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
3938 " ",
3939 0,
3940 "GFXCLK",
3941 activity_monitor.Gfx_FPS,
3942 activity_monitor.Gfx_UseRlcBusy,
3943 activity_monitor.Gfx_MinActiveFreqType,
3944 activity_monitor.Gfx_MinActiveFreq,
3945 activity_monitor.Gfx_BoosterFreqType,
3946 activity_monitor.Gfx_BoosterFreq,
3947 activity_monitor.Gfx_PD_Data_limit_c,
3948 activity_monitor.Gfx_PD_Data_error_coeff,
3949 activity_monitor.Gfx_PD_Data_error_rate_coeff);
3950
3951 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
3952 " ",
3953 1,
3954 "SOCCLK",
3955 activity_monitor.Soc_FPS,
3956 activity_monitor.Soc_UseRlcBusy,
3957 activity_monitor.Soc_MinActiveFreqType,
3958 activity_monitor.Soc_MinActiveFreq,
3959 activity_monitor.Soc_BoosterFreqType,
3960 activity_monitor.Soc_BoosterFreq,
3961 activity_monitor.Soc_PD_Data_limit_c,
3962 activity_monitor.Soc_PD_Data_error_coeff,
3963 activity_monitor.Soc_PD_Data_error_rate_coeff);
3964
3965 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
3966 " ",
3967 2,
3968 "UCLK",
3969 activity_monitor.Mem_FPS,
3970 activity_monitor.Mem_UseRlcBusy,
3971 activity_monitor.Mem_MinActiveFreqType,
3972 activity_monitor.Mem_MinActiveFreq,
3973 activity_monitor.Mem_BoosterFreqType,
3974 activity_monitor.Mem_BoosterFreq,
3975 activity_monitor.Mem_PD_Data_limit_c,
3976 activity_monitor.Mem_PD_Data_error_coeff,
3977 activity_monitor.Mem_PD_Data_error_rate_coeff);
3978
3979 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
3980 " ",
3981 3,
3982 "FCLK",
3983 activity_monitor.Fclk_FPS,
3984 activity_monitor.Fclk_UseRlcBusy,
3985 activity_monitor.Fclk_MinActiveFreqType,
3986 activity_monitor.Fclk_MinActiveFreq,
3987 activity_monitor.Fclk_BoosterFreqType,
3988 activity_monitor.Fclk_BoosterFreq,
3989 activity_monitor.Fclk_PD_Data_limit_c,
3990 activity_monitor.Fclk_PD_Data_error_coeff,
3991 activity_monitor.Fclk_PD_Data_error_rate_coeff);
3992 }
3993
3994 return size;
3995 }
3996
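/*
 * Select the active power profile.  input[size] carries the requested
 * PP_SMC_POWER_PROFILE_* index; for PP_SMC_POWER_PROFILE_CUSTOM, size is
 * either 0 (re-apply the previously configured custom profile) or at
 * least 10, with input[0] choosing the clock (0 gfxclk, 1 socclk, 2 uclk,
 * 3 fclk) and input[1..9] the nine activity monitor coefficients.  Purely
 * as a made-up illustration, a custom gfxclk request could arrive as
 *   input = { 0, 60, 1, 0, 300, 0, 1000, 100, 20, 10, PP_SMC_POWER_PROFILE_CUSTOM }
 * with size == 10.
 */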
3997 static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
3998 {
3999 DpmActivityMonitorCoeffInt_t activity_monitor;
4000 int workload_type, result = 0;
4001 uint32_t power_profile_mode = input[size];
4002
4003 if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
4004 pr_err("Invalid power profile mode %d\n", power_profile_mode);
4005 return -EINVAL;
4006 }
4007
4008 if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4009 struct vega20_hwmgr *data =
4010 (struct vega20_hwmgr *)(hwmgr->backend);
4011 if (size == 0 && !data->is_custom_profile_set)
4012 return -EINVAL;
4013 if (size < 10 && size != 0)
4014 return -EINVAL;
4015
4016 result = vega20_get_activity_monitor_coeff(hwmgr,
4017 (uint8_t *)(&activity_monitor),
4018 WORKLOAD_PPLIB_CUSTOM_BIT);
4019 PP_ASSERT_WITH_CODE(!result,
4020 "[SetPowerProfile] Failed to get activity monitor!",
4021 return result);
4022
4023 /* If size==0, then we want to apply the already-configured
4024 * CUSTOM profile again. Just apply it, since we checked its
4025 * validity above
4026 */
4027 if (size == 0)
4028 goto out;
4029
4030 switch (input[0]) {
4031 case 0: /* Gfxclk */
4032 activity_monitor.Gfx_FPS = input[1];
4033 activity_monitor.Gfx_UseRlcBusy = input[2];
4034 activity_monitor.Gfx_MinActiveFreqType = input[3];
4035 activity_monitor.Gfx_MinActiveFreq = input[4];
4036 activity_monitor.Gfx_BoosterFreqType = input[5];
4037 activity_monitor.Gfx_BoosterFreq = input[6];
4038 activity_monitor.Gfx_PD_Data_limit_c = input[7];
4039 activity_monitor.Gfx_PD_Data_error_coeff = input[8];
4040 activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
4041 break;
4042 case 1: /* Socclk */
4043 activity_monitor.Soc_FPS = input[1];
4044 activity_monitor.Soc_UseRlcBusy = input[2];
4045 activity_monitor.Soc_MinActiveFreqType = input[3];
4046 activity_monitor.Soc_MinActiveFreq = input[4];
4047 activity_monitor.Soc_BoosterFreqType = input[5];
4048 activity_monitor.Soc_BoosterFreq = input[6];
4049 activity_monitor.Soc_PD_Data_limit_c = input[7];
4050 activity_monitor.Soc_PD_Data_error_coeff = input[8];
4051 activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
4052 break;
4053 case 2: /* Uclk */
4054 activity_monitor.Mem_FPS = input[1];
4055 activity_monitor.Mem_UseRlcBusy = input[2];
4056 activity_monitor.Mem_MinActiveFreqType = input[3];
4057 activity_monitor.Mem_MinActiveFreq = input[4];
4058 activity_monitor.Mem_BoosterFreqType = input[5];
4059 activity_monitor.Mem_BoosterFreq = input[6];
4060 activity_monitor.Mem_PD_Data_limit_c = input[7];
4061 activity_monitor.Mem_PD_Data_error_coeff = input[8];
4062 activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
4063 break;
4064 case 3: /* Fclk */
4065 activity_monitor.Fclk_FPS = input[1];
4066 activity_monitor.Fclk_UseRlcBusy = input[2];
4067 activity_monitor.Fclk_MinActiveFreqType = input[3];
4068 activity_monitor.Fclk_MinActiveFreq = input[4];
4069 activity_monitor.Fclk_BoosterFreqType = input[5];
4070 activity_monitor.Fclk_BoosterFreq = input[6];
4071 activity_monitor.Fclk_PD_Data_limit_c = input[7];
4072 activity_monitor.Fclk_PD_Data_error_coeff = input[8];
4073 activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
4074 break;
4075 }
4076
4077 result = vega20_set_activity_monitor_coeff(hwmgr,
4078 (uint8_t *)(&activity_monitor),
4079 WORKLOAD_PPLIB_CUSTOM_BIT);
4080 data->is_custom_profile_set = true;
4081 PP_ASSERT_WITH_CODE(!result,
4082 "[SetPowerProfile] Failed to set activity monitor!",
4083 return result);
4084 }
4085
4086 out:
4087 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
4088 workload_type =
4089 conv_power_profile_to_pplib_workload(power_profile_mode);
4090 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4091 1 << workload_type);
4092
4093 hwmgr->power_profile_mode = power_profile_mode;
4094
4095 return 0;
4096 }
4097
4098 static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4099 uint32_t virtual_addr_low,
4100 uint32_t virtual_addr_hi,
4101 uint32_t mc_addr_low,
4102 uint32_t mc_addr_hi,
4103 uint32_t size)
4104 {
4105 smum_send_msg_to_smc_with_parameter(hwmgr,
4106 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4107 virtual_addr_hi);
4108 smum_send_msg_to_smc_with_parameter(hwmgr,
4109 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4110 virtual_addr_low);
4111 smum_send_msg_to_smc_with_parameter(hwmgr,
4112 PPSMC_MSG_DramLogSetDramAddrHigh,
4113 mc_addr_hi);
4114
4115 smum_send_msg_to_smc_with_parameter(hwmgr,
4116 PPSMC_MSG_DramLogSetDramAddrLow,
4117 mc_addr_low);
4118
4119 smum_send_msg_to_smc_with_parameter(hwmgr,
4120 PPSMC_MSG_DramLogSetDramSize,
4121 size);
4122 return 0;
4123 }
4124
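/*
 * Fill in the thermal trip points: start from the SMU7 delay-policy
 * defaults, then override the edge, hotspot and HBM limits (plus their
 * CTF offsets) from the SMC pptable, scaling by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */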
4125 static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4126 struct PP_TemperatureRange *thermal_data)
4127 {
4128 struct vega20_hwmgr *data =
4129 (struct vega20_hwmgr *)(hwmgr->backend);
4130 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4131
4132 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4133
4134 thermal_data->max = pp_table->TedgeLimit *
4135 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4136 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4137 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4138 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4139 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4140 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4141 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4142 thermal_data->mem_crit_max = pp_table->ThbmLimit *
4143 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4144 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
4145 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4146
4147 return 0;
4148 }
4149
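/*
 * Arbitrate the SMU-owned I2C bus: ask the firmware to hand the bus over
 * (acquire) or take it back (release).  Returns success silently when SMC
 * RAM is not yet running, since requests can arrive before the SMU loads.
 */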
4150 static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
4151 {
4152 int res;
4153
4154 /* I2C bus access can happen very early, when SMU not loaded yet */
4155 if (!vega20_is_smc_ram_running(hwmgr))
4156 return 0;
4157
4158 res = smum_send_msg_to_smc_with_parameter(hwmgr,
4159 (acquire ?
4160 PPSMC_MSG_RequestI2CBus :
4161 PPSMC_MSG_ReleaseI2CBus),
4162 0);
4163
4164 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
4165 return res;
4166 }
4167
4168 static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
4169 enum pp_df_cstate state)
4170 {
4171 int ret;
4172
4173 /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
4174 if (hwmgr->smu_version < 0x283200) {
4175 pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
4176 return -EINVAL;
4177 }
4178
4179 ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
4180 if (ret)
4181 pr_err("SetDfCstate failed!\n");
4182
4183 return ret;
4184 }
4185
4186 static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
4187 uint32_t pstate)
4188 {
4189 int ret;
4190
4191 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
4192 PPSMC_MSG_SetXgmiMode,
4193 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
4194 if (ret)
4195 pr_err("SetXgmiPstate failed!\n");
4196
4197 return ret;
4198 }
4199
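/*
 * Dispatch table wiring the vega20 implementations into the generic
 * powerplay hwmgr interface; installed by vega20_hwmgr_init() below.
 */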
4200 static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
4201 /* init/fini related */
4202 .backend_init = vega20_hwmgr_backend_init,
4203 .backend_fini = vega20_hwmgr_backend_fini,
4204 .asic_setup = vega20_setup_asic_task,
4205 .power_off_asic = vega20_power_off_asic,
4206 .dynamic_state_management_enable = vega20_enable_dpm_tasks,
4207 .dynamic_state_management_disable = vega20_disable_dpm_tasks,
4208 /* power state related */
4209 .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
4210 .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
4211 .display_config_changed = vega20_display_configuration_changed_task,
4212 .check_smc_update_required_for_display_configuration =
4213 vega20_check_smc_update_required_for_display_configuration,
4214 .notify_smc_display_config_after_ps_adjustment =
4215 vega20_notify_smc_display_config_after_ps_adjustment,
4216 /* export to DAL */
4217 .get_sclk = vega20_dpm_get_sclk,
4218 .get_mclk = vega20_dpm_get_mclk,
4219 .get_dal_power_level = vega20_get_dal_power_level,
4220 .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
4221 .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
4222 .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
4223 .display_clock_voltage_request = vega20_display_clock_voltage_request,
4224 .get_performance_level = vega20_get_performance_level,
4225 /* UMD pstate, profile related */
4226 .force_dpm_level = vega20_dpm_force_dpm_level,
4227 .get_power_profile_mode = vega20_get_power_profile_mode,
4228 .set_power_profile_mode = vega20_set_power_profile_mode,
4229 /* od related */
4230 .set_power_limit = vega20_set_power_limit,
4231 .get_sclk_od = vega20_get_sclk_od,
4232 .set_sclk_od = vega20_set_sclk_od,
4233 .get_mclk_od = vega20_get_mclk_od,
4234 .set_mclk_od = vega20_set_mclk_od,
4235 .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
4236 	/* for sysfs to retrieve/set gfxclk/memclk */
4237 .force_clock_level = vega20_force_clock_level,
4238 .print_clock_levels = vega20_print_clock_levels,
4239 .read_sensor = vega20_read_sensor,
4240 .get_ppfeature_status = vega20_get_ppfeature_status,
4241 .set_ppfeature_status = vega20_set_ppfeature_status,
4242 /* powergate related */
4243 .powergate_uvd = vega20_power_gate_uvd,
4244 .powergate_vce = vega20_power_gate_vce,
4245 /* thermal related */
4246 .start_thermal_controller = vega20_start_thermal_controller,
4247 .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
4248 .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
4249 .register_irq_handlers = smu9_register_irq_handlers,
4250 .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
4251 /* fan control related */
4252 .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
4253 .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
4254 .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
4255 .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
4256 .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
4257 .get_fan_control_mode = vega20_get_fan_control_mode,
4258 .set_fan_control_mode = vega20_set_fan_control_mode,
4259 /* smu memory related */
4260 .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
4261 .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
4262 /* BACO related */
4263 .get_asic_baco_capability = vega20_baco_get_capability,
4264 .get_asic_baco_state = vega20_baco_get_state,
4265 .set_asic_baco_state = vega20_baco_set_state,
4266 .set_mp1_state = vega20_set_mp1_state,
4267 .smu_i2c_bus_access = vega20_smu_i2c_bus_access,
4268 .set_df_cstate = vega20_set_df_cstate,
4269 .set_xgmi_pstate = vega20_set_xgmi_pstate,
4270 };
4271
4272 int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
4273 {
4274 hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
4275 hwmgr->pptable_func = &vega20_pptable_funcs;
4276
4277 return 0;
4278 }
4279