1 /* $NetBSD: amdgpu_vega10_hwmgr.c,v 1.4 2021/12/19 12:37:54 riastradh Exp $ */
2
3 /*
4 * Copyright 2016 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_vega10_hwmgr.c,v 1.4 2021/12/19 12:37:54 riastradh Exp $");
28
29 #include <linux/delay.h>
30 #include <linux/fb.h>
31 #include <linux/module.h>
32 #include <linux/pci.h>
33 #include <linux/slab.h>
34
35 #include "hwmgr.h"
36 #include "amd_powerplay.h"
37 #include "hardwaremanager.h"
38 #include "ppatomfwctrl.h"
39 #include "atomfirmware.h"
40 #include "cgs_common.h"
41 #include "vega10_powertune.h"
42 #include "smu9.h"
43 #include "smu9_driver_if.h"
44 #include "vega10_inc.h"
45 #include "soc15_common.h"
46 #include "pppcielanes.h"
47 #include "vega10_hwmgr.h"
48 #include "vega10_smumgr.h"
49 #include "vega10_processpptables.h"
50 #include "vega10_pptable.h"
51 #include "vega10_thermal.h"
52 #include "pp_debug.h"
53 #include "amd_pcie_helpers.h"
54 #include "ppinterrupt.h"
55 #include "pp_overdriver.h"
56 #include "pp_thermal.h"
57 #include "vega10_baco.h"
58
59 #include "smuio/smuio_9_0_offset.h"
60 #include "smuio/smuio_9_0_sh_mask.h"
61
62 #include <linux/nbsd-namespace.h>
63
64 #define HBM_MEMORY_CHANNEL_WIDTH 128
65
66 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
67
68 #define mmDF_CS_AON0_DramBaseAddress0 0x0044
69 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
70
71 //DF_CS_AON0_DramBaseAddress0
72 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
73 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
74 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
75 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
76 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
77 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
78 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
79 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
80 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
81 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
82
83 typedef enum {
84 CLK_SMNCLK = 0,
85 CLK_SOCCLK,
86 CLK_MP0CLK,
87 CLK_MP1CLK,
88 CLK_LCLK,
89 CLK_DCEFCLK,
90 CLK_VCLK,
91 CLK_DCLK,
92 CLK_ECLK,
93 CLK_UCLK,
94 CLK_GFXCLK,
95 CLK_COUNT,
96 } CLOCK_ID_e;
97
98 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
99
cast_phw_vega10_power_state(struct pp_hw_power_state * hw_ps)100 struct vega10_power_state *cast_phw_vega10_power_state(
101 struct pp_hw_power_state *hw_ps)
102 {
103 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
104 "Invalid Powerstate Type!",
105 return NULL;);
106
107 return (struct vega10_power_state *)hw_ps;
108 }
109
cast_const_phw_vega10_power_state(const struct pp_hw_power_state * hw_ps)110 const struct vega10_power_state *cast_const_phw_vega10_power_state(
111 const struct pp_hw_power_state *hw_ps)
112 {
113 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
114 "Invalid Powerstate Type!",
115 return NULL;);
116
117 return (const struct vega10_power_state *)hw_ps;
118 }
119
vega10_set_default_registry_data(struct pp_hwmgr * hwmgr)120 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
121 {
122 struct vega10_hwmgr *data = hwmgr->backend;
123
124 data->registry_data.sclk_dpm_key_disabled =
125 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
126 data->registry_data.socclk_dpm_key_disabled =
127 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
128 data->registry_data.mclk_dpm_key_disabled =
129 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
130 data->registry_data.pcie_dpm_key_disabled =
131 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
132
133 data->registry_data.dcefclk_dpm_key_disabled =
134 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
135
136 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
137 data->registry_data.power_containment_support = 1;
138 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
139 data->registry_data.enable_tdc_limit_feature = 1;
140 }
141
142 data->registry_data.clock_stretcher_support =
143 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
144
145 data->registry_data.ulv_support =
146 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
147
148 data->registry_data.sclk_deep_sleep_support =
149 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
150
151 data->registry_data.disable_water_mark = 0;
152
153 data->registry_data.fan_control_support = 1;
154 data->registry_data.thermal_support = 1;
155 data->registry_data.fw_ctf_enabled = 1;
156
157 data->registry_data.avfs_support =
158 hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
159 data->registry_data.led_dpm_enabled = 1;
160
161 data->registry_data.vr0hot_enabled = 1;
162 data->registry_data.vr1hot_enabled = 1;
163 data->registry_data.regulator_hot_gpio_support = 1;
164
165 data->registry_data.didt_support = 1;
166 if (data->registry_data.didt_support) {
167 data->registry_data.didt_mode = 6;
168 data->registry_data.sq_ramping_support = 1;
169 data->registry_data.db_ramping_support = 0;
170 data->registry_data.td_ramping_support = 0;
171 data->registry_data.tcp_ramping_support = 0;
172 data->registry_data.dbr_ramping_support = 0;
173 data->registry_data.edc_didt_support = 1;
174 data->registry_data.gc_didt_support = 0;
175 data->registry_data.psm_didt_support = 0;
176 }
177
178 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
179 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
180 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
181 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
182 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
183 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
184 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
185 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
186 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
187 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
188 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
189 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
190 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
191
192 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
193 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
194 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
195 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
196 }
197
/*
 * Populate hwmgr->platform_descriptor.platformCaps for vega10:
 * set the baseline capabilities, then translate the registry knobs
 * (filled in by vega10_set_default_registry_data) and the pptable
 * contents into power-tune / clock-stretcher / DPM capability bits.
 * Always returns 0.
 */
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct amdgpu_device *adev = hwmgr->adev;

	/* Baseline capabilities present on every vega10 part. */
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* UVD/VCE power gating follow the device's powergating flags. */
	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* ...then re-enable exactly what the registry data asks for. */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	if (data->registry_data.power_containment_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);

	/* Clock stretching needs both pptable data and the registry knob. */
	if (table_info->tdp_table->usClockStretchAmount &&
			data->registry_data.clock_stretcher_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	return 0;
}
307
vega10_odn_initial_default_setting(struct pp_hwmgr * hwmgr)308 static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
309 {
310 struct vega10_hwmgr *data = hwmgr->backend;
311 struct phm_ppt_v2_information *table_info =
312 (struct phm_ppt_v2_information *)(hwmgr->pptable);
313 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
314 struct vega10_odn_vddc_lookup_table *od_lookup_table;
315 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
316 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
317 struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
318 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
319 uint32_t i;
320 int result;
321
322 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
323 if (!result) {
324 data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
325 data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
326 }
327
328 od_lookup_table = &odn_table->vddc_lookup_table;
329 vddc_lookup_table = table_info->vddc_lookup_table;
330
331 for (i = 0; i < vddc_lookup_table->count; i++)
332 od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
333
334 od_lookup_table->count = vddc_lookup_table->count;
335
336 dep_table[0] = table_info->vdd_dep_on_sclk;
337 dep_table[1] = table_info->vdd_dep_on_mclk;
338 dep_table[2] = table_info->vdd_dep_on_socclk;
339 od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
340 od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
341 od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
342
343 for (i = 0; i < 3; i++)
344 smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
345
346 if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
347 odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
348 if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
349 odn_table->min_vddc = dep_table[0]->entries[0].vddc;
350
351 i = od_table[2]->count - 1;
352 od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
353 hwmgr->platform_descriptor.overdriveLimit.memoryClock :
354 od_table[2]->entries[i].clk;
355 od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
356 odn_table->max_vddc :
357 od_table[2]->entries[i].vddc;
358
359 return 0;
360 }
361
/*
 * Initialize the DPM feature bookkeeping: map each GNLD_* slot to its
 * SMU feature bit, then mark the subset supported on this board based
 * on registry knobs, platform caps, SMU firmware version and PCI IDs.
 * Finally read the serial number from the SMU to build adev->unique_id.
 */
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	int i;
	uint32_t sub_vendor_id, hw_revision;
	uint32_t top32, bottom32;
	struct amdgpu_device *adev = hwmgr->adev;

	vega10_initialize_power_tune_defaults(hwmgr);

	/* Start from a clean slate: unknown ID, unique bit, all disabled. */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_id = 0xffff;
		data->smu_features[i].smu_feature_bitmap = 1 << i;
		data->smu_features[i].enabled = false;
		data->smu_features[i].supported = false;
	}

	/* Static GNLD_* slot -> SMU FEATURE_*_BIT mapping. */
	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_AVFS].smu_feature_id =
			FEATURE_AVFS_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;

	/* Supported flags follow the registry knobs / platform caps. */
	if (!data->registry_data.prefetcher_dpm_key_disabled)
		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;

	if (!data->registry_data.sclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_GFXCLK].supported = true;

	if (!data->registry_data.mclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_UCLK].supported = true;

	if (!data->registry_data.socclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_SOCCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_UVDDPM))
		data->smu_features[GNLD_DPM_UVD].supported = true;

	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
		data->smu_features[GNLD_DPM_VCE].supported = true;

	if (!data->registry_data.pcie_dpm_key_disabled)
		data->smu_features[GNLD_DPM_LINK].supported = true;

	if (!data->registry_data.dcefclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
	    data->registry_data.sclk_deep_sleep_support) {
		/* Deep sleep enables all four DS clock domains together. */
		data->smu_features[GNLD_DS_GFXCLK].supported = true;
		data->smu_features[GNLD_DS_SOCCLK].supported = true;
		data->smu_features[GNLD_DS_LCLK].supported = true;
		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
	}

	if (data->registry_data.enable_pkg_pwr_tracking_feature)
		data->smu_features[GNLD_PPT].supported = true;

	if (data->registry_data.enable_tdc_limit_feature)
		data->smu_features[GNLD_TDC].supported = true;

	if (data->registry_data.thermal_support)
		data->smu_features[GNLD_THERMAL].supported = true;

	if (data->registry_data.fan_control_support)
		data->smu_features[GNLD_FAN_CONTROL].supported = true;

	if (data->registry_data.fw_ctf_enabled)
		data->smu_features[GNLD_FW_CTF].supported = true;

	if (data->registry_data.avfs_support)
		data->smu_features[GNLD_AVFS].supported = true;

	if (data->registry_data.led_dpm_enabled)
		data->smu_features[GNLD_LED_DISPLAY].supported = true;

	if (data->registry_data.vr1hot_enabled)
		data->smu_features[GNLD_VR1HOT].supported = true;

	if (data->registry_data.vr0hot_enabled)
		data->smu_features[GNLD_VR0HOT].supported = true;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
	hwmgr->smu_version = smum_get_argument(hwmgr);
	/* ACG firmware has major version 5 */
	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
		data->smu_features[GNLD_ACG].supported = true;
	if (data->registry_data.didt_support)
		data->smu_features[GNLD_DIDT].supported = true;

	hw_revision = adev->pdev->revision;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	/* PCC limit control only on specific non-AMD-branded SKUs, rev 0. */
	if ((hwmgr->chip_id == 0x6862 ||
		hwmgr->chip_id == 0x6861 ||
		hwmgr->chip_id == 0x6868) &&
		(hw_revision == 0) &&
		(sub_vendor_id != 0x1002))
		data->smu_features[GNLD_PCC_LIMIT].supported = true;

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
520
521 #ifdef PPLIB_VEGA10_EVV_SUPPORT
/*
 * Find the SOCCLK corresponding to a virtual (leakage) voltage ID.
 *
 * @param hwmgr              powerplay hardware manager handle
 * @param lookup_table       VDDC voltage lookup table (index -> voltage)
 * @param virtual_voltage_id leakage ID to locate (0xff01 ~ 0xff08)
 * @param socclk             out: clock of the matching socclk entry
 * @return 0 on success, -EINVAL if the lookup table is empty or the
 *         ID is not referenced by the socclk dependency table
 */
static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *lookup_table,
		uint16_t virtual_voltage_id, int32_t *socclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
			"Lookup table is empty",
			return -EINVAL);

	/*
	 * search for leakage voltage ID 0xff01 ~ 0xff08 in the socclk
	 * dependency table.  Note: the loop previously iterated over
	 * vdd_dep_on_sclk->count while indexing vdd_dep_on_socclk's
	 * entries, which could read past the socclk table when the sclk
	 * table is longer; bound the loop by the table actually indexed
	 * (consistent with the assertion below).
	 */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_socclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
			"Can't find requested voltage id in vdd_dep_on_socclk table!",
			return -EINVAL);

	*socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;

	return 0;
}
550
551 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
552 /**
553 * Get Leakage VDDC based on leakage ID.
554 *
555 * @param hwmgr the address of the powerplay hardware manager.
556 * @return always 0.
557 */
vega10_get_evv_voltages(struct pp_hwmgr * hwmgr)558 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
559 {
560 struct vega10_hwmgr *data = hwmgr->backend;
561 uint16_t vv_id;
562 uint32_t vddc = 0;
563 uint16_t i, j;
564 uint32_t sclk = 0;
565 struct phm_ppt_v2_information *table_info =
566 (struct phm_ppt_v2_information *)hwmgr->pptable;
567 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
568 table_info->vdd_dep_on_socclk;
569 int result;
570
571 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
572 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
573
574 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
575 table_info->vddc_lookup_table, vv_id, &sclk)) {
576 if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
577 for (j = 1; j < socclk_table->count; j++) {
578 if (socclk_table->entries[j].clk == sclk &&
579 socclk_table->entries[j].cks_enable == 0) {
580 sclk += 5000;
581 break;
582 }
583 }
584 }
585
586 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
587 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
588 "Error retrieving EVV voltage value!",
589 continue);
590
591
592 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
593 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
594 "Invalid VDDC value", result = -EINVAL;);
595
596 /* the voltage should not be zero nor equal to leakage ID */
597 if (vddc != 0 && vddc != vv_id) {
598 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
599 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
600 data->vddc_leakage.count++;
601 }
602 }
603 }
604
605 return 0;
606 }
607
608 /**
609 * Change virtual leakage voltage to actual value.
610 *
611 * @param hwmgr the address of the powerplay hardware manager.
612 * @param pointer to changing voltage
613 * @param pointer to leakage table
614 */
vega10_patch_with_vdd_leakage(struct pp_hwmgr * hwmgr,uint16_t * voltage,struct vega10_leakage_voltage * leakage_table)615 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
616 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
617 {
618 uint32_t index;
619
620 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
621 for (index = 0; index < leakage_table->count; index++) {
622 /* if this voltage matches a leakage voltage ID */
623 /* patch with actual leakage voltage */
624 if (leakage_table->leakage_id[index] == *voltage) {
625 *voltage = leakage_table->actual_voltage[index];
626 break;
627 }
628 }
629
630 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
631 pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
632 }
633
634 /**
635 * Patch voltage lookup table by EVV leakages.
636 *
637 * @param hwmgr the address of the powerplay hardware manager.
638 * @param pointer to voltage lookup table
639 * @param pointer to leakage table
640 * @return always 0
641 */
/*
 * Patch voltage lookup table by EVV leakages.
 *
 * Rewrites every entry's us_vdd in place, replacing virtual leakage
 * IDs with measured voltages.  Always returns 0.
 */
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *lookup_table,
		struct vega10_leakage_voltage *leakage_table)
{
	uint32_t idx;

	for (idx = 0; idx < lookup_table->count; idx++)
		vega10_patch_with_vdd_leakage(hwmgr,
				&lookup_table->entries[idx].us_vdd,
				leakage_table);

	return 0;
}
654
/*
 * Patch a single clock-limit VDDC value through the leakage table.
 * Thin wrapper around vega10_patch_with_vdd_leakage; always returns 0.
 */
static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
		struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
		uint16_t *vddc)
{
	/* vddc is already uint16_t *; no cast needed. */
	vega10_patch_with_vdd_leakage(hwmgr, vddc, leakage_table);

	return 0;
}
663 #endif
664
vega10_patch_voltage_dependency_tables_with_lookup_table(struct pp_hwmgr * hwmgr)665 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
666 struct pp_hwmgr *hwmgr)
667 {
668 uint8_t entry_id, voltage_id;
669 unsigned i;
670 struct phm_ppt_v2_information *table_info =
671 (struct phm_ppt_v2_information *)(hwmgr->pptable);
672 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
673 table_info->mm_dep_table;
674 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
675 table_info->vdd_dep_on_mclk;
676
677 for (i = 0; i < 6; i++) {
678 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
679 switch (i) {
680 case 0: vdt = table_info->vdd_dep_on_socclk; break;
681 case 1: vdt = table_info->vdd_dep_on_sclk; break;
682 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
683 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
684 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
685 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
686 }
687
688 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
689 voltage_id = vdt->entries[entry_id].vddInd;
690 vdt->entries[entry_id].vddc =
691 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
692 }
693 }
694
695 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
696 voltage_id = mm_table->entries[entry_id].vddcInd;
697 mm_table->entries[entry_id].vddc =
698 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
699 }
700
701 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
702 voltage_id = mclk_table->entries[entry_id].vddInd;
703 mclk_table->entries[entry_id].vddc =
704 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
705 voltage_id = mclk_table->entries[entry_id].vddciInd;
706 mclk_table->entries[entry_id].vddci =
707 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
708 voltage_id = mclk_table->entries[entry_id].mvddInd;
709 mclk_table->entries[entry_id].mvdd =
710 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
711 }
712
713
714 return 0;
715
716 }
717
vega10_sort_lookup_table(struct pp_hwmgr * hwmgr,struct phm_ppt_v1_voltage_lookup_table * lookup_table)718 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
719 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
720 {
721 uint32_t table_size, i, j;
722
723 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
724 "Lookup table is empty", return -EINVAL);
725
726 table_size = lookup_table->count;
727
728 /* Sorting voltages */
729 for (i = 0; i < table_size - 1; i++) {
730 for (j = i + 1; j > 0; j--) {
731 if (lookup_table->entries[j].us_vdd <
732 lookup_table->entries[j - 1].us_vdd) {
733 swap(lookup_table->entries[j - 1],
734 lookup_table->entries[j]);
735 }
736 }
737 }
738
739 return 0;
740 }
741
vega10_complete_dependency_tables(struct pp_hwmgr * hwmgr)742 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
743 {
744 int result = 0;
745 int tmp_result;
746 struct phm_ppt_v2_information *table_info =
747 (struct phm_ppt_v2_information *)(hwmgr->pptable);
748 #ifdef PPLIB_VEGA10_EVV_SUPPORT
749 struct vega10_hwmgr *data = hwmgr->backend;
750
751 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
752 table_info->vddc_lookup_table, &(data->vddc_leakage));
753 if (tmp_result)
754 result = tmp_result;
755
756 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
757 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
758 if (tmp_result)
759 result = tmp_result;
760 #endif
761
762 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
763 if (tmp_result)
764 result = tmp_result;
765
766 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
767 if (tmp_result)
768 result = tmp_result;
769
770 return result;
771 }
772
vega10_set_private_data_based_on_pptable(struct pp_hwmgr * hwmgr)773 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
774 {
775 struct phm_ppt_v2_information *table_info =
776 (struct phm_ppt_v2_information *)(hwmgr->pptable);
777 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
778 table_info->vdd_dep_on_socclk;
779 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
780 table_info->vdd_dep_on_mclk;
781
782 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
783 "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
784 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
785 "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
786
787 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
788 "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
789 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
790 "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
791
792 table_info->max_clock_voltage_on_ac.sclk =
793 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
794 table_info->max_clock_voltage_on_ac.mclk =
795 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
796 table_info->max_clock_voltage_on_ac.vddc =
797 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
798 table_info->max_clock_voltage_on_ac.vddci =
799 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
800
801 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
802 table_info->max_clock_voltage_on_ac.sclk;
803 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
804 table_info->max_clock_voltage_on_ac.mclk;
805 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
806 table_info->max_clock_voltage_on_ac.vddc;
807 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
808 table_info->max_clock_voltage_on_ac.vddci;
809
810 return 0;
811 }
812
vega10_hwmgr_backend_fini(struct pp_hwmgr * hwmgr)813 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
814 {
815 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
816 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
817
818 kfree(hwmgr->backend);
819 hwmgr->backend = NULL;
820
821 return 0;
822 }
823
/* One-time backend setup: allocate the private data block, discover the
 * voltage control methods for each rail, parse the powerplay table, and
 * record board/fan/memory parameters. */
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega10_hwmgr *data;
	uint32_t config_telemetry = 0;
	struct pp_atomfwctrl_voltage_table vol_table;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	/* Default to the bootup power profile until a client selects one. */
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega10_set_default_registry_data(hwmgr);
	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;

	/* VDDCR_SOC: SVID2 control is mandatory on Vega10; its telemetry
	 * slope/offset are packed into the low 16 bits of config_telemetry. */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
					(vol_table.telemetry_offset & 0xff);
			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	} else {
		/* Unsupported board: release the backend before bailing out. */
		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
				return -1);
	}

	/* MVDDC: optional; its telemetry goes into the high 16 bits. */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry |=
					((vol_table.telemetry_slope << 24) & 0xff000000) |
					((vol_table.telemetry_offset << 16) & 0xff0000);
			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	/* VDDCI_MEM: GPIO-controlled only if the platform cap allows it. */
	if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
	}

	data->config_telemetry = config_telemetry;

	vega10_set_features_platform_caps(hwmgr);

	vega10_init_dpm_defaults(hwmgr);

#ifdef PPLIB_VEGA10_EVV_SUPPORT
	/* Get leakage voltage based on leakage ID. */
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed.  Abort Driver loading!",
			return -1);
#endif

	/* Patch our voltage dependency table with actual leakage voltage
	 * We need to perform leakage translation before it's used by other functions
	 */
	vega10_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega10_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA10_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	/* VF guests stop here; the remainder touches fan/thermal/memory HW. */
	if (!hwmgr->not_vf)
		return result;

	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.
			advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.
			ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.
			advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	/* Memory channel count is read from the data-fabric base-address
	 * register; it must index into the channel_number[] table. */
	data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
			"Mem Channel Index Exceeded maximum!",
			return -EINVAL);

	return result;
}
948
vega10_init_sclk_threshold(struct pp_hwmgr * hwmgr)949 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
950 {
951 struct vega10_hwmgr *data = hwmgr->backend;
952
953 data->low_sclk_interrupt_threshold = 0;
954
955 return 0;
956 }
957
vega10_setup_dpm_led_config(struct pp_hwmgr * hwmgr)958 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
959 {
960 struct vega10_hwmgr *data = hwmgr->backend;
961 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
962
963 struct pp_atomfwctrl_voltage_table table;
964 uint8_t i, j;
965 uint32_t mask = 0;
966 uint32_t tmp;
967 int32_t ret = 0;
968
969 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
970 VOLTAGE_OBJ_GPIO_LUT, &table);
971
972 if (!ret) {
973 tmp = table.mask_low;
974 for (i = 0, j = 0; i < 32; i++) {
975 if (tmp & 1) {
976 mask |= (uint32_t)(i << (8 * j));
977 if (++j >= 3)
978 break;
979 }
980 tmp >>= 1;
981 }
982 }
983
984 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
985 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
986 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
987 return 0;
988 }
989
vega10_setup_asic_task(struct pp_hwmgr * hwmgr)990 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
991 {
992 if (!hwmgr->not_vf)
993 return 0;
994
995 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
996 "Failed to init sclk threshold!",
997 return -EINVAL);
998
999 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
1000 "Failed to set up led dpm config!",
1001 return -EINVAL);
1002
1003 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
1004
1005 return 0;
1006 }
1007
1008 /**
1009 * Remove repeated voltage values and create table with unique values.
1010 *
1011 * @param hwmgr the address of the powerplay hardware manager.
1012 * @param vol_table the pointer to changing voltage table
1013 * @return 0 in success
1014 */
1015
vega10_trim_voltage_table(struct pp_hwmgr * hwmgr,struct pp_atomfwctrl_voltage_table * vol_table)1016 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
1017 struct pp_atomfwctrl_voltage_table *vol_table)
1018 {
1019 uint32_t i, j;
1020 uint16_t vvalue;
1021 bool found = false;
1022 struct pp_atomfwctrl_voltage_table *table;
1023
1024 PP_ASSERT_WITH_CODE(vol_table,
1025 "Voltage Table empty.", return -EINVAL);
1026 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
1027 GFP_KERNEL);
1028
1029 if (!table)
1030 return -ENOMEM;
1031
1032 table->mask_low = vol_table->mask_low;
1033 table->phase_delay = vol_table->phase_delay;
1034
1035 for (i = 0; i < vol_table->count; i++) {
1036 vvalue = vol_table->entries[i].value;
1037 found = false;
1038
1039 for (j = 0; j < table->count; j++) {
1040 if (vvalue == table->entries[j].value) {
1041 found = true;
1042 break;
1043 }
1044 }
1045
1046 if (!found) {
1047 table->entries[table->count].value = vvalue;
1048 table->entries[table->count].smio_low =
1049 vol_table->entries[i].smio_low;
1050 table->count++;
1051 }
1052 }
1053
1054 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1055 kfree(table);
1056
1057 return 0;
1058 }
1059
/* Build the MVDD voltage table from the MCLK dependency table and trim
 * duplicate voltage values. */
static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	int idx;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	/* One entry per dependency row; SMIO is unused for this rail. */
	for (idx = 0; idx < vol_table->count; idx++) {
		vol_table->entries[idx].value = dep_table->entries[idx].mvdd;
		vol_table->entries[idx].smio_low = 0;
	}

	PP_ASSERT_WITH_CODE(vega10_trim_voltage_table(hwmgr, vol_table) == 0,
			"Failed to trim MVDD Table!",
			return -1);

	return 0;
}
1086
/* Build the VDDCI voltage table from the MCLK dependency table and trim
 * duplicate voltage values. */
static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	uint32_t idx;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	/* Mirror the VDDCI of every dependency entry; SMIO is unused. */
	for (idx = 0; idx < dep_table->count; idx++) {
		vol_table->entries[idx].value = dep_table->entries[idx].vddci;
		vol_table->entries[idx].smio_low = 0;
	}

	PP_ASSERT_WITH_CODE(vega10_trim_voltage_table(hwmgr, vol_table) == 0,
			"Failed to trim VDDCI table.",
			return -1);

	return 0;
}
1112
/* Build the VDDC voltage table from the SCLK dependency table.  Unlike the
 * MVDD/VDDCI variants, duplicates are intentionally kept here. */
static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	int idx;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (idx = 0; idx < vol_table->count; idx++) {
		vol_table->entries[idx].value = dep_table->entries[idx].vddc;
		vol_table->entries[idx].smio_low = 0;
	}

	return 0;
}
1134
1135 /* ---- Voltage Tables ----
1136 * If the voltage table would be bigger than
1137 * what will fit into the state table on
1138 * the SMC keep only the higher entries.
1139 */
/* Shrink vol_table to max_vol_steps entries by dropping the lowest
 * voltages and keeping the highest ones. */
static void vega10_trim_voltage_table_to_fit_state_table(
		struct pp_hwmgr *hwmgr,
		uint32_t max_vol_steps,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	unsigned int dst, src;

	if (vol_table->count <= max_vol_steps)
		return;

	/* Shift the top max_vol_steps entries down to the start. */
	src = vol_table->count - max_vol_steps;
	for (dst = 0; dst < max_vol_steps; dst++, src++)
		vol_table->entries[dst] = vol_table->entries[src];

	vol_table->count = max_vol_steps;
}
1157
1158 /**
1159 * Create Voltage Tables.
1160 *
1161 * @param hwmgr the address of the powerplay hardware manager.
1162 * @return always 0
1163 */
vega10_construct_voltage_tables(struct pp_hwmgr * hwmgr)1164 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1165 {
1166 struct vega10_hwmgr *data = hwmgr->backend;
1167 struct phm_ppt_v2_information *table_info =
1168 (struct phm_ppt_v2_information *)hwmgr->pptable;
1169 int result;
1170
1171 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1172 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1173 result = vega10_get_mvdd_voltage_table(hwmgr,
1174 table_info->vdd_dep_on_mclk,
1175 &(data->mvdd_voltage_table));
1176 PP_ASSERT_WITH_CODE(!result,
1177 "Failed to retrieve MVDDC table!",
1178 return result);
1179 }
1180
1181 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1182 result = vega10_get_vddci_voltage_table(hwmgr,
1183 table_info->vdd_dep_on_mclk,
1184 &(data->vddci_voltage_table));
1185 PP_ASSERT_WITH_CODE(!result,
1186 "Failed to retrieve VDDCI_MEM table!",
1187 return result);
1188 }
1189
1190 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1191 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1192 result = vega10_get_vdd_voltage_table(hwmgr,
1193 table_info->vdd_dep_on_sclk,
1194 &(data->vddc_voltage_table));
1195 PP_ASSERT_WITH_CODE(!result,
1196 "Failed to retrieve VDDCR_SOC table!",
1197 return result);
1198 }
1199
1200 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1201 "Too many voltage values for VDDC. Trimming to fit state table.",
1202 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1203 16, &(data->vddc_voltage_table)));
1204
1205 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1206 "Too many voltage values for VDDCI. Trimming to fit state table.",
1207 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1208 16, &(data->vddci_voltage_table)));
1209
1210 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1211 "Too many voltage values for MVDD. Trimming to fit state table.",
1212 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1213 16, &(data->mvdd_voltage_table)));
1214
1215
1216 return 0;
1217 }
1218
1219 /*
1220 * @fn vega10_init_dpm_state
1221 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1222 *
1223 * @param dpm_state - the address of the DPM Table to initiailize.
1224 * @return None.
1225 */
vega10_init_dpm_state(struct vega10_dpm_state * dpm_state)1226 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1227 {
1228 dpm_state->soft_min_level = 0xff;
1229 dpm_state->soft_max_level = 0xff;
1230 dpm_state->hard_min_level = 0xff;
1231 dpm_state->hard_max_level = 0xff;
1232 }
1233
vega10_setup_default_single_dpm_table(struct pp_hwmgr * hwmgr,struct vega10_single_dpm_table * dpm_table,struct phm_ppt_v1_clock_voltage_dependency_table * dep_table)1234 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1235 struct vega10_single_dpm_table *dpm_table,
1236 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1237 {
1238 int i;
1239
1240 dpm_table->count = 0;
1241
1242 for (i = 0; i < dep_table->count; i++) {
1243 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1244 dep_table->entries[i].clk) {
1245 dpm_table->dpm_levels[dpm_table->count].value =
1246 dep_table->entries[i].clk;
1247 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1248 dpm_table->count++;
1249 }
1250 }
1251 }
vega10_setup_default_pcie_table(struct pp_hwmgr * hwmgr)1252 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1253 {
1254 struct vega10_hwmgr *data = hwmgr->backend;
1255 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1256 struct phm_ppt_v2_information *table_info =
1257 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1258 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1259 table_info->pcie_table;
1260 uint32_t i;
1261
1262 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1263 "Incorrect number of PCIE States from VBIOS!",
1264 return -1);
1265
1266 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1267 if (data->registry_data.pcieSpeedOverride)
1268 pcie_table->pcie_gen[i] =
1269 data->registry_data.pcieSpeedOverride;
1270 else
1271 pcie_table->pcie_gen[i] =
1272 bios_pcie_table->entries[i].gen_speed;
1273
1274 if (data->registry_data.pcieLaneOverride)
1275 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1276 data->registry_data.pcieLaneOverride);
1277 else
1278 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1279 bios_pcie_table->entries[i].lane_width);
1280 if (data->registry_data.pcieClockOverride)
1281 pcie_table->lclk[i] =
1282 data->registry_data.pcieClockOverride;
1283 else
1284 pcie_table->lclk[i] =
1285 bios_pcie_table->entries[i].pcie_sclk;
1286 }
1287
1288 pcie_table->count = NUM_LINK_LEVELS;
1289
1290 return 0;
1291 }
1292
1293 /*
1294 * This function is to initialize all DPM state tables
1295 * for SMU based on the dependency table.
1296 * Dynamic state patching function will then trim these
1297 * state tables to the allowed range based
1298 * on the power policy or external client requests,
1299 * such as UVD request, etc.
1300 */
/* Build every default DPM table (SOC, GFX, MEM, ECLK, VCLK, DCLK, DCEF,
 * pixel, display, PHY, PCIe) from the VBIOS dependency tables and save a
 * golden copy that later state patching can restore from. */
static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct vega10_single_dpm_table *dpm_table;
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
			table_info->vdd_dep_on_socclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
			table_info->mm_dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
			table_info->vdd_dep_on_dcefclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
			table_info->vdd_dep_on_pixclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
			table_info->vdd_dep_on_dispclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
			table_info->vdd_dep_on_phyclk;

	/* SOCCLK, GFXCLK and MCLK dependency tables are mandatory. */
	PP_ASSERT_WITH_CODE(dep_soc_table,
			"SOCCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
			"SOCCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_gfx_table,
			"GFXCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
			"GFXCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table,
			"MCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table has to have is missing. This table is mandatory",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allow Sclk values */
	dpm_table = &(data->dpm_table.soc_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_soc_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.gfx_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_gfx_table);
	/* If no overdrive engine-clock limit is set, use the highest level. */
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock =
				dpm_table->dpm_levels[dpm_table->count-1].value;
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* Initialize Mclk DPM table based on allow Mclk values */
	data->dpm_table.mem_table.count = 0;
	dpm_table = &(data->dpm_table.mem_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_mclk_table);
	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
				dpm_table->dpm_levels[dpm_table->count-1].value;
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* ECLK levels come from the multimedia dependency table; only the
	 * first level is enabled.  Same pattern for VCLK and DCLK below. */
	data->dpm_table.eclk_table.count = 0;
	dpm_table = &(data->dpm_table.eclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].eclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].eclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	data->dpm_table.vclk_table.count = 0;
	data->dpm_table.dclk_table.count = 0;
	dpm_table = &(data->dpm_table.vclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].vclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].vclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.dclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].dclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].dclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* Assume there is no headless Vega10 for now */
	dpm_table = &(data->dpm_table.dcef_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_dcef_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.pixel_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_pix_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.display_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_disp_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.phy_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_phy_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	vega10_setup_default_pcie_table(hwmgr);

	/* Zero out the saved copy of the CUSTOM profile
	 * This will be checked when trying to set the profile
	 * and will require that new values be passed in
	 */
	data->custom_profile_mode[0] = 0;
	data->custom_profile_mode[1] = 0;
	data->custom_profile_mode[2] = 0;
	data->custom_profile_mode[3] = 0;

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega10_dpm_table));

	return 0;
}
1466
1467 /*
1468 * @fn vega10_populate_ulv_state
1469 * @brief Function to provide parameters for Utral Low Voltage state to SMC.
1470 *
1471 * @param hwmgr - the address of the hardware manager.
1472 * @return Always 0.
1473 */
vega10_populate_ulv_state(struct pp_hwmgr * hwmgr)1474 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1475 {
1476 struct vega10_hwmgr *data = hwmgr->backend;
1477 struct phm_ppt_v2_information *table_info =
1478 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1479
1480 data->smc_state_table.pp_table.UlvOffsetVid =
1481 (uint8_t)table_info->us_ulv_voltage_offset;
1482
1483 data->smc_state_table.pp_table.UlvSmnclkDid =
1484 (uint8_t)(table_info->us_ulv_smnclk_did);
1485 data->smc_state_table.pp_table.UlvMp1clkDid =
1486 (uint8_t)(table_info->us_ulv_mp1clk_did);
1487 data->smc_state_table.pp_table.UlvGfxclkBypass =
1488 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1489 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1490 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1491 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1492 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1493
1494 return 0;
1495 }
1496
/* Look up the LCLK divider (DID) for the given link clock via the VBIOS
 * PLL tables and store it in *curr_lclk_did.  Returns 0 on success. */
static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
		uint32_t lclock, uint8_t *curr_lclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	/* Note: this copy had "&dividers" mis-encoded as an HTML entity. */
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			lclock, &dividers),
			"Failed to get LCLK clock settings from VBIOS!",
			return -1);

	*curr_lclk_did = dividers.ulDid;

	return 0;
}
1513
vega10_populate_smc_link_levels(struct pp_hwmgr * hwmgr)1514 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1515 {
1516 int result = -1;
1517 struct vega10_hwmgr *data = hwmgr->backend;
1518 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1519 struct vega10_pcie_table *pcie_table =
1520 &(data->dpm_table.pcie_table);
1521 uint32_t i, j;
1522
1523 for (i = 0; i < pcie_table->count; i++) {
1524 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1525 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1526
1527 result = vega10_populate_single_lclk_level(hwmgr,
1528 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1529 if (result) {
1530 pr_info("Populate LClock Level %d Failed!\n", i);
1531 return result;
1532 }
1533 }
1534
1535 j = i - 1;
1536 while (i < NUM_LINK_LEVELS) {
1537 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1538 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1539
1540 result = vega10_populate_single_lclk_level(hwmgr,
1541 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1542 if (result) {
1543 pr_info("Populate LClock Level %d Failed!\n", i);
1544 return result;
1545 }
1546 i++;
1547 }
1548
1549 return result;
1550 }
1551
1552 /**
1553 * Populates single SMC GFXSCLK structure using the provided engine clock
1554 *
1555 * @param hwmgr the address of the hardware manager
1556 * @param gfx_clock the GFX clock to use to populate the structure.
1557 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1558 */
1559
/* Fill one SMC GFXCLK PLL-setting entry for the given engine clock and
 * report the corresponding ACG frequency in MHz via *acg_freq. */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
		uint32_t *acg_freq)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t gfx_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
	uint32_t i = 0;

	/* Overdrive mode uses the user-editable ODN dependency table. */
	if (hwmgr->od_enabled)
		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&(data->odn_dpm_table.vdd_dep_on_sclk);
	else
		dep_on_sclk = table_info->vdd_dep_on_sclk;

	PP_ASSERT_WITH_CODE(dep_on_sclk,
			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
			return -EINVAL);

	/* During an OD SCLK update clamp to the OD limit; otherwise the
	 * requested clock must match an existing dependency entry. */
	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
	else {
		for (i = 0; i < dep_on_sclk->count; i++) {
			if (dep_on_sclk->entries[i].clk == gfx_clock)
				break;
		}
		PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
				"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
				return -EINVAL);
	}

	/* This copy had "&dividers" mis-encoded as an HTML entity; restored. */
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
			return -EINVAL);

	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
	current_gfxclk_level->FbMult =
			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
	current_gfxclk_level->SsFbMult =
			cpu_to_le32(dividers.ulPll_ss_fbsmult);
	current_gfxclk_level->SsSlewFrac =
			cpu_to_le16(dividers.usPll_ss_slew_frac);
	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);

	*acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */

	return 0;
}
1616
1617 /**
1618 * @brief Populates single SMC SOCCLK structure using the provided clock.
1619 *
1620 * @param hwmgr - the address of the hardware manager.
1621 * @param soc_clock - the SOC clock to use to populate the structure.
1622 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1623 * @return 0 on success..
1624 */
/* Fill one SMC SOCCLK entry: find the dependency-table row for soc_clock
 * (exact match for stock tables, first row at-or-above for overdrive),
 * then report its divider and voltage index. */
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
		uint32_t soc_clock, uint8_t *current_soc_did,
		uint8_t *current_vol_index)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	if (hwmgr->od_enabled) {
		dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&data->odn_dpm_table.vdd_dep_on_socclk;
		for (i = 0; i < dep_on_soc->count; i++) {
			if (dep_on_soc->entries[i].clk >= soc_clock)
				break;
		}
	} else {
		dep_on_soc = table_info->vdd_dep_on_socclk;
		for (i = 0; i < dep_on_soc->count; i++) {
			if (dep_on_soc->entries[i].clk == soc_clock)
				break;
		}
	}

	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
			return -EINVAL);

	/* This copy had "&dividers" mis-encoded as an HTML entity; restored. */
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			soc_clock, &dividers),
			"Failed to get SOC Clock settings from VBIOS!",
			return -EINVAL);

	*current_soc_did = (uint8_t)dividers.ulDid;
	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
	return 0;
}
1665
1666 /**
1667 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1668 *
1669 * @param hwmgr the address of the hardware manager
1670 */
vega10_populate_all_graphic_levels(struct pp_hwmgr * hwmgr)1671 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1672 {
1673 struct vega10_hwmgr *data = hwmgr->backend;
1674 struct phm_ppt_v2_information *table_info =
1675 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1676 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1677 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1678 int result = 0;
1679 uint32_t i, j;
1680
1681 for (i = 0; i < dpm_table->count; i++) {
1682 result = vega10_populate_single_gfx_level(hwmgr,
1683 dpm_table->dpm_levels[i].value,
1684 &(pp_table->GfxclkLevel[i]),
1685 &(pp_table->AcgFreqTable[i]));
1686 if (result)
1687 return result;
1688 }
1689
1690 j = i - 1;
1691 while (i < NUM_GFXCLK_DPM_LEVELS) {
1692 result = vega10_populate_single_gfx_level(hwmgr,
1693 dpm_table->dpm_levels[j].value,
1694 &(pp_table->GfxclkLevel[i]),
1695 &(pp_table->AcgFreqTable[i]));
1696 if (result)
1697 return result;
1698 i++;
1699 }
1700
1701 pp_table->GfxclkSlewRate =
1702 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1703
1704 dpm_table = &(data->dpm_table.soc_table);
1705 for (i = 0; i < dpm_table->count; i++) {
1706 result = vega10_populate_single_soc_level(hwmgr,
1707 dpm_table->dpm_levels[i].value,
1708 &(pp_table->SocclkDid[i]),
1709 &(pp_table->SocDpmVoltageIndex[i]));
1710 if (result)
1711 return result;
1712 }
1713
1714 j = i - 1;
1715 while (i < NUM_SOCCLK_DPM_LEVELS) {
1716 result = vega10_populate_single_soc_level(hwmgr,
1717 dpm_table->dpm_levels[j].value,
1718 &(pp_table->SocclkDid[i]),
1719 &(pp_table->SocDpmVoltageIndex[i]));
1720 if (result)
1721 return result;
1722 i++;
1723 }
1724
1725 return result;
1726 }
1727
vega10_populate_vddc_soc_levels(struct pp_hwmgr * hwmgr)1728 static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1729 {
1730 struct vega10_hwmgr *data = hwmgr->backend;
1731 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1732 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1733 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1734
1735 uint8_t soc_vid = 0;
1736 uint32_t i, max_vddc_level;
1737
1738 if (hwmgr->od_enabled)
1739 vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1740 else
1741 vddc_lookup_table = table_info->vddc_lookup_table;
1742
1743 max_vddc_level = vddc_lookup_table->count;
1744 for (i = 0; i < max_vddc_level; i++) {
1745 soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1746 pp_table->SocVid[i] = soc_vid;
1747 }
1748 while (i < MAX_REGULAR_DPM_NUMBER) {
1749 pp_table->SocVid[i] = soc_vid;
1750 i++;
1751 }
1752 }
1753
/**
 * @brief Populates a single SMC MCLK (UCLK) level structure using the
 *        provided memory clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param mem_clock - the memory clock to use to populate the structure.
 * @param current_mem_vid - receives the MVDD voltage VID for this level.
 * @param current_memclk_level - receives the PLL feedback multiplier and
 *        divider ID for this level.
 * @param current_mem_soc_vind - receives the SOC voltage index for this level.
 * @return 0 on success; -EINVAL or -1 on table/VBIOS lookup failure.
 */
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t mem_clock, uint8_t *current_mem_vid,
		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t mem_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
	uint32_t i = 0;

	/* With overdrive active the ODN copy of the dependency table wins. */
	if (hwmgr->od_enabled)
		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
			&data->odn_dpm_table.vdd_dep_on_mclk;
	else
		dep_on_mclk = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_on_mclk,
			"Invalid SOC_VDD-UCLK Dependency Table!",
			return -EINVAL);

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		/* Overdriven MCLK: clamp to the overdrive limit.  Note i
		 * stays 0, so entry 0 supplies the voltage indices below. */
		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
	} else {
		/* A stock MCLK must match a dependency-table entry exactly. */
		for (i = 0; i < dep_on_mclk->count; i++) {
			if (dep_on_mclk->entries[i].clk == mem_clock)
				break;
		}
		PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
				"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
				return -EINVAL);
	}

	/* Ask the VBIOS for the PLL settings that realize this UCLK. */
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
			"Failed to get UCLK settings from VBIOS!",
			return -1);

	*current_mem_vid =
			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
	*current_mem_soc_vind =
			(uint8_t)(dep_on_mclk->entries[i].vddInd);
	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
	current_memclk_level->Did = (uint8_t)(dividers.ulDid);

	/* A divider ID of 0 from the VBIOS would be nonsensical. */
	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
			"Invalid Divider ID!",
			return -EINVAL);

	return 0;
}
1814
1815 /**
1816 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1817 *
1818 * @param pHwMgr - the address of the hardware manager.
1819 * @return PP_Result_OK on success.
1820 */
vega10_populate_all_memory_levels(struct pp_hwmgr * hwmgr)1821 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1822 {
1823 struct vega10_hwmgr *data = hwmgr->backend;
1824 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1825 struct vega10_single_dpm_table *dpm_table =
1826 &(data->dpm_table.mem_table);
1827 int result = 0;
1828 uint32_t i, j;
1829
1830 for (i = 0; i < dpm_table->count; i++) {
1831 result = vega10_populate_single_memory_level(hwmgr,
1832 dpm_table->dpm_levels[i].value,
1833 &(pp_table->MemVid[i]),
1834 &(pp_table->UclkLevel[i]),
1835 &(pp_table->MemSocVoltageIndex[i]));
1836 if (result)
1837 return result;
1838 }
1839
1840 j = i - 1;
1841 while (i < NUM_UCLK_DPM_LEVELS) {
1842 result = vega10_populate_single_memory_level(hwmgr,
1843 dpm_table->dpm_levels[j].value,
1844 &(pp_table->MemVid[i]),
1845 &(pp_table->UclkLevel[i]),
1846 &(pp_table->MemSocVoltageIndex[i]));
1847 if (result)
1848 return result;
1849 i++;
1850 }
1851
1852 pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
1853 pp_table->MemoryChannelWidth =
1854 (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1855 channel_number[data->mem_channels]);
1856
1857 pp_table->LowestUclkReservedForUlv =
1858 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
1859
1860 return result;
1861 }
1862
/*
 * vega10_populate_single_display_type - fill one row of the SMC display
 * clock table (frequency/VID pairs) for the given display clock domain,
 * padding unused slots with the last real entry.
 */
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
		DSPCLK_e disp_clock)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)
			(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint16_t freq = 0, voltage = 0;
	uint8_t vid = 0;
	uint32_t idx;

	/* Select the dependency table matching the requested domain. */
	switch (disp_clock) {
	case DSPCLK_DCEFCLK:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case DSPCLK_DISPCLK:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case DSPCLK_PIXCLK:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case DSPCLK_PHYCLK:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -1;
	}

	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
			"Number Of Entries Exceeded maximum!",
			return -1);

	for (idx = 0; idx < dep_table->count; idx++) {
		freq = (uint16_t)(dep_table->entries[idx].clk / 100);
		voltage = table_info->vddc_lookup_table->
				entries[dep_table->entries[idx].vddInd].us_vdd;
		vid = (uint8_t)convert_to_vid(voltage);
		pp_table->DisplayClockTable[disp_clock][idx].Freq =
				cpu_to_le16(freq);
		pp_table->DisplayClockTable[disp_clock][idx].Vid =
				cpu_to_le16(vid);
	}

	/* Pad the remaining slots with the last computed pair. */
	for (; idx < NUM_DSPCLK_LEVELS; idx++) {
		pp_table->DisplayClockTable[disp_clock][idx].Freq =
				cpu_to_le16(freq);
		pp_table->DisplayClockTable[disp_clock][idx].Vid =
				cpu_to_le16(vid);
	}

	return 0;
}
1918
vega10_populate_all_display_clock_levels(struct pp_hwmgr * hwmgr)1919 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1920 {
1921 uint32_t i;
1922
1923 for (i = 0; i < DSPCLK_COUNT; i++) {
1924 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1925 "Failed to populate Clock in DisplayClockTable!",
1926 return -1);
1927 }
1928
1929 return 0;
1930 }
1931
/*
 * Populate one VCE engine-clock (ECLK) level: divider ID from the VBIOS
 * plus the SOC voltage index from the multimedia dependency table.
 */
static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
		uint32_t eclock, uint8_t *current_eclk_did,
		uint8_t *current_soc_vol)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	/* Ask the VBIOS for the divider that produces this ECLK. */
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			eclock, &dividers),
			"Failed to get ECLK clock settings from VBIOS!",
			return -1);

	*current_eclk_did = (uint8_t)dividers.ulDid;

	/*
	 * Find the SOC voltage index for a matching ECLK entry.
	 * NOTE(review): there is no break, so the last matching entry wins,
	 * and if nothing matches *current_soc_vol is left unmodified —
	 * callers must pre-initialize it.  Confirm this is intentional.
	 */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].eclk == eclock)
			*current_soc_vol = dep_table->entries[i].vddcInd;
	}

	return 0;
}
1958
vega10_populate_smc_vce_levels(struct pp_hwmgr * hwmgr)1959 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1960 {
1961 struct vega10_hwmgr *data = hwmgr->backend;
1962 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1963 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1964 int result = -EINVAL;
1965 uint32_t i, j;
1966
1967 for (i = 0; i < dpm_table->count; i++) {
1968 result = vega10_populate_single_eclock_level(hwmgr,
1969 dpm_table->dpm_levels[i].value,
1970 &(pp_table->EclkDid[i]),
1971 &(pp_table->VceDpmVoltageIndex[i]));
1972 if (result)
1973 return result;
1974 }
1975
1976 j = i - 1;
1977 while (i < NUM_VCE_DPM_LEVELS) {
1978 result = vega10_populate_single_eclock_level(hwmgr,
1979 dpm_table->dpm_levels[j].value,
1980 &(pp_table->EclkDid[i]),
1981 &(pp_table->VceDpmVoltageIndex[i]));
1982 if (result)
1983 return result;
1984 i++;
1985 }
1986
1987 return result;
1988 }
1989
/* Fetch the VBIOS divider ID for a single UVD VCLK frequency. */
static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
		uint32_t vclock, uint8_t *current_vclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 div_info;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			vclock, &div_info),
			"Failed to get VCLK clock settings from VBIOS!",
			return -EINVAL);

	*current_vclk_did = (uint8_t)div_info.ulDid;

	return 0;
}
2005
/* Fetch the VBIOS divider ID for a single UVD DCLK frequency. */
static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
		uint32_t dclock, uint8_t *current_dclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 div_info;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			dclock, &div_info),
			"Failed to get DCLK clock settings from VBIOS!",
			return -EINVAL);

	*current_dclk_did = (uint8_t)div_info.ulDid;

	return 0;
}
2021
/*
 * Populate all SMC UVD DPM levels: VCLK dividers, DCLK dividers, and the
 * per-level SOC voltage indices from the multimedia dependency table.
 * Each table is filled from the trimmed DPM levels and then padded by
 * replicating its highest real level.
 */
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *vclk_dpm_table =
			&(data->dpm_table.vclk_table);
	struct vega10_single_dpm_table *dclk_dpm_table =
			&(data->dpm_table.dclk_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	int result = -EINVAL;
	uint32_t i, j;

	/* VCLK: real levels first, then pad with the highest level. */
	for (i = 0; i < vclk_dpm_table->count; i++) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[i].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[j].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/* DCLK: same fill-then-pad scheme. */
	for (i = 0; i < dclk_dpm_table->count; i++) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[i].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[j].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
		i++;
	}

	/*
	 * Voltage indices: each MM dependency entry must match the VCLK and
	 * DCLK DPM tables at the same index, otherwise the table is
	 * rejected wholesale.
	 * NOTE(review): this assumes mm_dep_table lines up one-to-one with
	 * the trimmed vclk/dclk DPM tables (and that its count does not
	 * exceed theirs) — confirm against the table-trimming code.
	 */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vclk ==
				vclk_dpm_table->dpm_levels[i].value &&
			dep_table->entries[i].dclk ==
				dclk_dpm_table->dpm_levels[i].value)
			pp_table->UvdDpmVoltageIndex[i] =
					dep_table->entries[i].vddcInd;
		else
			return -1;
	}

	/* Pad the remaining voltage indices with the last matched entry. */
	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
		i++;
	}

	return 0;
}
2092
vega10_populate_clock_stretcher_table(struct pp_hwmgr * hwmgr)2093 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2094 {
2095 struct vega10_hwmgr *data = hwmgr->backend;
2096 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2097 struct phm_ppt_v2_information *table_info =
2098 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2099 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2100 table_info->vdd_dep_on_sclk;
2101 uint32_t i;
2102
2103 for (i = 0; i < dep_table->count; i++) {
2104 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2105 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2106 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2107 }
2108
2109 return 0;
2110 }
2111
/*
 * Populate the AVFS (adaptive voltage/frequency scaling) section of the
 * SMC pptable from fused parameters read via atomfwctrl, falling back to
 * registry-supplied quadratic coefficients where configured.  If the fuse
 * readout fails, AVFS support is turned off.
 */
static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
	int result = 0;
	uint32_t i;

	/* Defaults used when AVFS is unsupported or fuse readout fails. */
	pp_table->MinVoltageVid = (uint8_t)0xff;
	pp_table->MaxVoltageVid = (uint8_t)0;

	if (data->smu_features[GNLD_AVFS].supported) {
		result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
		if (!result) {
			pp_table->MinVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
			pp_table->MaxVoltageVid = (uint8_t)
					convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));

			/* Mean/N-sigma statistical model coefficients. */
			pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
			pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
			pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
			pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
			/* NOTE(review): Platform_sigma is loaded from the DC
			 * tolerance sigma field rather than a platform-sigma
			 * field — looks like a copy-paste; confirm against the
			 * atomfwctrl AVFS parameter struct. */
			pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
			pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);

			/* BTC voltage-droop tables for CKS off and on; the
			 * *_shift fields fix the coefficients' binary point. */
			pp_table->BtcGbVdroopTableCksOff.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
			pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
			pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOff.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
			pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;

			pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
			pp_table->BtcGbVdroopTableCksOn.a0 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
			pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a1 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
			pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
			pp_table->BtcGbVdroopTableCksOn.a2 =
					cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
			pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;

			/* Fused AVFS curves (quadratic m1/m2/b) for CKS on/off. */
			pp_table->AvfsGbCksOn.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
			pp_table->AvfsGbCksOn.m2 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
			pp_table->AvfsGbCksOn.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
			pp_table->AvfsGbCksOn.m1_shift = 24;
			pp_table->AvfsGbCksOn.m2_shift = 12;
			pp_table->AvfsGbCksOn.b_shift = 0;

			pp_table->OverrideAvfsGbCksOn =
					avfs_params.ucEnableGbFuseTableCkson;
			pp_table->AvfsGbCksOff.m1 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
			pp_table->AvfsGbCksOff.m2 =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
			pp_table->AvfsGbCksOff.b =
					cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
			pp_table->AvfsGbCksOff.m1_shift = 24;
			pp_table->AvfsGbCksOff.m2_shift = 12;
			pp_table->AvfsGbCksOff.b_shift = 0;

			/* Per-SCLK-level static voltage offsets as VIDs. */
			for (i = 0; i < dep_table->count; i++)
				pp_table->StaticVoltageOffsetVid[i] =
						convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));

			/*
			 * For each display clock → GFXCLK conversion curve,
			 * registry-provided quadratic coefficients override
			 * the fused values when both a and b are non-default.
			 */
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->disp_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)data->disp_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)data->disp_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)data->disp_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
						(int32_t)avfs_params.ulDispclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
						(int32_t)avfs_params.ulDispclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
						(int32_t)avfs_params.ulDispclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;

			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->dcef_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)data->dcef_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)data->dcef_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)data->dcef_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
						(int32_t)avfs_params.ulDcefclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
						(int32_t)avfs_params.ulDcefclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;

			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->pixel_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)data->pixel_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)data->pixel_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)data->pixel_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
						(int32_t)avfs_params.ulPixelclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
						(int32_t)avfs_params.ulPixelclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
			if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_a) &&
				(PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
					data->phy_clk_quad_eqn_b)) {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)data->phy_clk_quad_eqn_a;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)data->phy_clk_quad_eqn_b;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)data->phy_clk_quad_eqn_c;
			} else {
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM1;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
						(int32_t)avfs_params.ulPhyclk2GfxclkM2;
				pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
						(int32_t)avfs_params.ulPhyclk2GfxclkB;
			}

			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
			pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;

			/* ACG droop/fuse tables.
			 * NOTE(review): unlike the tables above these are not
			 * passed through cpu_to_le32 — harmless on LE hosts,
			 * but confirm for BE builds. */
			pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
			pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
			pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
			pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
			pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
			pp_table->AcgBtcGbVdroopTable.a2_shift = 20;

			pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
			pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
			pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
			pp_table->AcgAvfsGb.m1_shift = 24;
			pp_table->AcgAvfsGb.m2_shift = 12;
			pp_table->AcgAvfsGb.b_shift = 0;

		} else {
			/* Fuse readout failed: run without AVFS. */
			data->smu_features[GNLD_AVFS].supported = false;
		}
	}

	return 0;
}
2302
/*
 * Enable ACG (adaptive clock generator): run the ACG built-in test via
 * the SMC and, if it passes, start ACG in the configured loop mode and
 * set the ACG feature bits.
 */
static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t agc_btc_response;

	if (data->smu_features[GNLD_ACG].supported) {
		/* NOTE(review): this enables the DPM prefetcher feature bits,
		 * not the ACG bits, before running the ACG BTC — confirm the
		 * feature pairing is intentional. */
		if (0 == vega10_enable_smc_features(hwmgr, true,
				data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);

		/* Run the ACG built-in test and read back its status. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
		agc_btc_response = smum_get_argument(hwmgr);

		if (1 == agc_btc_response) {
			/* acg_loop_state: 1 = closed loop, 2 = open loop. */
			if (1 == data->acg_loop_state)
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
			else if (2 == data->acg_loop_state)
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
			if (0 == vega10_enable_smc_features(hwmgr, true,
				data->smu_features[GNLD_ACG].smu_feature_bitmap))
					data->smu_features[GNLD_ACG].enabled = true;
		} else {
			pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
			data->smu_features[GNLD_ACG].enabled = false;
		}
	}

	return 0;
}
2334
vega10_acg_disable(struct pp_hwmgr * hwmgr)2335 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2336 {
2337 struct vega10_hwmgr *data = hwmgr->backend;
2338
2339 if (data->smu_features[GNLD_ACG].supported &&
2340 data->smu_features[GNLD_ACG].enabled)
2341 if (!vega10_enable_smc_features(hwmgr, false,
2342 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2343 data->smu_features[GNLD_ACG].enabled = false;
2344
2345 return 0;
2346 }
2347
vega10_populate_gpio_parameters(struct pp_hwmgr * hwmgr)2348 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2349 {
2350 struct vega10_hwmgr *data = hwmgr->backend;
2351 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2352 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2353 int result;
2354
2355 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2356 if (!result) {
2357 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2358 data->registry_data.regulator_hot_gpio_support) {
2359 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2360 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2361 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2362 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2363 } else {
2364 pp_table->VR0HotGpio = 0;
2365 pp_table->VR0HotPolarity = 0;
2366 pp_table->VR1HotGpio = 0;
2367 pp_table->VR1HotPolarity = 0;
2368 }
2369
2370 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2371 data->registry_data.ac_dc_switch_gpio_support) {
2372 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2373 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2374 } else {
2375 pp_table->AcDcGpio = 0;
2376 pp_table->AcDcPolarity = 0;
2377 }
2378 }
2379
2380 return result;
2381 }
2382
/*
 * vega10_avfs_enable - switch the AVFS SMC feature on or off, tracking
 * the resulting state.  A no-op when AVFS is unsupported or the feature
 * is already in the requested state.
 */
static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (!data->smu_features[GNLD_AVFS].supported)
		return 0;

	/* Requested state already matches: nothing to do. */
	if (enable == data->smu_features[GNLD_AVFS].enabled)
		return 0;

	if (enable) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true,
				data->smu_features[GNLD_AVFS].smu_feature_bitmap),
				"[avfs_control] Attempt to Enable AVFS feature Failed!",
				return -1);
		data->smu_features[GNLD_AVFS].enabled = true;
	} else {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_AVFS].smu_feature_bitmap),
				"[avfs_control] Attempt to Disable AVFS feature Failed!",
				return -1);
		data->smu_features[GNLD_AVFS].enabled = false;
	}

	return 0;
}
2411
vega10_update_avfs(struct pp_hwmgr * hwmgr)2412 static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2413 {
2414 struct vega10_hwmgr *data = hwmgr->backend;
2415
2416 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2417 vega10_avfs_enable(hwmgr, false);
2418 } else if (data->need_update_dpm_table) {
2419 vega10_avfs_enable(hwmgr, false);
2420 vega10_avfs_enable(hwmgr, true);
2421 } else {
2422 vega10_avfs_enable(hwmgr, true);
2423 }
2424
2425 return 0;
2426 }
2427
/*
 * Look up a per-chip AVFS fuse override keyed by the GPU serial number
 * and, if one exists, upload it to the SMC.
 */
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	uint64_t serial_number = 0;
	uint32_t top32, bottom32;
	struct phm_fuses_default fuse;

	struct vega10_hwmgr *data = hwmgr->backend;
	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);

	/* Read the two halves of the chip serial number from the SMC. */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	/* NOTE(review): bottom32 lands in the high half here; the override
	 * lookup table must use the same ordering — confirm. */
	serial_number = ((uint64_t)bottom32 << 32) | top32;

	/* Push the override curves to the SMC only on a table hit. */
	if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
		avfs_fuse_table->VFT0_b = fuse.VFT0_b;
		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
		avfs_fuse_table->VFT1_b = fuse.VFT1_b;
		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
		avfs_fuse_table->VFT2_b = fuse.VFT2_b;
		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
		result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
						AVFSFUSETABLE, false);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to upload FuseOVerride!",
				);
	}

	return result;
}
2466
vega10_check_dpm_table_updated(struct pp_hwmgr * hwmgr)2467 static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
2468 {
2469 struct vega10_hwmgr *data = hwmgr->backend;
2470 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2471 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
2472 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
2473 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
2474 uint32_t i;
2475
2476 dep_table = table_info->vdd_dep_on_mclk;
2477 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
2478
2479 for (i = 0; i < dep_table->count; i++) {
2480 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2481 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
2482 return;
2483 }
2484 }
2485
2486 dep_table = table_info->vdd_dep_on_sclk;
2487 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
2488 for (i = 0; i < dep_table->count; i++) {
2489 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2490 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
2491 return;
2492 }
2493 }
2494 }
2495
2496 /**
2497 * Initializes the SMC table and uploads it
2498 *
2499 * @param hwmgr the address of the powerplay hardware manager.
2500 * @param pInput the pointer to input data (PowerState)
2501 * @return always 0
2502 */
vega10_init_smc_table(struct pp_hwmgr * hwmgr)2503 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2504 {
2505 int result;
2506 struct vega10_hwmgr *data = hwmgr->backend;
2507 struct phm_ppt_v2_information *table_info =
2508 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2509 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2510 struct pp_atomfwctrl_voltage_table voltage_table;
2511 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2512 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2513
2514 result = vega10_setup_default_dpm_tables(hwmgr);
2515 PP_ASSERT_WITH_CODE(!result,
2516 "Failed to setup default DPM tables!",
2517 return result);
2518
2519 if (!hwmgr->not_vf)
2520 return 0;
2521
2522 /* initialize ODN table */
2523 if (hwmgr->od_enabled) {
2524 if (odn_table->max_vddc) {
2525 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2526 vega10_check_dpm_table_updated(hwmgr);
2527 } else {
2528 vega10_odn_initial_default_setting(hwmgr);
2529 }
2530 }
2531
2532 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2533 VOLTAGE_OBJ_SVID2, &voltage_table);
2534 pp_table->MaxVidStep = voltage_table.max_vid_step;
2535
2536 pp_table->GfxDpmVoltageMode =
2537 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2538 pp_table->SocDpmVoltageMode =
2539 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2540 pp_table->UclkDpmVoltageMode =
2541 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2542 pp_table->UvdDpmVoltageMode =
2543 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2544 pp_table->VceDpmVoltageMode =
2545 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2546 pp_table->Mp0DpmVoltageMode =
2547 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2548
2549 pp_table->DisplayDpmVoltageMode =
2550 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2551
2552 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2553 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2554
2555 if (data->registry_data.ulv_support &&
2556 table_info->us_ulv_voltage_offset) {
2557 result = vega10_populate_ulv_state(hwmgr);
2558 PP_ASSERT_WITH_CODE(!result,
2559 "Failed to initialize ULV state!",
2560 return result);
2561 }
2562
2563 result = vega10_populate_smc_link_levels(hwmgr);
2564 PP_ASSERT_WITH_CODE(!result,
2565 "Failed to initialize Link Level!",
2566 return result);
2567
2568 result = vega10_populate_all_graphic_levels(hwmgr);
2569 PP_ASSERT_WITH_CODE(!result,
2570 "Failed to initialize Graphics Level!",
2571 return result);
2572
2573 result = vega10_populate_all_memory_levels(hwmgr);
2574 PP_ASSERT_WITH_CODE(!result,
2575 "Failed to initialize Memory Level!",
2576 return result);
2577
2578 vega10_populate_vddc_soc_levels(hwmgr);
2579
2580 result = vega10_populate_all_display_clock_levels(hwmgr);
2581 PP_ASSERT_WITH_CODE(!result,
2582 "Failed to initialize Display Level!",
2583 return result);
2584
2585 result = vega10_populate_smc_vce_levels(hwmgr);
2586 PP_ASSERT_WITH_CODE(!result,
2587 "Failed to initialize VCE Level!",
2588 return result);
2589
2590 result = vega10_populate_smc_uvd_levels(hwmgr);
2591 PP_ASSERT_WITH_CODE(!result,
2592 "Failed to initialize UVD Level!",
2593 return result);
2594
2595 if (data->registry_data.clock_stretcher_support) {
2596 result = vega10_populate_clock_stretcher_table(hwmgr);
2597 PP_ASSERT_WITH_CODE(!result,
2598 "Failed to populate Clock Stretcher Table!",
2599 return result);
2600 }
2601
2602 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2603 if (!result) {
2604 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2605 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2606 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2607 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2608 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2609 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2610 SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
2611
2612 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2613 SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
2614
2615 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2616 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2617 if (0 != boot_up_values.usVddc) {
2618 smum_send_msg_to_smc_with_parameter(hwmgr,
2619 PPSMC_MSG_SetFloorSocVoltage,
2620 (boot_up_values.usVddc * 4));
2621 data->vbios_boot_state.bsoc_vddc_lock = true;
2622 } else {
2623 data->vbios_boot_state.bsoc_vddc_lock = false;
2624 }
2625 smum_send_msg_to_smc_with_parameter(hwmgr,
2626 PPSMC_MSG_SetMinDeepSleepDcefclk,
2627 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2628 }
2629
2630 result = vega10_populate_avfs_parameters(hwmgr);
2631 PP_ASSERT_WITH_CODE(!result,
2632 "Failed to initialize AVFS Parameters!",
2633 return result);
2634
2635 result = vega10_populate_gpio_parameters(hwmgr);
2636 PP_ASSERT_WITH_CODE(!result,
2637 "Failed to initialize GPIO Parameters!",
2638 return result);
2639
2640 pp_table->GfxclkAverageAlpha = (uint8_t)
2641 (data->gfxclk_average_alpha);
2642 pp_table->SocclkAverageAlpha = (uint8_t)
2643 (data->socclk_average_alpha);
2644 pp_table->UclkAverageAlpha = (uint8_t)
2645 (data->uclk_average_alpha);
2646 pp_table->GfxActivityAverageAlpha = (uint8_t)
2647 (data->gfx_activity_average_alpha);
2648
2649 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2650
2651 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2652
2653 PP_ASSERT_WITH_CODE(!result,
2654 "Failed to upload PPtable!", return result);
2655
2656 result = vega10_avfs_enable(hwmgr, true);
2657 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2658 return result);
2659 vega10_acg_enable(hwmgr);
2660
2661 return 0;
2662 }
2663
vega10_enable_thermal_protection(struct pp_hwmgr * hwmgr)2664 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2665 {
2666 struct vega10_hwmgr *data = hwmgr->backend;
2667
2668 if (data->smu_features[GNLD_THERMAL].supported) {
2669 if (data->smu_features[GNLD_THERMAL].enabled)
2670 pr_info("THERMAL Feature Already enabled!");
2671
2672 PP_ASSERT_WITH_CODE(
2673 !vega10_enable_smc_features(hwmgr,
2674 true,
2675 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2676 "Enable THERMAL Feature Failed!",
2677 return -1);
2678 data->smu_features[GNLD_THERMAL].enabled = true;
2679 }
2680
2681 return 0;
2682 }
2683
vega10_disable_thermal_protection(struct pp_hwmgr * hwmgr)2684 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2685 {
2686 struct vega10_hwmgr *data = hwmgr->backend;
2687
2688 if (data->smu_features[GNLD_THERMAL].supported) {
2689 if (!data->smu_features[GNLD_THERMAL].enabled)
2690 pr_info("THERMAL Feature Already disabled!");
2691
2692 PP_ASSERT_WITH_CODE(
2693 !vega10_enable_smc_features(hwmgr,
2694 false,
2695 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2696 "disable THERMAL Feature Failed!",
2697 return -1);
2698 data->smu_features[GNLD_THERMAL].enabled = false;
2699 }
2700
2701 return 0;
2702 }
2703
vega10_enable_vrhot_feature(struct pp_hwmgr * hwmgr)2704 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2705 {
2706 struct vega10_hwmgr *data = hwmgr->backend;
2707
2708 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2709 if (data->smu_features[GNLD_VR0HOT].supported) {
2710 PP_ASSERT_WITH_CODE(
2711 !vega10_enable_smc_features(hwmgr,
2712 true,
2713 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2714 "Attempt to Enable VR0 Hot feature Failed!",
2715 return -1);
2716 data->smu_features[GNLD_VR0HOT].enabled = true;
2717 } else {
2718 if (data->smu_features[GNLD_VR1HOT].supported) {
2719 PP_ASSERT_WITH_CODE(
2720 !vega10_enable_smc_features(hwmgr,
2721 true,
2722 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2723 "Attempt to Enable VR0 Hot feature Failed!",
2724 return -1);
2725 data->smu_features[GNLD_VR1HOT].enabled = true;
2726 }
2727 }
2728 }
2729 return 0;
2730 }
2731
vega10_enable_ulv(struct pp_hwmgr * hwmgr)2732 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2733 {
2734 struct vega10_hwmgr *data = hwmgr->backend;
2735
2736 if (data->registry_data.ulv_support) {
2737 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2738 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2739 "Enable ULV Feature Failed!",
2740 return -1);
2741 data->smu_features[GNLD_ULV].enabled = true;
2742 }
2743
2744 return 0;
2745 }
2746
vega10_disable_ulv(struct pp_hwmgr * hwmgr)2747 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2748 {
2749 struct vega10_hwmgr *data = hwmgr->backend;
2750
2751 if (data->registry_data.ulv_support) {
2752 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2753 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2754 "disable ULV Feature Failed!",
2755 return -EINVAL);
2756 data->smu_features[GNLD_ULV].enabled = false;
2757 }
2758
2759 return 0;
2760 }
2761
/*
 * Turn on the SMU deep-sleep switch for each clock domain the platform
 * reports as supported: GFXCLK, SOCCLK, LCLK and DCEFCLK.
 *
 * Returns 0 on success, -EINVAL if the SMC rejects any enable request
 * (features enabled before the failing one stay enabled).
 */
static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_DS_GFXCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
				"Attempt to Enable DS_GFXCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_GFXCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_SOCCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
				"Attempt to Enable DS_SOCCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_SOCCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_LCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
				"Attempt to Enable DS_LCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_LCLK].enabled = true;
	}

	if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
				"Attempt to Enable DS_DCEFCLK Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
	}

	return 0;
}
2800
vega10_disable_deep_sleep_master_switch(struct pp_hwmgr * hwmgr)2801 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2802 {
2803 struct vega10_hwmgr *data = hwmgr->backend;
2804
2805 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2806 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2807 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2808 "Attempt to disable DS_GFXCLK Feature Failed!",
2809 return -EINVAL);
2810 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2811 }
2812
2813 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2814 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2815 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2816 "Attempt to disable DS_ Feature Failed!",
2817 return -EINVAL);
2818 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2819 }
2820
2821 if (data->smu_features[GNLD_DS_LCLK].supported) {
2822 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2823 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2824 "Attempt to disable DS_LCLK Feature Failed!",
2825 return -EINVAL);
2826 data->smu_features[GNLD_DS_LCLK].enabled = false;
2827 }
2828
2829 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2830 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2831 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2832 "Attempt to disable DS_DCEFCLK Feature Failed!",
2833 return -EINVAL);
2834 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2835 }
2836
2837 return 0;
2838 }
2839
/*
 * Disable the DPM features selected by @bitmap (plus the LED display
 * feature) on the SMC.  No-op on virtual functions.
 */
static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, feature_mask = 0;

	if (!hwmgr->not_vf)
		return 0;

	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
				"Attempt to disable LED DPM feature failed!", return -EINVAL);
		data->smu_features[GNLD_LED_DISPLAY].enabled = false;
	}

	/* Gather every selected, supported, currently-enabled feature so
	 * they can all be turned off with a single SMC request.
	 */
	for (i = 0; i < GNLD_DPM_MAX; i++) {
		if ((data->smu_features[i].smu_feature_bitmap & bitmap) &&
		    data->smu_features[i].supported &&
		    data->smu_features[i].enabled) {
			feature_mask |= data->smu_features[i].smu_feature_bitmap;
			data->smu_features[i].enabled = false;
		}
	}

	vega10_enable_smc_features(hwmgr, false, feature_mask);

	return 0;
}
2871
/**
 * @brief Tell SMC to enable the supported DPMs.
 *
 * @param hwmgr - the address of the powerplay hardware manager.
 * @param bitmap - bitmap of the features to enable.
 * @return 0 if at least one DPM is successfully enabled.
 */
/*
 * Enable the supported DPM features selected by @bitmap on the SMC.
 *
 * All supported-but-disabled features are collected into one mask and
 * enabled with a single SMC request; if that request fails, the local
 * enabled flags are rolled back.  Also enables the LED display feature,
 * releases the boot-time SOC voltage floor, and enables the AC/DC quick
 * transition (ACDC) feature when the Falcon_QuickTransition cap is set.
 *
 * Returns 0 on success, -EINVAL if a mandatory enable request fails.
 */
static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, feature_mask = 0;

	for (i = 0; i < GNLD_DPM_MAX; i++) {
		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
			if (data->smu_features[i].supported) {
				if (!data->smu_features[i].enabled) {
					feature_mask |= data->smu_features[i].
							smu_feature_bitmap;
					data->smu_features[i].enabled = true;
				}
			}
		}
	}

	if (vega10_enable_smc_features(hwmgr,
			true, feature_mask)) {
		/* Enable failed: roll back the flags set optimistically above. */
		for (i = 0; i < GNLD_DPM_MAX; i++) {
			if (data->smu_features[i].smu_feature_bitmap &
					feature_mask)
				data->smu_features[i].enabled = false;
		}
	}

	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
				"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
		data->smu_features[GNLD_LED_DISPLAY].enabled = true;
	}

	/* DPM is up; the boot-time SOC voltage floor is no longer needed. */
	if (data->vbios_boot_state.bsoc_vddc_lock) {
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetFloorSocVoltage, 0);
		data->vbios_boot_state.bsoc_vddc_lock = false;
	}

	if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
		if (data->smu_features[GNLD_ACDC].supported) {
			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
					true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
					/* message previously said "DS_GFXCLK" - copy/paste error */
					"Attempt to Enable ACDC Feature Failed!",
					return -EINVAL);
			data->smu_features[GNLD_ACDC].enabled = true;
		}
	}

	return 0;
}
2930
/*
 * Toggle the PCC limit feature on (@enable true) or off (@enable false)
 * when the platform supports it; no-op otherwise.
 */
static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (!data->smu_features[GNLD_PCC_LIMIT].supported)
		return 0;

	/* Requesting the state we are already in is harmless; just log it. */
	if (data->smu_features[GNLD_PCC_LIMIT].enabled == enable)
		pr_info("GNLD_PCC_LIMIT has been %s \n", enable ? "enabled" : "disabled");

	PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
			enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
			"Attempt to Enable PCC Limit feature Failed!",
			return -EINVAL);
	data->smu_features[GNLD_PCC_LIMIT].enabled = enable;

	return 0;
}
2947
vega10_enable_dpm_tasks(struct pp_hwmgr * hwmgr)2948 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2949 {
2950 struct vega10_hwmgr *data = hwmgr->backend;
2951 int tmp_result, result = 0;
2952
2953 if (hwmgr->not_vf) {
2954 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2955
2956 smum_send_msg_to_smc_with_parameter(hwmgr,
2957 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2958
2959 tmp_result = vega10_construct_voltage_tables(hwmgr);
2960 PP_ASSERT_WITH_CODE(!tmp_result,
2961 "Failed to construct voltage tables!",
2962 result = tmp_result);
2963 }
2964
2965 if (hwmgr->not_vf || hwmgr->pp_one_vf) {
2966 tmp_result = vega10_init_smc_table(hwmgr);
2967 PP_ASSERT_WITH_CODE(!tmp_result,
2968 "Failed to initialize SMC table!",
2969 result = tmp_result);
2970 }
2971
2972 if (hwmgr->not_vf) {
2973 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2974 tmp_result = vega10_enable_thermal_protection(hwmgr);
2975 PP_ASSERT_WITH_CODE(!tmp_result,
2976 "Failed to enable thermal protection!",
2977 result = tmp_result);
2978 }
2979
2980 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2981 PP_ASSERT_WITH_CODE(!tmp_result,
2982 "Failed to enable VR hot feature!",
2983 result = tmp_result);
2984
2985 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2986 PP_ASSERT_WITH_CODE(!tmp_result,
2987 "Failed to enable deep sleep master switch!",
2988 result = tmp_result);
2989 }
2990
2991 if (hwmgr->not_vf) {
2992 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2993 PP_ASSERT_WITH_CODE(!tmp_result,
2994 "Failed to start DPM!", result = tmp_result);
2995 }
2996
2997 if (hwmgr->not_vf) {
2998 /* enable didt, do not abort if failed didt */
2999 tmp_result = vega10_enable_didt_config(hwmgr);
3000 PP_ASSERT(!tmp_result,
3001 "Failed to enable didt config!");
3002 }
3003
3004 tmp_result = vega10_enable_power_containment(hwmgr);
3005 PP_ASSERT_WITH_CODE(!tmp_result,
3006 "Failed to enable power containment!",
3007 result = tmp_result);
3008
3009 if (hwmgr->not_vf) {
3010 tmp_result = vega10_power_control_set_level(hwmgr);
3011 PP_ASSERT_WITH_CODE(!tmp_result,
3012 "Failed to power control set level!",
3013 result = tmp_result);
3014
3015 tmp_result = vega10_enable_ulv(hwmgr);
3016 PP_ASSERT_WITH_CODE(!tmp_result,
3017 "Failed to enable ULV!",
3018 result = tmp_result);
3019 }
3020
3021 return result;
3022 }
3023
/* Report the size of a vega10 hardware power-state object to the core. */
static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct vega10_power_state);
}
3028
/*
 * Callback invoked once per powerplay-table state entry; decodes the raw
 * ATOM_Vega10_State record into @power_state and appends two performance
 * levels (the entry's low and high clock points) to the vega10 hardware
 * state.
 *
 * @state: raw ATOM_Vega10_State entry being parsed.
 * @power_state: driver power state to fill in.
 * @pp_table: full ATOM powerplay table, used to locate the SOCCLK/GFXCLK/
 *	MCLK dependency sub-tables via their little-endian byte offsets.
 * @classification_flag: classification bits supplied by the table walker.
 *
 * Returns 0 on success, -1 if the SMC or driver level limits are exceeded.
 */
static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
	struct vega10_power_state *vega10_power_state =
			cast_phw_vega10_power_state(&(power_state->hardware));
	struct vega10_performance_level *performance_level;
	ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
	ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
			(ATOM_Vega10_POWERPLAYTABLE *)pp_table;
	ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
			(ATOM_Vega10_SOCCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
	ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
			(ATOM_Vega10_GFXCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
	ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Vega10_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
			le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));


	/* The following fields are not initialized here:
	 * id orderedList allStatesList
	 */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS
	 * that is not being used right now
	 */
	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
			ATOM_Vega10_DISALLOW_ON_DC) != 0);

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			((le32_to_cpu(state_entry->ulCapsAndSettings) &
			ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);

	/* NOTE(review): these limit checks run after the increment above,
	 * so the count may already equal the limit when checked - confirm
	 * this is the intended off-by-one semantics.
	 */
	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <
					NUM_GFXCLK_DPM_LEVELS),
			"Performance levels exceeds SMC limit!",
			return -1);

	PP_ASSERT_WITH_CODE(
			(vega10_power_state->performance_level_count <=
					hwmgr->platform_descriptor.
					hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -1);

	/* Performance levels are arranged from low to high. */
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexLow].ulClk;
	performance_level->gfx_clock = gfxclk_dep_table->entries
			[state_entry->ucGfxClockIndexLow].ulClk;
	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexLow].ulMemClk;

	/* Second (high) level. */
	performance_level = &(vega10_power_state->performance_levels
			[vega10_power_state->performance_level_count++]);
	performance_level->soc_clock = socclk_dep_table->entries
			[state_entry->ucSocClockIndexHigh].ulClk;
	if (gfxclk_dep_table->ucRevId == 0) {
		/* under vega10 pp one vf mode, the gfx clk dpm need be lower
		 * to level-4 due to the limited 110w-power
		 */
		if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
			performance_level->gfx_clock =
					gfxclk_dep_table->entries[4].ulClk;
		else
			performance_level->gfx_clock = gfxclk_dep_table->entries
					[state_entry->ucGfxClockIndexHigh].ulClk;
	} else if (gfxclk_dep_table->ucRevId == 1) {
		/* Rev 1 tables carry V2 records in the same entries array. */
		patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
		if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0))
			performance_level->gfx_clock = patom_record_V2[4].ulClk;
		else
			performance_level->gfx_clock =
					patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
	}

	performance_level->mem_clock = mclk_dep_table->entries
			[state_entry->ucMemClockIndexHigh].ulMemClk;
	return 0;
}
3135
/*
 * Build one driver power state from powerplay-table entry @entry_index.
 *
 * Delegates the table parsing to vega10_get_powerplay_table_entry() with
 * vega10_get_pp_table_entry_callback_func as the per-entry decoder, then
 * fills in the fields that need the fully-parsed state.  The parse result
 * is deliberately ignored (marked __unused); the function always returns 0.
 */
static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result __unused;
	struct vega10_power_state *ps;

	state->hardware.magic = PhwVega10_Magic;

	ps = cast_phw_vega10_power_state(&state->hardware);

	result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
			vega10_get_pp_table_entry_callback_func);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state
	 */
	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	return 0;
}
3162
/* Boot-state patch hook: nothing needs adjusting on Vega10, so a no-op. */
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}
3168
/*
 * Adjust the requested power state before it is applied:
 *  - cap clocks at the DC limits when running on battery,
 *  - enforce the DAL minimum display clocks,
 *  - apply the stable-pstate override when that cap is set,
 *  - decide whether MCLK switching must be disabled for the current
 *    display configuration and pick a latency-tolerable MCLK if so.
 * Always returns 0.
 */
static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
			struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct vega10_power_state *vega10_ps =
			cast_phw_vega10_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	bool disable_mclk_switching_for_vr;
	bool force_mclk_high;
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	int32_t count;
	uint32_t stable_pstate_sclk_dpm_percentage;
	uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
	uint32_t latency;

	/* Track whether the requested state is a battery (DC) state. */
	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	if (vega10_ps->performance_level_count != 2)
		pr_info("VI should always have 2 performance levels");

	max_limits = adev->pm.ac_power ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (!adev->pm.ac_power) {
		for (i = 0; i < vega10_ps->performance_level_count; i++) {
			if (vega10_ps->performance_levels[i].mem_clock >
				max_limits->mclk)
				vega10_ps->performance_levels[i].mem_clock =
						max_limits->mclk;
			if (vega10_ps->performance_levels[i].gfx_clock >
				max_limits->sclk)
				vega10_ps->performance_levels[i].gfx_clock =
						max_limits->sclk;
		}
	}

	/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
		/* Stable pstate pins SCLK to a percentage of the AC max and
		 * MCLK to the AC max, snapping SCLK to the nearest
		 * vdd_dep_on_sclk entry at or below the target.
		 */
		stable_pstate_sclk_dpm_percentage =
				data->registry_data.stable_pstate_sclk_dpm_percentage;
		PP_ASSERT_WITH_CODE(
			data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
			data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
			"percent sclk value must range from 1% to 100%, setting default value",
			stable_pstate_sclk_dpm_percentage = 75);

		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk *
				stable_pstate_sclk_dpm_percentage) / 100;

		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* Target below the lowest entry: fall back to the lowest. */
		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	disable_mclk_switching_for_frame_lock =
		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
	disable_mclk_switching_for_vr =
		PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);

	/* MCLK switching is unsafe with multiple unsynchronized displays,
	 * frame lock, VR, or an explicit force-high request.
	 */
	if (hwmgr->display_config->num_display == 0)
		disable_mclk_switching = false;
	else
		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
					  !hwmgr->display_config->multi_monitor_in_sync) ||
			disable_mclk_switching_for_frame_lock ||
			disable_mclk_switching_for_vr ||
			force_mclk_high;

	sclk = vega10_ps->performance_levels[0].gfx_clock;
	mclk = vega10_ps->performance_levels[0].mem_clock;

	/* Raise level 0 to the minimum clocks, clamped at the max limits. */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	vega10_ps->performance_levels[0].gfx_clock = sclk;
	vega10_ps->performance_levels[0].mem_clock = mclk;

	/* Keep level 0 no higher than level 1. */
	if (vega10_ps->performance_levels[1].gfx_clock <
			vega10_ps->performance_levels[0].gfx_clock)
		vega10_ps->performance_levels[0].gfx_clock =
				vega10_ps->performance_levels[1].gfx_clock;

	if (disable_mclk_switching) {
		/* Set Mclk the max of level 0 and level 1 */
		if (mclk < vega10_ps->performance_levels[1].mem_clock)
			mclk = vega10_ps->performance_levels[1].mem_clock;

		/* Find the lowest MCLK frequency that is within
		 * the tolerable latency defined in DAL
		 */
		latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
		for (i = 0; i < data->mclk_latency_table.count; i++) {
			if ((data->mclk_latency_table.entries[i].latency <= latency) &&
				(data->mclk_latency_table.entries[i].frequency >=
						vega10_ps->performance_levels[0].mem_clock) &&
				(data->mclk_latency_table.entries[i].frequency <=
						vega10_ps->performance_levels[1].mem_clock))
				mclk = data->mclk_latency_table.entries[i].frequency;
		}
		vega10_ps->performance_levels[0].mem_clock = mclk;
	} else {
		if (vega10_ps->performance_levels[1].mem_clock <
				vega10_ps->performance_levels[0].mem_clock)
			vega10_ps->performance_levels[0].mem_clock =
					vega10_ps->performance_levels[1].mem_clock;
	}

	/* Pin every level to the stable-pstate clocks when requested. */
	if (PP_CAP(PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < vega10_ps->performance_level_count; i++) {
			vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
			vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
		}
	}

	return 0;
}
3321
/*
 * Compare the new power state's top SCLK/MCLK against the current DPM
 * tables and record (in data->need_update_dpm_table) which tables need
 * repopulating: an overdrive update when the top clock changed, and an
 * MCLK update when the display count changed.  Always returns 0.
 */
static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	uint32_t sclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].gfx_clock;
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	uint32_t mclk = vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1].mem_clock;
	uint32_t i;

	/* Look for an exact SCLK match in the gfx DPM table. */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	/* No match: treat a higher clock as an OD change and stretch the
	 * top level (assumes sclk_table->count >= 1 - TODO confirm).
	 */
	if (i >= sclk_table->count) {
		if (sclk > sclk_table->dpm_levels[i-1].value) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			sclk_table->dpm_levels[i-1].value = sclk;
		}
	}

	/* Same check for MCLK in the memory DPM table. */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count) {
		if (mclk > mclk_table->dpm_levels[i-1].value) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			mclk_table->dpm_levels[i-1].value = mclk;
		}
	}

	/* A display count change forces an MCLK table refresh. */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3366
vega10_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr * hwmgr,const void * input)3367 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3368 struct pp_hwmgr *hwmgr, const void *input)
3369 {
3370 int result = 0;
3371 struct vega10_hwmgr *data = hwmgr->backend;
3372 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3373 struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
3374 struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
3375 int count;
3376
3377 if (!data->need_update_dpm_table)
3378 return 0;
3379
3380 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3381 for (count = 0; count < dpm_table->gfx_table.count; count++)
3382 dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3383 }
3384
3385 odn_clk_table = &odn_table->vdd_dep_on_mclk;
3386 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3387 for (count = 0; count < dpm_table->mem_table.count; count++)
3388 dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3389 }
3390
3391 if (data->need_update_dpm_table &
3392 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
3393 result = vega10_populate_all_graphic_levels(hwmgr);
3394 PP_ASSERT_WITH_CODE((0 == result),
3395 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3396 return result);
3397 }
3398
3399 if (data->need_update_dpm_table &
3400 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3401 result = vega10_populate_all_memory_levels(hwmgr);
3402 PP_ASSERT_WITH_CODE((0 == result),
3403 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3404 return result);
3405 }
3406
3407 vega10_populate_vddc_soc_levels(hwmgr);
3408
3409 return result;
3410 }
3411
/*
 * Enable exactly those DPM levels whose clock value lies within
 * [low_limit, high_limit]; all others are disabled.
 */
static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		uint32_t clk = dpm_table->dpm_levels[i].value;

		dpm_table->dpm_levels[i].enabled =
				(clk >= low_limit) && (clk <= high_limit);
	}
	return 0;
}
3427
/*
 * Enable exactly those DPM levels whose clock value lies within
 * [low_limit, high_limit] AND whose bit is set in @disable_dpm_mask.
 * (Despite its name, the mask selects the levels that stay enabled.)
 */
static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit,
		uint32_t disable_dpm_mask)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		uint32_t clk = dpm_table->dpm_levels[i].value;

		dpm_table->dpm_levels[i].enabled =
				(clk >= low_limit) && (clk <= high_limit) &&
				((1 << i) & disable_dpm_mask);
	}
	return 0;
}
3446
vega10_trim_dpm_states(struct pp_hwmgr * hwmgr,const struct vega10_power_state * vega10_ps)3447 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3448 const struct vega10_power_state *vega10_ps)
3449 {
3450 struct vega10_hwmgr *data = hwmgr->backend;
3451 uint32_t high_limit_count;
3452
3453 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3454 "power state did not have any performance level",
3455 return -1);
3456
3457 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3458
3459 vega10_trim_single_dpm_states(hwmgr,
3460 &(data->dpm_table.soc_table),
3461 vega10_ps->performance_levels[0].soc_clock,
3462 vega10_ps->performance_levels[high_limit_count].soc_clock);
3463
3464 vega10_trim_single_dpm_states_with_mask(hwmgr,
3465 &(data->dpm_table.gfx_table),
3466 vega10_ps->performance_levels[0].gfx_clock,
3467 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3468 data->disable_dpm_mask);
3469
3470 vega10_trim_single_dpm_states(hwmgr,
3471 &(data->dpm_table.mem_table),
3472 vega10_ps->performance_levels[0].mem_clock,
3473 vega10_ps->performance_levels[high_limit_count].mem_clock);
3474
3475 return 0;
3476 }
3477
vega10_find_lowest_dpm_level(struct vega10_single_dpm_table * table)3478 static uint32_t vega10_find_lowest_dpm_level(
3479 struct vega10_single_dpm_table *table)
3480 {
3481 uint32_t i;
3482
3483 for (i = 0; i < table->count; i++) {
3484 if (table->dpm_levels[i].enabled)
3485 break;
3486 }
3487
3488 return i;
3489 }
3490
vega10_find_highest_dpm_level(struct vega10_single_dpm_table * table)3491 static uint32_t vega10_find_highest_dpm_level(
3492 struct vega10_single_dpm_table *table)
3493 {
3494 uint32_t i = 0;
3495
3496 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3497 for (i = table->count; i > 0; i--) {
3498 if (table->dpm_levels[i - 1].enabled)
3499 return i - 1;
3500 }
3501 } else {
3502 pr_info("DPM Table Has Too Many Entries!");
3503 return MAX_REGULAR_DPM_NUMBER - 1;
3504 }
3505
3506 return i;
3507 }
3508
/* Intentionally empty: Vega10 applies no DAL minimum-voltage request. */
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
}
3514
vega10_get_soc_index_for_max_uclk(struct pp_hwmgr * hwmgr)3515 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3516 {
3517 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3518 struct phm_ppt_v2_information *table_info =
3519 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3520
3521 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
3522
3523 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
3524 }
3525
/*
 * Push the cached boot (soft-minimum) DPM levels to the SMC for the
 * gfx, mem and soc domains. A domain is skipped when its DPM was
 * disabled via registry key or when its soft-min already matches the
 * cached value. Each value actually sent is mirrored back into
 * dpm_table.*.dpm_state.soft_min_level. Always returns 0.
 */
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t socclk_idx;

	/* Currently a no-op on Vega10. */
	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		if (data->smc_state_table.gfx_boot_level !=
				data->dpm_table.gfx_table.dpm_state.soft_min_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMinGfxclkByIndex,
				data->smc_state_table.gfx_boot_level);

			data->dpm_table.gfx_table.dpm_state.soft_min_level =
					data->smc_state_table.gfx_boot_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_boot_level !=
				data->dpm_table.mem_table.dpm_state.soft_min_level) {
			/*
			 * At the top UCLK level on bare metal, raise the SOCCLK
			 * soft-min (index derived from the mclk dependency
			 * table) instead of sending a UCLK message -- presumably
			 * so SOCCLK can sustain max UCLK; confirm with SMU docs.
			 */
			if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
			    && hwmgr->not_vf) {
				socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMinSocclkByIndex,
						socclk_idx);
			} else {
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMinUclkByIndex,
						data->smc_state_table.mem_boot_level);
			}
			data->dpm_table.mem_table.dpm_state.soft_min_level =
					data->smc_state_table.mem_boot_level;
		}
	}

	/* VFs stop here: the SOCCLK domain is only programmed on the PF. */
	if (!hwmgr->not_vf)
		return 0;

	if (!data->registry_data.socclk_dpm_key_disabled) {
		if (data->smc_state_table.soc_boot_level !=
				data->dpm_table.soc_table.dpm_state.soft_min_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSoftMinSocclkByIndex,
					data->smc_state_table.soc_boot_level);
			data->dpm_table.soc_table.dpm_state.soft_min_level =
					data->smc_state_table.soc_boot_level;
		}
	}

	return 0;
}
3580
/*
 * Push the cached maximum (soft-maximum) DPM levels to the SMC for the
 * gfx, mem and soc domains. Mirrors vega10_upload_dpm_bootup_level():
 * a domain is skipped when disabled by registry key or already in sync,
 * each value sent is cached in dpm_table.*.dpm_state.soft_max_level,
 * and VFs never touch the SOCCLK domain. Always returns 0.
 */
static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	/* Currently a no-op on Vega10. */
	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		if (data->smc_state_table.gfx_max_level !=
			data->dpm_table.gfx_table.dpm_state.soft_max_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
				data->smc_state_table.gfx_max_level);
			data->dpm_table.gfx_table.dpm_state.soft_max_level =
					data->smc_state_table.gfx_max_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_max_level !=
			data->dpm_table.mem_table.dpm_state.soft_max_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSoftMaxUclkByIndex,
					data->smc_state_table.mem_max_level);
			data->dpm_table.mem_table.dpm_state.soft_max_level =
					data->smc_state_table.mem_max_level;
		}
	}

	/* VFs stop here: the SOCCLK domain is only programmed on the PF. */
	if (!hwmgr->not_vf)
		return 0;

	if (!data->registry_data.socclk_dpm_key_disabled) {
		if (data->smc_state_table.soc_max_level !=
			data->dpm_table.soc_table.dpm_state.soft_max_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSoftMaxSocclkByIndex,
					data->smc_state_table.soc_max_level);
			data->dpm_table.soc_table.dpm_state.soft_max_level =
					data->smc_state_table.soc_max_level;
		}
	}

	return 0;
}
3625
/*
 * Trim the DPM tables to the clock window of the new power state,
 * recompute the boot (lowest enabled) and max (highest enabled) level
 * indices for the gfx/mem/soc domains, upload them to the SMC, and
 * finally mark every level from boot up to (but not including) max as
 * enabled in each table.
 *
 * @input is a struct phm_set_power_state_input carrying the new state.
 * Returns 0 on success, -1 when trimming or either upload fails.
 */
static int vega10_generate_dpm_level_enable_mask(
		struct pp_hwmgr *hwmgr, const void *input)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	int i;

	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
			"Attempt to Trim DPM States Failed!",
			return -1);

	/* Boot = lowest enabled level, max = highest enabled, per domain. */
	data->smc_state_table.gfx_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.soc_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table));
	data->smc_state_table.soc_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.soc_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Attempt to upload DPM Bootup Levels Failed!",
			return -1);
	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Attempt to upload DPM Max Levels Failed!",
			return -1);
	/* Re-enable the interior of the [boot, max) window in each table. */
	for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;


	for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
		data->dpm_table.mem_table.dpm_levels[i].enabled = true;

	for (i = data->smc_state_table.soc_boot_level; i < data->smc_state_table.soc_max_level; i++)
		data->dpm_table.soc_table.dpm_levels[i].enabled = true;

	return 0;
}
3671
/*
 * Toggle the SMU's VCE DPM feature. A no-op when the feature is not
 * supported; returns -1 when the SMC rejects the request, 0 otherwise.
 */
int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (!data->smu_features[GNLD_DPM_VCE].supported)
		return 0;

	PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
			enable,
			data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
			"Attempt to Enable/Disable DPM VCE Failed!",
			return -1);
	data->smu_features[GNLD_DPM_VCE].enabled = enable;

	return 0;
}
3687
vega10_update_sclk_threshold(struct pp_hwmgr * hwmgr)3688 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3689 {
3690 struct vega10_hwmgr *data = hwmgr->backend;
3691 uint32_t low_sclk_interrupt_threshold = 0;
3692
3693 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
3694 (data->low_sclk_interrupt_threshold != 0)) {
3695 low_sclk_interrupt_threshold =
3696 data->low_sclk_interrupt_threshold;
3697
3698 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3699 cpu_to_le32(low_sclk_interrupt_threshold);
3700
3701 /* This message will also enable SmcToHost Interrupt */
3702 smum_send_msg_to_smc_with_parameter(hwmgr,
3703 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3704 (uint32_t)low_sclk_interrupt_threshold);
3705 }
3706
3707 return 0;
3708 }
3709
/*
 * Apply a new power state: refresh the DPM tables from the request,
 * upload SCLK/MCLK levels and the enable mask, reprogram the low-SCLK
 * threshold, push the PPTable to the SMU, and re-evaluate AVFS.
 *
 * Each preparatory step latches its error into @result but the sequence
 * continues; only a failed PPTable upload aborts early. Returns 0 when
 * the upload succeeds (earlier step failures are only logged via the
 * PP_ASSERT_WITH_CODE messages).
 */
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = vega10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to update SCLK threshold!",
			result = tmp_result);

	/* Upload the refreshed PPTable to the SMU. */
	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	/*
	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
	 * That effectively disables AVFS feature.
	 */
	if(hwmgr->hardcode_pp_table != NULL)
		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;

	vega10_update_avfs(hwmgr);

	/*
	 * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
	 * That will help to keep AVFS disabled.
	 */
	data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;

	return 0;
}
3758
vega10_dpm_get_sclk(struct pp_hwmgr * hwmgr,bool low)3759 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3760 {
3761 struct pp_power_state *ps;
3762 struct vega10_power_state *vega10_ps;
3763
3764 if (hwmgr == NULL)
3765 return -EINVAL;
3766
3767 ps = hwmgr->request_ps;
3768
3769 if (ps == NULL)
3770 return -EINVAL;
3771
3772 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3773
3774 if (low)
3775 return vega10_ps->performance_levels[0].gfx_clock;
3776 else
3777 return vega10_ps->performance_levels
3778 [vega10_ps->performance_level_count - 1].gfx_clock;
3779 }
3780
vega10_dpm_get_mclk(struct pp_hwmgr * hwmgr,bool low)3781 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3782 {
3783 struct pp_power_state *ps;
3784 struct vega10_power_state *vega10_ps;
3785
3786 if (hwmgr == NULL)
3787 return -EINVAL;
3788
3789 ps = hwmgr->request_ps;
3790
3791 if (ps == NULL)
3792 return -EINVAL;
3793
3794 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3795
3796 if (low)
3797 return vega10_ps->performance_levels[0].mem_clock;
3798 else
3799 return vega10_ps->performance_levels
3800 [vega10_ps->performance_level_count-1].mem_clock;
3801 }
3802
/*
 * Query the SMC for current package power. Returns -EINVAL when @query
 * is NULL, otherwise stores the reading and returns 0.
 */
static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
		uint32_t *query)
{
	uint32_t watts;

	if (query == NULL)
		return -EINVAL;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
	watts = smum_get_argument(hwmgr);

	/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
	*query = watts << 8;

	return 0;
}
3819
/*
 * Read one sensor value identified by @idx into @value, setting *size
 * to the number of bytes written (4 for 32-bit readings, 8 for the SMC
 * feature mask; note the GFX_SCLK, GPU_POWER and VDDGFX paths leave
 * *size untouched). Returns 0 on success, -EINVAL for unknown sensors
 * or an out-of-range UCLK index.
 */
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	int ret = 0;
	uint32_t val_vid;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		/* SMC reports MHz; scale by 100 for the 10 kHz interface unit. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
		sclk_mhz = smum_get_argument(hwmgr);
		*((uint32_t *)value) = sclk_mhz * 100;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		/* SMC returns a UCLK index; translate via the DPM table. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
		mclk_idx = smum_get_argument(hwmgr);
		if (mclk_idx < dpm_table->mem_table.count) {
			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
			*size = 4;
		} else {
			ret = -EINVAL;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		/* Clamp to 100% in case the SMC reports an overshoot. */
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
		activity_percent = smum_get_argument(hwmgr);
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
		*((uint32_t *)value) = smum_get_argument(hwmgr) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
		*((uint32_t *)value) = smum_get_argument(hwmgr) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = powered, 0 = power-gated. */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		/* Read the SVI0 plane-0 VID from the SMUIO register and
		 * convert it to a voltage. */
		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
		*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
		return 0;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
		if (!ret)
			*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
3897
/* Tell the SMC whether UCLK fast switching is allowed (1) or not (0). */
static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
		bool has_disp)
{
	uint32_t fast_switch = has_disp ? 1 : 0;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			fast_switch);
}
3905
vega10_display_clock_voltage_request(struct pp_hwmgr * hwmgr,struct pp_display_clock_request * clock_req)3906 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3907 struct pp_display_clock_request *clock_req)
3908 {
3909 int result = 0;
3910 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3911 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3912 DSPCLK_e clk_select = 0;
3913 uint32_t clk_request = 0;
3914
3915 switch (clk_type) {
3916 case amd_pp_dcef_clock:
3917 clk_select = DSPCLK_DCEFCLK;
3918 break;
3919 case amd_pp_disp_clock:
3920 clk_select = DSPCLK_DISPCLK;
3921 break;
3922 case amd_pp_pixel_clock:
3923 clk_select = DSPCLK_PIXCLK;
3924 break;
3925 case amd_pp_phy_clock:
3926 clk_select = DSPCLK_PHYCLK;
3927 break;
3928 default:
3929 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
3930 result = -1;
3931 break;
3932 }
3933
3934 if (!result) {
3935 clk_request = (clk_freq << 16) | clk_select;
3936 smum_send_msg_to_smc_with_parameter(hwmgr,
3937 PPSMC_MSG_RequestDisplayClockByFreq,
3938 clk_request);
3939 }
3940
3941 return result;
3942 }
3943
vega10_get_uclk_index(struct pp_hwmgr * hwmgr,struct phm_ppt_v1_clock_voltage_dependency_table * mclk_table,uint32_t frequency)3944 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3945 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3946 uint32_t frequency)
3947 {
3948 uint8_t count;
3949 uint8_t i;
3950
3951 if (mclk_table == NULL || mclk_table->count == 0)
3952 return 0;
3953
3954 count = (uint8_t)(mclk_table->count);
3955
3956 for(i = 0; i < count; i++) {
3957 if(mclk_table->entries[i].clk >= frequency)
3958 return i;
3959 }
3960
3961 return i-1;
3962 }
3963
/*
 * After a power-state adjustment, reconcile the SMC with the current
 * display configuration: decide whether UCLK fast switching is safe,
 * request a hard-min DCEFCLK matching the display's minimum (plus the
 * deep-sleep DCEFCLK floor), and raise the UCLK soft-min to cover the
 * display's minimum memory clock. Always returns 0.
 */
static int vega10_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *dpm_table =
			&data->dpm_table.dcef_table;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
	uint32_t idx;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct pp_display_clock_request clock_req;

	/*
	 * Fast UCLK switching is only unsafe with multiple unsynchronized
	 * displays (and p-state switching not already disabled).
	 */
	if ((hwmgr->display_config->num_display > 1) &&
	     !hwmgr->display_config->multi_monitor_in_sync &&
	     !hwmgr->display_config->nb_pstate_switch_disable)
		vega10_notify_smc_display_change(hwmgr, false);
	else
		vega10_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	/* Find the DCEF DPM level that exactly matches the requested clock. */
	for (i = 0; i < dpm_table->count; i++) {
		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
			break;
	}

	if (i < dpm_table->count) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
			/* Also program the deep-sleep DCEFCLK floor (100 kHz units). */
			smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR / 100);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	} else {
		pr_debug("Cannot find requested DCEFCLK!");
	}

	if (min_clocks.memoryClock != 0) {
		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
		data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
	}

	return 0;
}
4016
vega10_force_dpm_highest(struct pp_hwmgr * hwmgr)4017 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
4018 {
4019 struct vega10_hwmgr *data = hwmgr->backend;
4020
4021 data->smc_state_table.gfx_boot_level =
4022 data->smc_state_table.gfx_max_level =
4023 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4024 data->smc_state_table.mem_boot_level =
4025 data->smc_state_table.mem_max_level =
4026 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4027
4028 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4029 "Failed to upload boot level to highest!",
4030 return -1);
4031
4032 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4033 "Failed to upload dpm max level to highest!",
4034 return -1);
4035
4036 return 0;
4037 }
4038
vega10_force_dpm_lowest(struct pp_hwmgr * hwmgr)4039 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4040 {
4041 struct vega10_hwmgr *data = hwmgr->backend;
4042
4043 data->smc_state_table.gfx_boot_level =
4044 data->smc_state_table.gfx_max_level =
4045 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4046 data->smc_state_table.mem_boot_level =
4047 data->smc_state_table.mem_max_level =
4048 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4049
4050 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4051 "Failed to upload boot level to highest!",
4052 return -1);
4053
4054 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4055 "Failed to upload dpm max level to highest!",
4056 return -1);
4057
4058 return 0;
4059
4060 }
4061
vega10_unforce_dpm_levels(struct pp_hwmgr * hwmgr)4062 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4063 {
4064 struct vega10_hwmgr *data = hwmgr->backend;
4065
4066 data->smc_state_table.gfx_boot_level =
4067 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4068 data->smc_state_table.gfx_max_level =
4069 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4070 data->smc_state_table.mem_boot_level =
4071 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4072 data->smc_state_table.mem_max_level =
4073 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4074
4075 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4076 "Failed to upload DPM Bootup Levels!",
4077 return -1);
4078
4079 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4080 "Failed to upload DPM Max Levels!",
4081 return -1);
4082 return 0;
4083 }
4084
/*
 * Compute the DPM level masks for the UMD profiling modes. First seeds
 * the standard-pstate levels (and caches their clocks in
 * hwmgr->pstate_sclk/pstate_mclk) when the dependency tables are large
 * enough, then overrides per requested @level: MIN_SCLK/MIN_MCLK force
 * level 0, PEAK forces the top levels (gfx capped at level 4 in
 * pp-one-vf mode). Always returns 0.
 *
 * NOTE(review): when the tables are too small AND @level is none of
 * the handled profile modes, the output masks are left untouched --
 * callers appear to pre-zero them; confirm.
 */
static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
		table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
		table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		*sclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		*mclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		/* under vega10 pp one vf mode, the gfx clk dpm need be lower
		 * to level-4 due to the limited power
		 */
		if (hwmgr->pp_one_vf)
			*sclk_mask = 4;
		else
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
	}

	return 0;
}
4119
/*
 * Apply a fan control mode; PF only (VFs return immediately). NONE
 * pins the fan at 100%, MANUAL stops the SMC fan controller and AUTO
 * restarts it (the latter two only when MicrocodeFanControl is set).
 */
static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	if (!hwmgr->not_vf)
		return;

	if (mode == AMD_FAN_CTRL_NONE) {
		vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		return;
	}

	if (!PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
		return;

	if (mode == AMD_FAN_CTRL_MANUAL)
		vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
	else if (mode == AMD_FAN_CTRL_AUTO)
		vega10_fan_ctrl_start_smc_fan_control(hwmgr);
}
4141
/*
 * Force a clock domain to the level range described by @mask: the
 * lowest set bit becomes the boot (soft-min) level and the highest set
 * bit the soft-max level (both 0 when the mask is empty), then both
 * are uploaded to the SMC. DCEFCLK is unsupported; PCIE and unknown
 * types are silently ignored. Returns 0 on success, -EINVAL when an
 * upload fails.
 */
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	switch (type) {
	case PP_SCLK:
		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_MCLK:
		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);

		break;

	case PP_SOCCLK:
		data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);

		break;

	case PP_DCEFCLK:
		pr_info("Setting DCEFCLK min/max dpm level is not supported!\n");
		break;

	case PP_PCIE:
	default:
		break;
	}

	return 0;
}
4200
/*
 * Top-level forced-DPM-level dispatcher. HIGH/LOW/AUTO map to the
 * dedicated force/unforce helpers; the profiling modes compute clock
 * masks and force SCLK/MCLK accordingly; MANUAL and PROFILE_EXIT do
 * nothing. On the PF, entering or leaving PROFILE_PEAK also toggles
 * the fan between fixed-100% and automatic control.
 *
 * Returns 0 on success or the first helper's error code.
 */
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t soc_mask = 0;

	/* Seed the cached pstate clocks on first use (pstate_sclk == 0). */
	if (hwmgr->pstate_sclk == 0)
		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega10_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = vega10_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = vega10_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
		if (ret)
			return ret;
		vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
		vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	/* VFs do not control the fan. */
	if (!hwmgr->not_vf)
		return ret;

	if (!ret) {
		/* Fan policy follows PROFILE_PEAK transitions only. */
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
	}

	return ret;
}
4250
vega10_get_fan_control_mode(struct pp_hwmgr * hwmgr)4251 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4252 {
4253 struct vega10_hwmgr *data = hwmgr->backend;
4254
4255 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4256 return AMD_FAN_CTRL_MANUAL;
4257 else
4258 return AMD_FAN_CTRL_AUTO;
4259 }
4260
vega10_get_dal_power_level(struct pp_hwmgr * hwmgr,struct amd_pp_simple_clock_info * info)4261 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4262 struct amd_pp_simple_clock_info *info)
4263 {
4264 struct phm_ppt_v2_information *table_info =
4265 (struct phm_ppt_v2_information *)hwmgr->pptable;
4266 struct phm_clock_and_voltage_limits *max_limits =
4267 &table_info->max_clock_voltage_on_ac;
4268
4269 info->engine_max_clock = max_limits->sclk;
4270 info->memory_max_clock = max_limits->mclk;
4271
4272 return 0;
4273 }
4274
vega10_get_sclks(struct pp_hwmgr * hwmgr,struct pp_clock_levels_with_latency * clocks)4275 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4276 struct pp_clock_levels_with_latency *clocks)
4277 {
4278 struct phm_ppt_v2_information *table_info =
4279 (struct phm_ppt_v2_information *)hwmgr->pptable;
4280 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4281 table_info->vdd_dep_on_sclk;
4282 uint32_t i;
4283
4284 clocks->num_levels = 0;
4285 for (i = 0; i < dep_table->count; i++) {
4286 if (dep_table->entries[i].clk) {
4287 clocks->data[clocks->num_levels].clocks_in_khz =
4288 dep_table->entries[i].clk * 10;
4289 clocks->num_levels++;
4290 }
4291 }
4292
4293 }
4294
vega10_get_memclocks(struct pp_hwmgr * hwmgr,struct pp_clock_levels_with_latency * clocks)4295 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4296 struct pp_clock_levels_with_latency *clocks)
4297 {
4298 struct phm_ppt_v2_information *table_info =
4299 (struct phm_ppt_v2_information *)hwmgr->pptable;
4300 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4301 table_info->vdd_dep_on_mclk;
4302 struct vega10_hwmgr *data = hwmgr->backend;
4303 uint32_t j = 0;
4304 uint32_t i;
4305
4306 for (i = 0; i < dep_table->count; i++) {
4307 if (dep_table->entries[i].clk) {
4308
4309 clocks->data[j].clocks_in_khz =
4310 dep_table->entries[i].clk * 10;
4311 data->mclk_latency_table.entries[j].frequency =
4312 dep_table->entries[i].clk;
4313 clocks->data[j].latency_in_us =
4314 data->mclk_latency_table.entries[j].latency = 25;
4315 j++;
4316 }
4317 }
4318 clocks->num_levels = data->mclk_latency_table.count = j;
4319 }
4320
vega10_get_dcefclocks(struct pp_hwmgr * hwmgr,struct pp_clock_levels_with_latency * clocks)4321 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4322 struct pp_clock_levels_with_latency *clocks)
4323 {
4324 struct phm_ppt_v2_information *table_info =
4325 (struct phm_ppt_v2_information *)hwmgr->pptable;
4326 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4327 table_info->vdd_dep_on_dcefclk;
4328 uint32_t i;
4329
4330 for (i = 0; i < dep_table->count; i++) {
4331 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4332 clocks->data[i].latency_in_us = 0;
4333 clocks->num_levels++;
4334 }
4335 }
4336
vega10_get_socclocks(struct pp_hwmgr * hwmgr,struct pp_clock_levels_with_latency * clocks)4337 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4338 struct pp_clock_levels_with_latency *clocks)
4339 {
4340 struct phm_ppt_v2_information *table_info =
4341 (struct phm_ppt_v2_information *)hwmgr->pptable;
4342 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4343 table_info->vdd_dep_on_socclk;
4344 uint32_t i;
4345
4346 for (i = 0; i < dep_table->count; i++) {
4347 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4348 clocks->data[i].latency_in_us = 0;
4349 clocks->num_levels++;
4350 }
4351 }
4352
vega10_get_clock_by_type_with_latency(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)4353 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4354 enum amd_pp_clock_type type,
4355 struct pp_clock_levels_with_latency *clocks)
4356 {
4357 switch (type) {
4358 case amd_pp_sys_clock:
4359 vega10_get_sclks(hwmgr, clocks);
4360 break;
4361 case amd_pp_mem_clock:
4362 vega10_get_memclocks(hwmgr, clocks);
4363 break;
4364 case amd_pp_dcef_clock:
4365 vega10_get_dcefclocks(hwmgr, clocks);
4366 break;
4367 case amd_pp_soc_clock:
4368 vega10_get_socclocks(hwmgr, clocks);
4369 break;
4370 default:
4371 return -1;
4372 }
4373
4374 return 0;
4375 }
4376
vega10_get_clock_by_type_with_voltage(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)4377 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4378 enum amd_pp_clock_type type,
4379 struct pp_clock_levels_with_voltage *clocks)
4380 {
4381 struct phm_ppt_v2_information *table_info =
4382 (struct phm_ppt_v2_information *)hwmgr->pptable;
4383 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4384 uint32_t i;
4385
4386 switch (type) {
4387 case amd_pp_mem_clock:
4388 dep_table = table_info->vdd_dep_on_mclk;
4389 break;
4390 case amd_pp_dcef_clock:
4391 dep_table = table_info->vdd_dep_on_dcefclk;
4392 break;
4393 case amd_pp_disp_clock:
4394 dep_table = table_info->vdd_dep_on_dispclk;
4395 break;
4396 case amd_pp_pixel_clock:
4397 dep_table = table_info->vdd_dep_on_pixclk;
4398 break;
4399 case amd_pp_phy_clock:
4400 dep_table = table_info->vdd_dep_on_phyclk;
4401 break;
4402 default:
4403 return -1;
4404 }
4405
4406 for (i = 0; i < dep_table->count; i++) {
4407 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4408 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4409 entries[dep_table->entries[i].vddInd].us_vdd);
4410 clocks->num_levels++;
4411 }
4412
4413 if (i < dep_table->count)
4414 return -1;
4415
4416 return 0;
4417 }
4418
vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr * hwmgr,void * clock_range)4419 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4420 void *clock_range)
4421 {
4422 struct vega10_hwmgr *data = hwmgr->backend;
4423 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
4424 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4425
4426 if (!data->registry_data.disable_water_mark) {
4427 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
4428 data->water_marks_bitmap = WaterMarksExist;
4429 }
4430
4431 return 0;
4432 }
4433
/*
 * Print a human-readable table of all SMC powerplay features into @buf:
 * the raw enabled-feature mask, then one row per feature with its name,
 * bit value, and whether it is currently enabled.  Returns the number of
 * bytes written, or a negative value if the SMC query fails.
 */
static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
	/* Names indexed by GNLD_* feature bit position (GNLD_FEATURES_MAX rows). */
	static const char *ppfeature_name[] = {
			"DPM_PREFETCHER",
			"GFXCLK_DPM",
			"UCLK_DPM",
			"SOCCLK_DPM",
			"UVD_DPM",
			"VCE_DPM",
			"ULV",
			"MP0CLK_DPM",
			"LINK_DPM",
			"DCEFCLK_DPM",
			"AVFS",
			"GFXCLK_DS",
			"SOCCLK_DS",
			"LCLK_DS",
			"PPT",
			"TDC",
			"THERMAL",
			"GFX_PER_CU_CG",
			"RM",
			"DCEFCLK_DS",
			"ACDC",
			"VR0HOT",
			"VR1HOT",
			"FW_CTF",
			"LED_DISPLAY",
			"FAN_CONTROL",
			"FAST_PPT",
			"DIDT",
			"ACG",
			"PCC_LIMIT"};
	static const char *output_title[] = {
			"FEATURES",
			"BITMASK",
			"ENABLEMENT"};
	uint64_t features_enabled;
	int i;
	int ret = 0;
	int size = 0;

	ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
	/* NOTE(review): the "[EnableAllSmuFeatures]" tag looks copy-pasted
	 * from another function; the message text is otherwise accurate. */
	PP_ASSERT_WITH_CODE(!ret,
		"[EnableAllSmuFeatures] Failed to get enabled smc features!",
		return ret);

	size += sprintf(buf + size, "Current ppfeatures: 0x%016"PRIx64"\n", features_enabled);
	size += sprintf(buf + size, "%-19s %-22s %s\n",
				output_title[0],
				output_title[1],
				output_title[2]);
	/* One row per feature bit: name, mask value, Y/N enablement. */
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
					ppfeature_name[i],
					1ULL << i,
					(features_enabled & (1ULL << i)) ? "Y" : "N");
	}

	return size;
}
4495
/*
 * Apply a new powerplay feature mask: features currently enabled but not in
 * the new mask are disabled, features in the new mask but not enabled are
 * enabled.  Returns 0 on success or a negative errno.
 */
static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
	uint64_t enabled_mask;
	uint64_t enable_mask;
	uint64_t disable_mask;
	int ret;

	/* Reject masks carrying bits above the last known feature. */
	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
		return -EINVAL;

	ret = vega10_get_enabled_smc_features(hwmgr, &enabled_mask);
	if (ret)
		return ret;

	disable_mask = enabled_mask & ~new_ppfeature_masks;
	enable_mask = ~enabled_mask & new_ppfeature_masks;

	pr_debug("features_to_disable 0x%"PRIx64"\n", disable_mask);
	pr_debug("features_to_enable 0x%"PRIx64"\n", enable_mask);

	if (disable_mask) {
		ret = vega10_enable_smc_features(hwmgr, false, disable_mask);
		if (ret)
			return ret;
	}

	if (enable_mask) {
		ret = vega10_enable_smc_features(hwmgr, true, enable_mask);
		if (ret)
			return ret;
	}

	return 0;
}
4532
/*
 * Format the DPM levels of the requested clock domain into @buf for sysfs.
 * For the PP_* types the currently active level is queried from the SMU and
 * marked with '*'; for the OD_* types the overdrive tables/ranges are
 * printed instead.  Returns the number of bytes written (0 when the domain
 * is disabled or unknown).
 */
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
	struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;

	int i, now, size = 0, count = 0;

	switch (type) {
	case PP_SCLK:
		if (data->registry_data.sclk_dpm_key_disabled)
			break;

		/* ask the SMU which gfx level is currently selected */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
		now = smum_get_argument(hwmgr);

		/* under SR-IOV one-VF peak mode only the first 5 levels apply */
		if (hwmgr->pp_one_vf &&
		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
			count = 5;
		else
			count = sclk_table->count;
		for (i = 0; i < count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		if (data->registry_data.mclk_dpm_key_disabled)
			break;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_SOCCLK:
		if (data->registry_data.socclk_dpm_key_disabled)
			break;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < soc_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, soc_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_DCEFCLK:
		if (data->registry_data.dcefclk_dpm_key_disabled)
			break;

		/* DCEF reports a frequency in MHz, not a level index */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
		now = smum_get_argument(hwmgr);

		/* so the '*' match is by MHz value rather than by index */
		for (i = 0; i < dcef_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, dcef_table->dpm_levels[i].value / 100,
					(dcef_table->dpm_levels[i].value / 100 == now) ?
					"*" : "");
		break;
	case PP_PCIE:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	case OD_SCLK:
		/* overdrive sclk/vddc pairs; only meaningful with OD enabled */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
			for (i = 0; i < podn_vdd_dep->count; i++)
				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
					i, podn_vdd_dep->entries[i].clk / 100,
						 podn_vdd_dep->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
			for (i = 0; i < podn_vdd_dep->count; i++)
				size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
					i, podn_vdd_dep->entries[i].clk/100,
						 podn_vdd_dep->entries[i].vddc);
		}
		break;
	case OD_RANGE:
		/* legal overdrive ranges: golden bottom level .. vbios limit */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
				data->odn_dpm_table.min_vddc,
				data->odn_dpm_table.max_vddc);
		}
		break;
	default:
		break;
	}
	return size;
}
4652
vega10_display_configuration_changed_task(struct pp_hwmgr * hwmgr)4653 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4654 {
4655 struct vega10_hwmgr *data = hwmgr->backend;
4656 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4657 int result = 0;
4658
4659 if ((data->water_marks_bitmap & WaterMarksExist) &&
4660 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4661 result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
4662 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
4663 data->water_marks_bitmap |= WaterMarksLoaded;
4664 }
4665
4666 if (data->water_marks_bitmap & WaterMarksLoaded) {
4667 smum_send_msg_to_smc_with_parameter(hwmgr,
4668 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
4669 }
4670
4671 return result;
4672 }
4673
/*
 * Enable or disable UVD DPM in the SMC.  No-op when the feature is not
 * supported.  Returns 0 on success, -1 if the SMC request fails.
 */
int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (!data->smu_features[GNLD_DPM_UVD].supported)
		return 0;

	PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
			enable,
			data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
			"Attempt to Enable/Disable DPM UVD Failed!",
			return -1);
	/* Mirror the new hardware state in the driver bookkeeping. */
	data->smu_features[GNLD_DPM_UVD].enabled = enable;

	return 0;
}
4688
/* Record the VCE power-gating state and toggle VCE DPM to match
 * (DPM is disabled while the block is gated). */
static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->vce_power_gated = bgate;
	vega10_enable_disable_vce_dpm(hwmgr, !bgate);
}
4696
/* Record the UVD power-gating state and toggle UVD DPM to match
 * (DPM is disabled while the block is gated). */
static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = bgate;
	vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
4704
vega10_are_power_levels_equal(const struct vega10_performance_level * pl1,const struct vega10_performance_level * pl2)4705 static inline bool vega10_are_power_levels_equal(
4706 const struct vega10_performance_level *pl1,
4707 const struct vega10_performance_level *pl2)
4708 {
4709 return ((pl1->soc_clock == pl2->soc_clock) &&
4710 (pl1->gfx_clock == pl2->gfx_clock) &&
4711 (pl1->mem_clock == pl2->mem_clock));
4712 }
4713
/*
 * Decide whether two hardware power states are equivalent.  States match
 * when they have the same number of performance levels, every level pair is
 * equal, and the UVD/VCE clocks and sclk threshold agree.  The verdict is
 * written to *equal; returns 0, or -EINVAL on NULL arguments.
 */
static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
				const struct pp_hw_power_state *pstate1,
				const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct vega10_power_state *vega10_psa;
	const struct vega10_power_state *vega10_psb;
	int level;

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	vega10_psa = cast_const_phw_vega10_power_state(pstate1);
	vega10_psb = cast_const_phw_vega10_power_state(pstate2);

	/* Differing level counts already rule out equality. */
	if (vega10_psa->performance_level_count !=
	    vega10_psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	/* A single mismatching level pair makes the states different. */
	for (level = 0; level < vega10_psa->performance_level_count; level++) {
		if (!vega10_are_power_levels_equal(
				&vega10_psa->performance_levels[level],
				&vega10_psb->performance_levels[level])) {
			*equal = false;
			return 0;
		}
	}

	/* All levels match: break the tie with UVD/VCE clocks and threshold. */
	*equal = (vega10_psa->uvd_clks.vclk == vega10_psb->uvd_clks.vclk) &&
		 (vega10_psa->uvd_clks.dclk == vega10_psb->uvd_clks.dclk) &&
		 (vega10_psa->vce_clks.evclk == vega10_psb->vce_clks.evclk) &&
		 (vega10_psa->vce_clks.ecclk == vega10_psb->vce_clks.ecclk) &&
		 (vega10_psa->sclk_threshold == vega10_psb->sclk_threshold);

	return 0;
}
4748
4749 static bool
vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr * hwmgr)4750 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4751 {
4752 struct vega10_hwmgr *data = hwmgr->backend;
4753 bool is_update_required = false;
4754
4755 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4756 is_update_required = true;
4757
4758 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4759 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
4760 is_update_required = true;
4761 }
4762
4763 return is_update_required;
4764 }
4765
/*
 * Tear down all DPM-related features in sequence (power containment, DIDT,
 * AVFS, DPM itself, deep sleep, ULV, ACG, PCC).  Each step logs its own
 * failure but the teardown continues; the last failing step's status is
 * returned.  A VF (virtual function) has no hardware control, so it skips
 * everything.  The ordering below mirrors the enable path in reverse —
 * do not reorder casually.
 */
static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (!hwmgr->not_vf)
		return 0;

	if (PP_CAP(PHM_PlatformCaps_ThermalController))
		vega10_disable_thermal_protection(hwmgr);

	tmp_result = vega10_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = vega10_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable didt config!", result = tmp_result);

	tmp_result = vega10_avfs_enable(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep!", result = tmp_result);

	tmp_result = vega10_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ulv!", result = tmp_result);

	tmp_result =  vega10_acg_disable(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable acg!", result = tmp_result);

	vega10_enable_disable_PCC_limit_feature(hwmgr, false);
	return result;
}
4807
/*
 * Prepare the ASIC for power-off: disable all DPM features and clear the
 * watermarks-loaded flag so the table is re-uploaded on the next power-up.
 */
static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	int result;

	result = vega10_disable_dpm_tasks(hwmgr);
	/* Log-only assertion: the empty action means we proceed regardless. */
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);
	data->water_marks_bitmap &= ~(WaterMarksLoaded);

	return result;
}
4821
vega10_get_sclk_od(struct pp_hwmgr * hwmgr)4822 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4823 {
4824 struct vega10_hwmgr *data = hwmgr->backend;
4825 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4826 struct vega10_single_dpm_table *golden_sclk_table =
4827 &(data->golden_dpm_table.gfx_table);
4828 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4829 int golden_value = golden_sclk_table->dpm_levels
4830 [golden_sclk_table->count - 1].value;
4831
4832 value -= golden_value;
4833 value = DIV_ROUND_UP(value * 100, golden_value);
4834
4835 return value;
4836 }
4837
/*
 * Apply an sclk overdrive percentage: raise the top gfx clock of the
 * requested power state by @value percent over the golden top level,
 * clamped to the VBIOS engine-clock limit.
 */
static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	struct pp_power_state *ps = hwmgr->request_ps;
	struct vega10_power_state *vega10_ps;
	struct vega10_performance_level *top_level;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
	top_level = &vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1];

	/* golden top scaled up by the overdrive percentage */
	top_level->gfx_clock =
			golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	/* never exceed what the VBIOS allows */
	if (top_level->gfx_clock >
	    hwmgr->platform_descriptor.overdriveLimit.engineClock) {
		top_level->gfx_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
		pr_warn("max sclk supported by vbios is %d\n",
				hwmgr->platform_descriptor.overdriveLimit.engineClock);
	}
	return 0;
}
4872
vega10_get_mclk_od(struct pp_hwmgr * hwmgr)4873 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4874 {
4875 struct vega10_hwmgr *data = hwmgr->backend;
4876 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4877 struct vega10_single_dpm_table *golden_mclk_table =
4878 &(data->golden_dpm_table.mem_table);
4879 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4880 int golden_value = golden_mclk_table->dpm_levels
4881 [golden_mclk_table->count - 1].value;
4882
4883 value -= golden_value;
4884 value = DIV_ROUND_UP(value * 100, golden_value);
4885
4886 return value;
4887 }
4888
/*
 * Apply an mclk overdrive percentage: raise the top memory clock of the
 * requested power state by @value percent over the golden top level,
 * clamped to the VBIOS memory-clock limit.
 */
static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	struct pp_power_state *ps = hwmgr->request_ps;
	struct vega10_power_state *vega10_ps;
	struct vega10_performance_level *top_level;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
	top_level = &vega10_ps->performance_levels
			[vega10_ps->performance_level_count - 1];

	/* golden top scaled up by the overdrive percentage */
	top_level->mem_clock =
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	/* never exceed what the VBIOS allows */
	if (top_level->mem_clock >
	    hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
		top_level->mem_clock =
			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
		pr_warn("max mclk supported by vbios is %d\n",
				hwmgr->platform_descriptor.overdriveLimit.memoryClock);
	}

	return 0;
}
4924
/*
 * Tell the SMU where the CAC/DRAM log buffer lives: the system virtual
 * address, the MC (GPU) address, and the buffer size, each split into
 * high/low 32-bit halves.  High halves are sent before low halves —
 * presumably the SMU latches the pair on the low write; keep the order.
 * Always returns 0 (the message sends are not checked).
 */
static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size);
	return 0;
}
4951
vega10_get_thermal_temperature_range(struct pp_hwmgr * hwmgr,struct PP_TemperatureRange * thermal_data)4952 static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4953 struct PP_TemperatureRange *thermal_data)
4954 {
4955 struct vega10_hwmgr *data = hwmgr->backend;
4956 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
4957
4958 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4959
4960 thermal_data->max = pp_table->TedgeLimit *
4961 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4962 thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
4963 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4964 thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
4965 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4966 thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
4967 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4968 thermal_data->mem_crit_max = pp_table->ThbmLimit *
4969 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4970 thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
4971 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4972
4973 return 0;
4974 }
4975
/*
 * Print the power-profile table into @buf: a header row, one row per
 * built-in profile (busy set point, FPS, RLC-busy flag, min active level),
 * and a final row for the CUSTOM profile using the values stored in the
 * backend.  The active profile is marked with '*'.  Returns bytes written,
 * or -EINVAL when @buf is NULL.
 */
static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, size = 0;
	/* Per-profile defaults: {busy_set_point, FPS, use_rlc_busy, min_active_level}. */
	static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
						{70, 60, 1, 3,},
						{90, 60, 0, 0,},
						{70, 60, 0, 0,},
						{70, 90, 0, 0,},
						{30, 60, 0, 6,},
						};
	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[6] = {"NUM",
			"MODE_NAME",
			"BUSY_SET_POINT",
			"FPS",
			"USE_RLC_BUSY",
			"MIN_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
			title[1], title[2], title[3], title[4], title[5]);

	/* Built-in profiles: indices 0 .. PP_SMC_POWER_PROFILE_CUSTOM-1. */
	for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);
	/* CUSTOM row (i == PP_SMC_POWER_PROFILE_CUSTOM after the loop),
	 * printed from the user-supplied values cached in the backend. */
	size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
			profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			data->custom_profile_mode[0], data->custom_profile_mode[1],
			data->custom_profile_mode[2], data->custom_profile_mode[3]);
	return size;
}
5018
/*
 * Select a power profile mode.  Per the powerplay sysfs contract the mode
 * number is stored at input[size], after @size custom parameters; for
 * PP_SMC_POWER_PROFILE_CUSTOM, input[0..3] carry busy set point, FPS,
 * use-RLC-busy and min active level (size == 4), or size == 0 re-applies
 * a previously stored custom profile.  Returns 0 or -EINVAL.
 */
static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint8_t busy_set_point;
	uint8_t FPS;
	uint8_t use_rlc_busy;
	uint8_t min_active_level;
	/* NOTE(review): input[size] is read before size is validated —
	 * callers are assumed to pass a buffer of at least size+1 longs. */
	uint32_t power_profile_mode = input[size];

	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (size != 0 && size != 4)
			return -EINVAL;

		/* If size = 0 and the CUSTOM profile has been set already
		 * then just apply the profile. The copy stored in the hwmgr
		 * is zeroed out on init
		 */
		if (size == 0) {
			if (data->custom_profile_mode[0] != 0)
				goto out;
			else
				return -EINVAL;
		}

		/* Cache the four custom parameters, then pack them into one
		 * 32-bit SMU message argument (one byte per parameter). */
		data->custom_profile_mode[0] = busy_set_point = input[0];
		data->custom_profile_mode[1] = FPS = input[1];
		data->custom_profile_mode[2] = use_rlc_busy = input[2];
		data->custom_profile_mode[3] = min_active_level = input[3];
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetCustomGfxDpmParameters,
					busy_set_point | FPS<<8 |
					use_rlc_busy << 16 | min_active_level<<24);
	}

out:
	/* Activate the chosen profile (bit mask, one bit per workload). */
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
						1 << power_profile_mode);
	hwmgr->power_profile_mode = power_profile_mode;

	return 0;
}
5060
5061
vega10_check_clk_voltage_valid(struct pp_hwmgr * hwmgr,enum PP_OD_DPM_TABLE_COMMAND type,uint32_t clk,uint32_t voltage)5062 static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5063 enum PP_OD_DPM_TABLE_COMMAND type,
5064 uint32_t clk,
5065 uint32_t voltage)
5066 {
5067 struct vega10_hwmgr *data = hwmgr->backend;
5068 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
5069 struct vega10_single_dpm_table *golden_table;
5070
5071 if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
5072 pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
5073 return false;
5074 }
5075
5076 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5077 golden_table = &(data->golden_dpm_table.gfx_table);
5078 if (golden_table->dpm_levels[0].value > clk ||
5079 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5080 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5081 golden_table->dpm_levels[0].value/100,
5082 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5083 return false;
5084 }
5085 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5086 golden_table = &(data->golden_dpm_table.mem_table);
5087 if (golden_table->dpm_levels[0].value > clk ||
5088 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5089 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5090 golden_table->dpm_levels[0].value/100,
5091 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5092 return false;
5093 }
5094 } else {
5095 return false;
5096 }
5097
5098 return true;
5099 }
5100
vega10_odn_update_power_state(struct pp_hwmgr * hwmgr)5101 static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
5102 {
5103 struct vega10_hwmgr *data = hwmgr->backend;
5104 struct pp_power_state *ps = hwmgr->request_ps;
5105 struct vega10_power_state *vega10_ps;
5106 struct vega10_single_dpm_table *gfx_dpm_table =
5107 &data->dpm_table.gfx_table;
5108 struct vega10_single_dpm_table *soc_dpm_table =
5109 &data->dpm_table.soc_table;
5110 struct vega10_single_dpm_table *mem_dpm_table =
5111 &data->dpm_table.mem_table;
5112 int max_level;
5113
5114 if (!ps)
5115 return;
5116
5117 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
5118 max_level = vega10_ps->performance_level_count - 1;
5119
5120 if (vega10_ps->performance_levels[max_level].gfx_clock !=
5121 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
5122 vega10_ps->performance_levels[max_level].gfx_clock =
5123 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
5124
5125 if (vega10_ps->performance_levels[max_level].soc_clock !=
5126 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
5127 vega10_ps->performance_levels[max_level].soc_clock =
5128 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
5129
5130 if (vega10_ps->performance_levels[max_level].mem_clock !=
5131 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
5132 vega10_ps->performance_levels[max_level].mem_clock =
5133 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
5134
5135 if (!hwmgr->ps)
5136 return;
5137
5138 ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
5139 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
5140 max_level = vega10_ps->performance_level_count - 1;
5141
5142 if (vega10_ps->performance_levels[max_level].gfx_clock !=
5143 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
5144 vega10_ps->performance_levels[max_level].gfx_clock =
5145 gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
5146
5147 if (vega10_ps->performance_levels[max_level].soc_clock !=
5148 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
5149 vega10_ps->performance_levels[max_level].soc_clock =
5150 soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
5151
5152 if (vega10_ps->performance_levels[max_level].mem_clock !=
5153 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
5154 vega10_ps->performance_levels[max_level].mem_clock =
5155 mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
5156 }
5157
/*
 * Reconcile the overdrive (ODN) tables after a user edit.
 *
 * For an SCLK edit, copy each edited vddc straight into the ODN vddc lookup
 * table.  For an MCLK edit, remap every edited mclk entry to a lookup-table
 * index whose voltage covers it (growing the last entry if needed), then
 * rebuild the ODN socclk dependency table and soc DPM levels so the soc
 * clock always keeps up with the highest edited mclk.  Finally propagate
 * the new maxima into the power states.
 */
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
						enum PP_OD_DPM_TABLE_COMMAND type)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
	/* starts at the golden mem table for the MCLK remap pass below;
	 * later re-pointed at the live soc table */
	struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;

	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
							&data->odn_dpm_table.vdd_dep_on_socclk;
	struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;

	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
	uint8_t i, j;

	if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
		/* sclk edits map 1:1 onto the vddc lookup table */
		for (i = 0; i < podn_vdd_dep->count; i++)
			od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
	} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
		podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
		/* find, for each edited mclk entry, the first lookup-table
		 * voltage above it; if none exists, raise the last entry */
		for (i = 0; i < dpm_table->count; i++) {
			for (j = 0; j < od_vddc_lookup_table->count; j++) {
				if (od_vddc_lookup_table->entries[j].us_vdd >
					podn_vdd_dep->entries[i].vddc)
					break;
			}
			if (j == od_vddc_lookup_table->count) {
				j = od_vddc_lookup_table->count - 1;
				od_vddc_lookup_table->entries[j].us_vdd =
					podn_vdd_dep->entries[i].vddc;
				data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
			}
			podn_vdd_dep->entries[i].vddInd = j;
		}
		dpm_table = &data->dpm_table.soc_table;
		/* rebuild the soc table: below the voltage of the top edited
		 * mclk entry, pull soc clocks up to that mclk; otherwise copy
		 * the stock dependency entries through unchanged */
		for (i = 0; i < dep_table->count; i++) {
			if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
			    dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
				data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
				for (; (i < dep_table->count) &&
				       (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
					podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
					dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
				}
				break;
			} else {
				dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
				podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
				podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
				podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
			}
		}
		/* make sure the top soc entry reaches the top edited mclk */
		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
					podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
				podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
			dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
				podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
		}
		/* and that its voltage index is at least the edited one */
		if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
					podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
			data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
			podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
				podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
		}
	}
	vega10_odn_update_power_state(hwmgr);
}
5228
/*
 * Edit the OverDrive DPM table on behalf of the pp_od_clk_voltage
 * sysfs interface.
 *
 * @type selects the table to edit (SCLK/MCLK), a restore of the
 * golden defaults, or a commit of previously staged edits.
 * @input holds (level, clock, voltage) triplets from userland;
 * @size is the number of longs in @input.
 *
 * Returns 0 on success (malformed triplets are logged and ignored,
 * matching the established sysfs semantics) or -EINVAL.
 */
static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
	struct vega10_single_dpm_table *dpm_table;

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;
	uint32_t i;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		dpm_table = &data->dpm_table.gfx_table;
		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		dpm_table = &data->dpm_table.mem_table;
		podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
		vega10_odn_initial_default_setting(hwmgr);
		vega10_odn_update_power_state(hwmgr);
		/* force to update all clock tables */
		data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
					      DPMTABLE_UPDATE_MCLK |
					      DPMTABLE_UPDATE_SOCCLK;
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		vega10_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < size; i += 3) {
		/*
		 * Reject truncated triplets and out-of-range level
		 * indices.  The explicit negative check matters: input[]
		 * is signed long, so on LP64 a negative level would
		 * compare less than the (promoted) unsigned table count
		 * and then be truncated into a huge uint32_t array index
		 * below, corrupting memory past dpm_levels[].
		 */
		if (i + 3 > size || input[i] < 0 ||
		    input[i] >= podn_vdd_dep_table->count) {
			pr_info("invalid clock voltage input\n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;	/* scale userland clock by 100 (MHz -> 10 kHz units, presumably) */
		input_vol = input[i+2];

		if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			dpm_table->dpm_levels[input_level].value = input_clk;
			podn_vdd_dep_table->entries[input_level].clk = input_clk;
			podn_vdd_dep_table->entries[input_level].vddc = input_vol;
		} else {
			return -EINVAL;
		}
	}
	vega10_odn_update_soc_table(hwmgr, type);
	return 0;
}
5294
vega10_set_mp1_state(struct pp_hwmgr * hwmgr,enum pp_mp1_state mp1_state)5295 static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
5296 enum pp_mp1_state mp1_state)
5297 {
5298 uint16_t msg;
5299 int ret;
5300
5301 switch (mp1_state) {
5302 case PP_MP1_STATE_UNLOAD:
5303 msg = PPSMC_MSG_PrepareMp1ForUnload;
5304 break;
5305 case PP_MP1_STATE_SHUTDOWN:
5306 case PP_MP1_STATE_RESET:
5307 case PP_MP1_STATE_NONE:
5308 default:
5309 return 0;
5310 }
5311
5312 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
5313 "[PrepareMp1] Failed!",
5314 return ret);
5315
5316 return 0;
5317 }
5318
/*
 * Fill in @level with the gfx/memory clocks of performance level
 * @index of the given power state.  An out-of-range @index is clamped
 * to the highest level rather than rejected.
 */
static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct vega10_power_state *vega10_ps;
	uint32_t level_index;

	if (hwmgr == NULL || state == NULL || level == NULL)
		return -EINVAL;

	vega10_ps = cast_const_phw_vega10_power_state(state);

	/* Clamp the requested index to the last valid performance level. */
	level_index = index;
	if (level_index > vega10_ps->performance_level_count - 1)
		level_index = vega10_ps->performance_level_count - 1;

	level->coreClock = vega10_ps->performance_levels[level_index].gfx_clock;
	level->memory_clock = vega10_ps->performance_levels[level_index].mem_clock;

	return 0;
}
5339
/*
 * Toggle the power-saving SMU features (ULV and the deep-sleep
 * clocks) that interfere with sustained compute performance.
 * @disable == true turns them all off; false turns them all back on.
 */
static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	/* The set of features traded away for compute performance. */
	static const int compute_features[] = {
		GNLD_ULV,
		GNLD_DS_GFXCLK,
		GNLD_DS_SOCCLK,
		GNLD_DS_LCLK,
		GNLD_DS_DCEFCLK,
	};
	uint32_t feature_mask = 0;
	uint32_t i;

	/*
	 * Collect the bitmaps of the features whose state must actually
	 * flip: when disabling, those currently enabled; when
	 * re-enabling, those currently disabled.
	 */
	for (i = 0; i < sizeof(compute_features) / sizeof(compute_features[0]); i++) {
		if (data->smu_features[compute_features[i]].enabled == disable)
			feature_mask |=
			    data->smu_features[compute_features[i]].smu_feature_bitmap;
	}

	if (feature_mask)
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				!disable, feature_mask),
				"enable/disable power features for compute performance Failed!",
				return -EINVAL);

	/* Record the new state for every feature in the set. */
	for (i = 0; i < sizeof(compute_features) / sizeof(compute_features[0]); i++)
		data->smu_features[compute_features[i]].enabled = !disable;

	return 0;
}
5392
/*
 * Dispatch table wiring the vega10 implementations into the generic
 * powerplay hardware-manager framework (struct pp_hwmgr_func).
 * Installed on hwmgr->hwmgr_func by vega10_hwmgr_init().
 */
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	/* thermal and fan control */
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	.read_sensor = vega10_read_sensor,
	/* display clock/voltage interface */
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega10_power_off_asic,
	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
	/* OverDrive (user overclocking) interface */
	.get_sclk_od = vega10_get_sclk_od,
	.set_sclk_od = vega10_set_sclk_od,
	.get_mclk_od = vega10_get_mclk_od,
	.set_mclk_od = vega10_set_mclk_od,
	.avfs_control = vega10_avfs_enable,
	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega10_start_thermal_controller,
	.get_power_profile_mode = vega10_get_power_profile_mode,
	.set_power_profile_mode = vega10_set_power_profile_mode,
	.set_power_limit = vega10_set_power_limit,
	.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
	.get_performance_level = vega10_get_performance_level,
	/* BACO (Bus Active, Chip Off) — shared smu9 helpers where possible */
	.get_asic_baco_capability = smu9_baco_get_capability,
	.get_asic_baco_state = smu9_baco_get_state,
	.set_asic_baco_state = vega10_baco_set_state,
	.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
	.get_ppfeature_status = vega10_get_ppfeature_status,
	.set_ppfeature_status = vega10_set_ppfeature_status,
	.set_mp1_state = vega10_set_mp1_state,
	.disable_power_features_for_compute_performance =
			vega10_disable_power_features_for_compute_performance,
};
5463
vega10_hwmgr_init(struct pp_hwmgr * hwmgr)5464 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
5465 {
5466 struct amdgpu_device *adev = hwmgr->adev;
5467
5468 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
5469 hwmgr->pptable_func = &vega10_pptable_funcs;
5470 if (amdgpu_passthrough(adev))
5471 return vega10_baco_set_cap(hwmgr);
5472
5473 return 0;
5474 }
5475