/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"

#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID	5
#define SMU10_MINIMUM_ENGINE_CLOCK	800	/* 8 MHz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT		12
#define SMU10_DISPCLK_BYPASS_THRESHOLD	10000	/* 100 MHz */
#define SMC_RAM_END			0x40000

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;

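/*
 * Translate a display clock request from the display code into the matching
 * "set hard minimum" SMC message.  DCF and FCLK requests are cached so a
 * repeated request for the already-programmed frequency is skipped; SOC
 * clock requests are always forwarded.  The request arrives in kHz and is
 * sent to the SMC in MHz.
 */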
static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
			struct pp_display_clock_request *clock_req)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPSMC_Msg msg;

	switch (clk_type) {
	case amd_pp_dcf_clock:
		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
			return 0;
		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
		smu10_data->dcf_actual_hard_min_freq = clk_freq;
		break;
	case amd_pp_soc_clock:
		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
		break;
	case amd_pp_f_clock:
		if (clk_freq == smu10_data->f_actual_hard_min_freq)
			return 0;
		smu10_data->f_actual_hard_min_freq = clk_freq;
		msg = PPSMC_MSG_SetHardMinFclkByFreq;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
		return -EINVAL;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);

	return 0;
}

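/*
 * Helpers to view a generic pp_hw_power_state as a SMU10 power state.  The
 * magic number written when the state is created is checked first, so a
 * state belonging to a different hwmgr backend is rejected with NULL.
 */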
static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (struct smu10_power_state *)hw_ps;
}

static const struct smu10_power_state *cast_const_smu10_ps(
				const struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (const struct smu10_power_state *)hw_ps;
}

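/*
 * Driver-side DPM defaults: thresholds, NB DPM state, and whether the driver
 * (rather than the firmware alone) controls GFXOFF, which follows the
 * PP_GFXOFF_MASK feature flag.  SCLK deep sleep and throttle-low
 * notification are disabled here and PowerPlay support is advertised.
 */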
static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->dce_slow_sclk_threshold = 30000;
	smu10_data->thermal_auto_throttling_treshold = 0;
	smu10_data->is_nb_dpm_enabled = 1;
	smu10_data->dpm_flags = 1;
	smu10_data->need_min_deep_sleep_dcefclk = true;
	smu10_data->num_active_display = 0;
	smu10_data->deep_sleep_dcefclk = 0;

	if (hwmgr->feature_mask & PP_GFXOFF_MASK)
		smu10_data->gfx_off_controled_by_driver = true;
	else
		smu10_data->gfx_off_controled_by_driver = false;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerPlaySupport);
	return 0;
}

static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}

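/*
 * Build the DAL power level to voltage dependency table consumed by the
 * display code.  SMU10 simply maps the eight PP_DAL_POWERLEVEL_* entries to
 * voltage indices 0..7; the table is owned by dyn_state and released in
 * smu10_hwmgr_backend_fini().
 */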
static int smu10_init_dynamic_state_adjustment_rule_settings(
							struct pp_hwmgr *hwmgr)
{
	uint32_t table_size =
		sizeof(struct phm_clock_voltage_dependency_table) +
		(7 * sizeof(struct phm_clock_voltage_dependency_record));

	struct phm_clock_voltage_dependency_table *table_clk_vlt =
					kzalloc(table_size, GFP_KERNEL);

	if (!table_clk_vlt) {
		pr_err("Can not allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

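/*
 * Collect static system information for the backend: thermal hysteresis and
 * throttling limits, the (currently empty) max power limits table and the
 * DAL power level dependency table built above.
 */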
static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;

	smu10_data->sys_info.htc_hyst_lmt = 5;
	smu10_data->sys_info.htc_tmp_lmt = 203;

	if (smu10_data->thermal_auto_throttling_treshold == 0)
		smu10_data->thermal_auto_throttling_treshold = 203;

	smu10_construct_max_power_limits_table(hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);

	return 0;
}

static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

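/*
 * Forward the minimum DCEF clock demanded by the current display
 * configuration as a hard minimum request.  display_config->min_dcef_set_clk
 * is multiplied by 10 to form the kHz value expected by
 * smu10_display_clock_voltage_request().
 */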
static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
	struct PP_Clocks clocks = {0};
	struct pp_display_clock_request clock_req;

	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	clock_req.clock_type = amd_pp_dcf_clock;
	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;

	PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
				"Attempt to set DCF Clock Failed!", return -EINVAL);

	return 0;
}

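/*
 * Deep-sleep DCEF clock and display count setters.  Both cache the last
 * value sent to the SMC and only issue the message when the value actually
 * changes, avoiding redundant firmware traffic.
 */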
static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->need_min_deep_sleep_dcefclk && smu10_data->deep_sleep_dcefclk != clock/100) {
		smu10_data->deep_sleep_dcefclk = clock/100;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetMinDeepSleepDcefclk,
					smu10_data->deep_sleep_dcefclk);
	}
	return 0;
}

static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->num_active_display != count) {
		smu10_data->num_active_display = count;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetDisplayCount,
				smu10_data->num_active_display);
	}

	return 0;
}

static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}

static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_data->vcn_power_gated = true;
	smu10_data->isp_tileA_power_gated = true;
	smu10_data->isp_tileB_power_gated = true;

	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetGfxCGPG,
						true);
	else
		return 0;
}


static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}

static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->separation_time = 0;
	smu10_data->cc6_disable = false;
	smu10_data->pstate_disable = false;
	smu10_data->cc6_setting_changed = false;

	return 0;
}

static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}

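/*
 * GFXOFF control.  A PWR_GFXOFF_STATUS field value of 0x2 is treated as
 * "GFX fully on"; smu10_disable_gfx_off() polls for it after asking the SMC
 * to leave GFXOFF so that callers can safely touch GFX registers again.
 * Both paths are no-ops unless GFXOFF is driver controlled.
 */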
static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
{
	uint32_t reg;
	struct amdgpu_device *adev = hwmgr->adev;

	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
		return true;

	return false;
}

static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->gfx_off_controled_by_driver) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);

		/* confirm gfx is back to "on" state */
		while (!smu10_is_gfx_on(hwmgr))
			msleep(1);
	}

	return 0;
}

static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->gfx_off_controled_by_driver)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);

	return 0;
}

static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (enable)
		return smu10_enable_gfx_off(hwmgr);
	else
		return smu10_disable_gfx_off(hwmgr);
}

static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}

/* temporary hardcoded clock voltage breakdown tables */
static const DpmClock_t VddDcfClk[] = {
	{ 300, 2600},
	{ 600, 3200},
	{ 600, 3600},
};

static const DpmClock_t VddSocClk[] = {
	{ 478, 2600},
	{ 722, 3200},
	{ 722, 3600},
};

static const DpmClock_t VddFClk[] = {
	{ 400, 2600},
	{1200, 3200},
	{1200, 3600},
};

static const DpmClock_t VddDispClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{1086, 3600},
};

static const DpmClock_t VddDppClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{ 661, 3600},
};

static const DpmClock_t VddPhyClk[] = {
	{ 540, 2600},
	{ 810, 3200},
	{ 810, 3600},
};

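/*
 * Convert a firmware DpmClock_t array (or one of the fallback tables above)
 * into a driver-allocated smu10_voltage_dependency_table.  Each entry pairs
 * a frequency, treated as MHz elsewhere in this file, with a voltage value;
 * the frequency is stored as clk in units of 10 kHz (Freq * 100).  The
 * resulting tables are freed in smu10_hwmgr_backend_fini().
 */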
static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
			struct smu10_voltage_dependency_table **pptable,
			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
	uint32_t table_size, i;
	struct smu10_voltage_dependency_table *ptable;

	table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
	ptable = kzalloc(table_size, GFP_KERNEL);

	if (!ptable)
		return -ENOMEM;

	ptable->count = num_entry;

	for (i = 0; i < ptable->count; i++) {
		ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
		ptable->entries[i].vol = pclk_dependency_table->Vol;
		pclk_dependency_table++;
	}

	*pptable = ptable;

	return 0;
}

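/*
 * Read the DPM clock table from the SMU.  When the firmware table is
 * populated (first DCEF entry non-zero) it provides the DCEF/SOC/FCLK/MEM
 * dependency tables; otherwise the hardcoded fallback tables are used for
 * DCEF, SOC and FCLK.  DISP, DPP and PHY always use the fallback tables.
 * Finally the min/max GFX clock limits are queried from the SMC and cached.
 */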
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	DpmClocks_t *table = &(smu10_data->clock_table);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);

	PP_ASSERT_WITH_CODE((0 == result),
			"Attempt to copy clock table from smc failed",
			return result);

	if (0 == result && table->DcefClocks[0].Freq != 0) {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						NUM_DCEFCLK_DPM_LEVELS,
						&smu10_data->clock_table.DcefClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						NUM_SOCCLK_DPM_LEVELS,
						&smu10_data->clock_table.SocClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						NUM_FCLK_DPM_LEVELS,
						&smu10_data->clock_table.FClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
						NUM_MEMCLK_DPM_LEVELS,
						&smu10_data->clock_table.MemClocks[0]);
	} else {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						ARRAY_SIZE(VddDcfClk),
						&VddDcfClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						ARRAY_SIZE(VddSocClk),
						&VddSocClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						ARRAY_SIZE(VddFClk),
						&VddFClk[0]);
	}
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
					ARRAY_SIZE(VddDispClk),
					&VddDispClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
	result = smum_get_argument(hwmgr);
	smu10_data->gfx_min_freq_limit = result / 10 * 1000;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
	result = smum_get_argument(hwmgr);
	smu10_data->gfx_max_freq_limit = result / 10 * 1000;

	return 0;
}

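/*
 * Backend init: allocate the smu10_hwmgr private data, apply the DPM
 * defaults, pull the clock tables from the SMU and fill in the platform
 * descriptor limits.  pstate_sclk/pstate_mclk are derived from the UMD
 * pstate defines, which are used as MHz values elsewhere in this file,
 * hence the multiplication by 100.
 */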
static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu10_hwmgr *data;

	data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu10_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu10_initialize_dpm_defaults failed\n");
		return result;
	}

	smu10_populate_clock_table(hwmgr);

	result = smu10_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu10_get_system_info_data failed\n");
		return result;
	}

	smu10_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.hardwarePerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.vbiosInterruptId = 0;

	hwmgr->platform_descriptor.clockStep.engineClock = 500;

	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;

	return result;
}

static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	kfree(pinfo->vdd_dep_on_dcefclk);
	pinfo->vdd_dep_on_dcefclk = NULL;
	kfree(pinfo->vdd_dep_on_socclk);
	pinfo->vdd_dep_on_socclk = NULL;
	kfree(pinfo->vdd_dep_on_fclk);
	pinfo->vdd_dep_on_fclk = NULL;
	kfree(pinfo->vdd_dep_on_dispclk);
	pinfo->vdd_dep_on_dispclk = NULL;
	kfree(pinfo->vdd_dep_on_dppclk);
	pinfo->vdd_dep_on_dppclk = NULL;
	kfree(pinfo->vdd_dep_on_phyclk);
	pinfo->vdd_dep_on_phyclk = NULL;

	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

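/*
 * Apply a forced DPM level by clamping the GFX, FCLK, SOCCLK and VCN clocks
 * with pairs of SetHardMin* and SetSoftMax* messages (frequencies in MHz).
 * HIGH/PROFILE_PEAK pin everything to the peak values, LOW and the
 * MIN_SCLK/MIN_MCLK profiles pin the respective clock to its minimum,
 * PROFILE_STANDARD uses the UMD pstate values and AUTO restores the full
 * min..max range.  Firmware older than 0x1E3700 does not support these
 * messages, so the request is skipped with an informational message.
 */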
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	struct smu10_hwmgr *data = hwmgr->backend;

	if (hwmgr->smu_version < 0x1E3700) {
		pr_info("smu firmware version too old, can not set dpm level\n");
		return 0;
	}

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						hwmgr->display_config->num_display > 3 ?
						SMU10_UMD_PSTATE_PEAK_FCLK :
						SMU10_UMD_PSTATE_MIN_FCLK);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_MIN_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_MIN_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_MIN_FCLK);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return 0;
}

static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
	else
		return data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}

static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->gfx_min_freq_limit;
	else
		return data->gfx_max_freq_limit;
}

static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}

static int smu10_dpm_get_pp_table_entry_callback(
					struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps,
					unsigned int index,
					const void *clock_info)
{
	struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);

	smu10_ps->levels[index].engine_clock = 0;

	smu10_ps->levels[index].vddc_index = 0;
	smu10_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu10_ps->levels[index].ds_divider_index = 5;
		smu10_ps->levels[index].ss_divider_index = 5;
	}

	return 0;
}

static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}

static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu10_power_state *smu10_ps;

	ps->hardware.magic = SMU10_Magic;

	smu10_ps = cast_smu10_ps(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu10_dpm_get_pp_table_entry_callback);

	smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu10_power_state);
}

static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}


static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (separation_time != data->separation_time ||
			cc6_disable != data->cc6_disable ||
			pstate_disable != data->pstate_disable) {
		data->separation_time = separation_time;
		data->cc6_disable = cc6_disable;
		data->pstate_disable = pstate_disable;
		data->cc6_setting_changed = true;
	}
	return 0;
}

static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	return -EINVAL;
}

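/*
 * Clamp the allowed SCLK/MCLK range from a level bitmask (as written through
 * the pp_dpm_sclk and pp_dpm_mclk sysfs files): the lowest and highest set
 * bits select the hard minimum and soft maximum.  SCLK exposes exactly three
 * levels (min, the SMU10_UMD_PSTATE_GFXCLK intermediate level, max), while
 * MCLK levels come from the FCLK dependency table.
 */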
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct smu10_voltage_dependency_table *mclk_table =
					data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t low, high;

	low = mask ? (ffs(mask) - 1) : 0;
	high = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case PP_SCLK:
		if (low > 2 || high > 2) {
			pr_info("Currently sclk only support 3 levels on RV\n");
			return -EINVAL;
		}

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						low == 2 ? data->gfx_max_freq_limit/100 :
						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_min_freq_limit/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						high == 0 ? data->gfx_min_freq_limit/100 :
						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_max_freq_limit/100);
		break;

	case PP_MCLK:
		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
			return -EINVAL;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						mclk_table->entries[low].clk/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						mclk_table->entries[high].clk/100);
		break;

	case PP_PCIE:
	default:
		break;
	}
	return 0;
}

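/*
 * Format the available SCLK/MCLK levels for the pp_dpm_* sysfs files.  Only
 * the min and max GFX limits are known, so the currently reported SCLK is
 * shown as a synthetic middle level when it matches neither limit; MCLK
 * levels are listed straight from the FCLK dependency table with the active
 * level marked by '*'.
 */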
static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_voltage_dependency_table *mclk_table =
			data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
		now = smum_get_argument(hwmgr);

		/* driver only knows min/max gfx_clk, add level 1 for all other gfx clks */
		if (now == data->gfx_max_freq_limit/100)
			i = 2;
		else if (now == data->gfx_min_freq_limit/100)
			i = 0;
		else
			i = 1;

		size += sprintf(buf + size, "0: %uMhz %s\n",
					data->gfx_min_freq_limit/100,
					i == 0 ? "*" : "");
		size += sprintf(buf + size, "1: %uMhz %s\n",
					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
					i == 1 ? "*" : "");
		size += sprintf(buf + size, "2: %uMhz %s\n",
					data->gfx_max_freq_limit/100,
					i == 2 ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i,
					mclk_table->entries[i].clk / 100,
					((mclk_table->entries[i].clk / 100)
					 == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	struct smu10_hwmgr *data;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (index == 0) {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
		level->coreClock = data->gfx_min_freq_limit;
	} else {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
		level->coreClock = data->gfx_max_freq_limit;
	}

	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}

static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct smu10_power_state *ps = cast_const_smu10_ps(state);

	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));

	return 0;
}

#define MEM_FREQ_LOW_LATENCY	25000
#define MEM_FREQ_HIGH_LATENCY	80000
#define MEM_LATENCY_HIGH	245
#define MEM_LATENCY_LOW		35
#define MEM_LATENCY_ERR		0xFFFF

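/*
 * Rough memory latency lookup used when reporting clock levels to the
 * display code: clocks between MEM_FREQ_LOW_LATENCY and MEM_FREQ_HIGH_LATENCY
 * (same units as the dependency-table clk values) get MEM_LATENCY_HIGH,
 * faster clocks get MEM_LATENCY_LOW, and anything slower is flagged with
 * MEM_LATENCY_ERR.  The value is reported through latency_in_us.
 */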
smu10_get_mem_latency(struct pp_hwmgr * hwmgr,uint32_t clock)942*b843c749SSergey Zigachev static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
943*b843c749SSergey Zigachev uint32_t clock)
944*b843c749SSergey Zigachev {
945*b843c749SSergey Zigachev if (clock >= MEM_FREQ_LOW_LATENCY &&
946*b843c749SSergey Zigachev clock < MEM_FREQ_HIGH_LATENCY)
947*b843c749SSergey Zigachev return MEM_LATENCY_HIGH;
948*b843c749SSergey Zigachev else if (clock >= MEM_FREQ_HIGH_LATENCY)
949*b843c749SSergey Zigachev return MEM_LATENCY_LOW;
950*b843c749SSergey Zigachev else
951*b843c749SSergey Zigachev return MEM_LATENCY_ERR;
952*b843c749SSergey Zigachev }
953*b843c749SSergey Zigachev
smu10_get_clock_by_type_with_latency(struct pp_hwmgr * hwmgr,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)954*b843c749SSergey Zigachev static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
955*b843c749SSergey Zigachev enum amd_pp_clock_type type,
956*b843c749SSergey Zigachev struct pp_clock_levels_with_latency *clocks)
957*b843c749SSergey Zigachev {
958*b843c749SSergey Zigachev uint32_t i;
959*b843c749SSergey Zigachev struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
960*b843c749SSergey Zigachev struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
961*b843c749SSergey Zigachev struct smu10_voltage_dependency_table *pclk_vol_table;
962*b843c749SSergey Zigachev bool latency_required = false;
963*b843c749SSergey Zigachev
964*b843c749SSergey Zigachev if (pinfo == NULL)
965*b843c749SSergey Zigachev return -EINVAL;
966*b843c749SSergey Zigachev
967*b843c749SSergey Zigachev switch (type) {
968*b843c749SSergey Zigachev case amd_pp_mem_clock:
969*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_mclk;
970*b843c749SSergey Zigachev latency_required = true;
971*b843c749SSergey Zigachev break;
972*b843c749SSergey Zigachev case amd_pp_f_clock:
973*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_fclk;
974*b843c749SSergey Zigachev latency_required = true;
975*b843c749SSergey Zigachev break;
976*b843c749SSergey Zigachev case amd_pp_dcf_clock:
977*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
978*b843c749SSergey Zigachev break;
979*b843c749SSergey Zigachev case amd_pp_disp_clock:
980*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_dispclk;
981*b843c749SSergey Zigachev break;
982*b843c749SSergey Zigachev case amd_pp_phy_clock:
983*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_phyclk;
984*b843c749SSergey Zigachev break;
985*b843c749SSergey Zigachev case amd_pp_dpp_clock:
986*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_dppclk;
987*b843c749SSergey Zigachev break;
988*b843c749SSergey Zigachev default:
989*b843c749SSergey Zigachev return -EINVAL;
990*b843c749SSergey Zigachev }
991*b843c749SSergey Zigachev
992*b843c749SSergey Zigachev if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
993*b843c749SSergey Zigachev return -EINVAL;
994*b843c749SSergey Zigachev
995*b843c749SSergey Zigachev clocks->num_levels = 0;
996*b843c749SSergey Zigachev for (i = 0; i < pclk_vol_table->count; i++) {
997*b843c749SSergey Zigachev if (pclk_vol_table->entries[i].clk) {
998*b843c749SSergey Zigachev clocks->data[clocks->num_levels].clocks_in_khz =
999*b843c749SSergey Zigachev pclk_vol_table->entries[i].clk * 10;
1000*b843c749SSergey Zigachev clocks->data[clocks->num_levels].latency_in_us = latency_required ?
1001*b843c749SSergey Zigachev smu10_get_mem_latency(hwmgr,
1002*b843c749SSergey Zigachev pclk_vol_table->entries[i].clk) :
1003*b843c749SSergey Zigachev 0;
1004*b843c749SSergey Zigachev clocks->num_levels++;
1005*b843c749SSergey Zigachev }
1006*b843c749SSergey Zigachev }
1007*b843c749SSergey Zigachev
1008*b843c749SSergey Zigachev return 0;
1009*b843c749SSergey Zigachev }
1010*b843c749SSergey Zigachev
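/*
 * Same table walk as above, but each level is paired with the voltage it
 * requires (entries[i].vol, reported through voltage_in_mv) instead of a
 * latency estimate.
 */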
1011*b843c749SSergey Zigachev static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1012*b843c749SSergey Zigachev enum amd_pp_clock_type type,
1013*b843c749SSergey Zigachev struct pp_clock_levels_with_voltage *clocks)
1014*b843c749SSergey Zigachev {
1015*b843c749SSergey Zigachev uint32_t i;
1016*b843c749SSergey Zigachev struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1017*b843c749SSergey Zigachev struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
1018*b843c749SSergey Zigachev struct smu10_voltage_dependency_table *pclk_vol_table = NULL;
1019*b843c749SSergey Zigachev
1020*b843c749SSergey Zigachev if (pinfo == NULL)
1021*b843c749SSergey Zigachev return -EINVAL;
1022*b843c749SSergey Zigachev
1023*b843c749SSergey Zigachev switch (type) {
1024*b843c749SSergey Zigachev case amd_pp_mem_clock:
1025*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_mclk;
1026*b843c749SSergey Zigachev break;
1027*b843c749SSergey Zigachev case amd_pp_f_clock:
1028*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_fclk;
1029*b843c749SSergey Zigachev break;
1030*b843c749SSergey Zigachev case amd_pp_dcf_clock:
1031*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
1032*b843c749SSergey Zigachev break;
1033*b843c749SSergey Zigachev case amd_pp_soc_clock:
1034*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_socclk;
1035*b843c749SSergey Zigachev break;
1036*b843c749SSergey Zigachev case amd_pp_disp_clock:
1037*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_dispclk;
1038*b843c749SSergey Zigachev break;
1039*b843c749SSergey Zigachev case amd_pp_phy_clock:
1040*b843c749SSergey Zigachev pclk_vol_table = pinfo->vdd_dep_on_phyclk;
1041*b843c749SSergey Zigachev break;
1042*b843c749SSergey Zigachev default:
1043*b843c749SSergey Zigachev return -EINVAL;
1044*b843c749SSergey Zigachev }
1045*b843c749SSergey Zigachev
1046*b843c749SSergey Zigachev if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
1047*b843c749SSergey Zigachev return -EINVAL;
1048*b843c749SSergey Zigachev
1049*b843c749SSergey Zigachev clocks->num_levels = 0;
1050*b843c749SSergey Zigachev for (i = 0; i < pclk_vol_table->count; i++) {
1051*b843c749SSergey Zigachev if (pclk_vol_table->entries[i].clk) {
1052*b843c749SSergey Zigachev clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
1053*b843c749SSergey Zigachev clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
1054*b843c749SSergey Zigachev clocks->num_levels++;
1055*b843c749SSergey Zigachev }
1056*b843c749SSergey Zigachev }
1057*b843c749SSergey Zigachev
1058*b843c749SSergey Zigachev return 0;
1059*b843c749SSergey Zigachev }
1060*b843c749SSergey Zigachev
1061*b843c749SSergey Zigachev
1062*b843c749SSergey Zigachev
1063*b843c749SSergey Zigachev static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1064*b843c749SSergey Zigachev {
1065*b843c749SSergey Zigachev clocks->engine_max_clock = 80000; /* driver can't query the engine clock; temporarily hard-coded to 800 MHz */
1066*b843c749SSergey Zigachev return 0;
1067*b843c749SSergey Zigachev }
1068*b843c749SSergey Zigachev
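/*
 * Read the current temperature from THM_TCON_CUR_TMP. The raw field is in
 * 1/8 degC steps; when the range-select bit is set the value is additionally
 * offset by -49 degC. The result is scaled to power-play temperature units.
 */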
1069*b843c749SSergey Zigachev static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1070*b843c749SSergey Zigachev {
1071*b843c749SSergey Zigachev struct amdgpu_device *adev = hwmgr->adev;
1072*b843c749SSergey Zigachev uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
1073*b843c749SSergey Zigachev int cur_temp =
1074*b843c749SSergey Zigachev (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;
1075*b843c749SSergey Zigachev
1076*b843c749SSergey Zigachev if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
1077*b843c749SSergey Zigachev cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1078*b843c749SSergey Zigachev else
1079*b843c749SSergey Zigachev cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1080*b843c749SSergey Zigachev
1081*b843c749SSergey Zigachev return cur_temp;
1082*b843c749SSergey Zigachev }
1083*b843c749SSergey Zigachev
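/*
 * Generic sensor read-back: GFX_SCLK and GFX_MCLK are queried from the SMU
 * and scaled by 100 (the comments below note the 10 kHz unit), while
 * GPU_TEMP comes from the thermal controller read-out above.
 */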
1084*b843c749SSergey Zigachev static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1085*b843c749SSergey Zigachev void *value, int *size)
1086*b843c749SSergey Zigachev {
1087*b843c749SSergey Zigachev uint32_t sclk, mclk;
1088*b843c749SSergey Zigachev int ret = 0;
1089*b843c749SSergey Zigachev
1090*b843c749SSergey Zigachev switch (idx) {
1091*b843c749SSergey Zigachev case AMDGPU_PP_SENSOR_GFX_SCLK:
1092*b843c749SSergey Zigachev smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
1093*b843c749SSergey Zigachev sclk = smum_get_argument(hwmgr);
1094*b843c749SSergey Zigachev /* in units of 10KHZ */
1095*b843c749SSergey Zigachev *((uint32_t *)value) = sclk * 100;
1096*b843c749SSergey Zigachev *size = 4;
1097*b843c749SSergey Zigachev break;
1098*b843c749SSergey Zigachev case AMDGPU_PP_SENSOR_GFX_MCLK:
1099*b843c749SSergey Zigachev smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
1100*b843c749SSergey Zigachev mclk = smum_get_argument(hwmgr);
1101*b843c749SSergey Zigachev /* in units of 10KHZ */
1102*b843c749SSergey Zigachev *((uint32_t *)value) = mclk * 100;
1103*b843c749SSergey Zigachev *size = 4;
1104*b843c749SSergey Zigachev break;
1105*b843c749SSergey Zigachev case AMDGPU_PP_SENSOR_GPU_TEMP:
1106*b843c749SSergey Zigachev *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
1107*b843c749SSergey Zigachev break;
1108*b843c749SSergey Zigachev default:
1109*b843c749SSergey Zigachev ret = -EINVAL;
1110*b843c749SSergey Zigachev break;
1111*b843c749SSergey Zigachev }
1112*b843c749SSergey Zigachev
1113*b843c749SSergey Zigachev return ret;
1114*b843c749SSergey Zigachev }
1115*b843c749SSergey Zigachev
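/*
 * Cache the display-driver supplied watermark ranges in the backend copy of
 * the Watermarks_t table and push the table to the SMU through the table
 * manager interface.
 */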
1116*b843c749SSergey Zigachev static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1117*b843c749SSergey Zigachev void *clock_ranges)
1118*b843c749SSergey Zigachev {
1119*b843c749SSergey Zigachev struct smu10_hwmgr *data = hwmgr->backend;
1120*b843c749SSergey Zigachev struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
1121*b843c749SSergey Zigachev Watermarks_t *table = &(data->water_marks_table);
1122*b843c749SSergey Zigachev int result = 0;
1123*b843c749SSergey Zigachev
1124*b843c749SSergey Zigachev smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
1125*b843c749SSergey Zigachev smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
1126*b843c749SSergey Zigachev data->water_marks_exist = true;
1127*b843c749SSergey Zigachev return result;
1128*b843c749SSergey Zigachev }
1129*b843c749SSergey Zigachev
1130*b843c749SSergey Zigachev static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
1131*b843c749SSergey Zigachev {
1132*b843c749SSergey Zigachev
1133*b843c749SSergey Zigachev return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
1134*b843c749SSergey Zigachev }
1135*b843c749SSergey Zigachev
1136*b843c749SSergey Zigachev static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
1137*b843c749SSergey Zigachev {
1138*b843c749SSergey Zigachev return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
1139*b843c749SSergey Zigachev }
1140*b843c749SSergey Zigachev
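/*
 * VCN power gating: when gating, the VCN IP block is gated first and the SMU
 * is then asked to power the block down; ungating performs the same two steps
 * in reverse order.
 */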
1141*b843c749SSergey Zigachev static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
1142*b843c749SSergey Zigachev {
1143*b843c749SSergey Zigachev if (bgate) {
1144*b843c749SSergey Zigachev amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1145*b843c749SSergey Zigachev AMD_IP_BLOCK_TYPE_VCN,
1146*b843c749SSergey Zigachev AMD_PG_STATE_GATE);
1147*b843c749SSergey Zigachev smum_send_msg_to_smc_with_parameter(hwmgr,
1148*b843c749SSergey Zigachev PPSMC_MSG_PowerDownVcn, 0);
1149*b843c749SSergey Zigachev } else {
1150*b843c749SSergey Zigachev smum_send_msg_to_smc_with_parameter(hwmgr,
1151*b843c749SSergey Zigachev PPSMC_MSG_PowerUpVcn, 0);
1152*b843c749SSergey Zigachev amdgpu_device_ip_set_powergating_state(hwmgr->adev,
1153*b843c749SSergey Zigachev AMD_IP_BLOCK_TYPE_VCN,
1154*b843c749SSergey Zigachev AMD_PG_STATE_UNGATE);
1155*b843c749SSergey Zigachev }
1156*b843c749SSergey Zigachev }
1157*b843c749SSergey Zigachev
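/* Callback table wiring the SMU10 implementations above into the powerplay core. */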
1158*b843c749SSergey Zigachev static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
1159*b843c749SSergey Zigachev .backend_init = smu10_hwmgr_backend_init,
1160*b843c749SSergey Zigachev .backend_fini = smu10_hwmgr_backend_fini,
1162*b843c749SSergey Zigachev .apply_state_adjust_rules = smu10_apply_state_adjust_rules,
1163*b843c749SSergey Zigachev .force_dpm_level = smu10_dpm_force_dpm_level,
1164*b843c749SSergey Zigachev .get_power_state_size = smu10_get_power_state_size,
1165*b843c749SSergey Zigachev .powerdown_uvd = NULL,
1166*b843c749SSergey Zigachev .powergate_uvd = smu10_powergate_vcn,
1167*b843c749SSergey Zigachev .powergate_vce = NULL,
1168*b843c749SSergey Zigachev .get_mclk = smu10_dpm_get_mclk,
1169*b843c749SSergey Zigachev .get_sclk = smu10_dpm_get_sclk,
1170*b843c749SSergey Zigachev .patch_boot_state = smu10_dpm_patch_boot_state,
1171*b843c749SSergey Zigachev .get_pp_table_entry = smu10_dpm_get_pp_table_entry,
1172*b843c749SSergey Zigachev .get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
1173*b843c749SSergey Zigachev .set_cpu_power_state = smu10_set_cpu_power_state,
1174*b843c749SSergey Zigachev .store_cc6_data = smu10_store_cc6_data,
1175*b843c749SSergey Zigachev .force_clock_level = smu10_force_clock_level,
1176*b843c749SSergey Zigachev .print_clock_levels = smu10_print_clock_levels,
1177*b843c749SSergey Zigachev .get_dal_power_level = smu10_get_dal_power_level,
1178*b843c749SSergey Zigachev .get_performance_level = smu10_get_performance_level,
1179*b843c749SSergey Zigachev .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
1180*b843c749SSergey Zigachev .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
1181*b843c749SSergey Zigachev .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
1182*b843c749SSergey Zigachev .set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
1183*b843c749SSergey Zigachev .get_max_high_clocks = smu10_get_max_high_clocks,
1184*b843c749SSergey Zigachev .read_sensor = smu10_read_sensor,
1185*b843c749SSergey Zigachev .set_active_display_count = smu10_set_active_display_count,
1186*b843c749SSergey Zigachev .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
1187*b843c749SSergey Zigachev .dynamic_state_management_enable = smu10_enable_dpm_tasks,
1188*b843c749SSergey Zigachev .power_off_asic = smu10_power_off_asic,
1189*b843c749SSergey Zigachev .asic_setup = smu10_setup_asic_task,
1190*b843c749SSergey Zigachev .power_state_set = smu10_set_power_state_tasks,
1191*b843c749SSergey Zigachev .dynamic_state_management_disable = smu10_disable_dpm_tasks,
1192*b843c749SSergey Zigachev .powergate_mmhub = smu10_powergate_mmhub,
1193*b843c749SSergey Zigachev .smus_notify_pwe = smu10_smus_notify_pwe,
1194*b843c749SSergey Zigachev .gfx_off_control = smu10_gfx_off_control,
1195*b843c749SSergey Zigachev .display_clock_voltage_request = smu10_display_clock_voltage_request,
1196*b843c749SSergey Zigachev .powergate_gfx = smu10_gfx_off_control,
1197*b843c749SSergey Zigachev };
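/*
 * Illustrative sketch only (not part of this table): callers in the powerplay
 * core are assumed to reach these hooks through hwmgr->hwmgr_func, along the
 * lines of:
 *
 *	if (hwmgr->hwmgr_func->read_sensor)
 *		ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, &size);
 */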
1198*b843c749SSergey Zigachev
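/*
 * Entry point used by the powerplay core to install the SMU10 hwmgr callbacks
 * and the generic pptable handlers.
 */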
1199*b843c749SSergey Zigachev int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
1200*b843c749SSergey Zigachev {
1201*b843c749SSergey Zigachev hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
1202*b843c749SSergey Zigachev hwmgr->pptable_func = &pptable_funcs;
1203*b843c749SSergey Zigachev return 0;
1204*b843c749SSergey Zigachev }