/*	$NetBSD: amdgpu_smu10_hwmgr.c,v 1.4 2021/12/19 12:37:54 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_smu10_hwmgr.c,v 1.4 2021/12/19 12:37:54 riastradh Exp $");

#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "rv_ppsmc.h"
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"

#include <linux/nbsd-namespace.h>
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID	5
#define SMU10_MINIMUM_ENGINE_CLOCK	800	/* 8 MHz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT		12
#define SMU10_DISPCLK_BYPASS_THRESHOLD	10000	/* 100 MHz */
#define SMC_RAM_END			0x40000

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;

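/*
 * Forward a display clock request to the SMC.  The request arrives in
 * kHz and is converted to MHz before sending; DCEFCLK and FCLK
 * requests are cached so an unchanged frequency is not re-sent to the
 * firmware.
 */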
static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock_req)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPSMC_Msg msg;

	switch (clk_type) {
	case amd_pp_dcf_clock:
		if (clk_freq == smu10_data->dcf_actual_hard_min_freq)
			return 0;
		msg = PPSMC_MSG_SetHardMinDcefclkByFreq;
		smu10_data->dcf_actual_hard_min_freq = clk_freq;
		break;
	case amd_pp_soc_clock:
		msg = PPSMC_MSG_SetHardMinSocclkByFreq;
		break;
	case amd_pp_f_clock:
		if (clk_freq == smu10_data->f_actual_hard_min_freq)
			return 0;
		smu10_data->f_actual_hard_min_freq = clk_freq;
		msg = PPSMC_MSG_SetHardMinFclkByFreq;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
		return -EINVAL;
	}
	smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);

	return 0;
}

static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (struct smu10_power_state *)hw_ps;
}

static const struct smu10_power_state *cast_const_smu10_ps(
				const struct pp_hw_power_state *hw_ps)
{
	if (SMU10_Magic != hw_ps->magic)
		return NULL;

	return (const struct smu10_power_state *)hw_ps;
}

static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->dce_slow_sclk_threshold = 30000;
	smu10_data->thermal_auto_throttling_treshold = 0;
	smu10_data->is_nb_dpm_enabled = 1;
	smu10_data->dpm_flags = 1;
	smu10_data->need_min_deep_sleep_dcefclk = true;
	smu10_data->num_active_display = 0;
	smu10_data->deep_sleep_dcefclk = 0;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkThrottleLowNotification);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerPlaySupport);
	return 0;
}

static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}

static int smu10_init_dynamic_state_adjustment_rule_settings(
							struct pp_hwmgr *hwmgr)
{
	struct phm_clock_voltage_dependency_table *table_clk_vlt;

	table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
				GFP_KERNEL);

	if (NULL == table_clk_vlt) {
		pr_err("Cannot allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

static int smu10_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)hwmgr->backend;

	smu10_data->sys_info.htc_hyst_lmt = 5;
	smu10_data->sys_info.htc_tmp_lmt = 203;

	if (smu10_data->thermal_auto_throttling_treshold == 0)
		smu10_data->thermal_auto_throttling_treshold = 203;

	smu10_construct_max_power_limits_table(hwmgr,
				    &hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu10_init_dynamic_state_adjustment_rule_settings(hwmgr);

	return 0;
}

static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
	struct PP_Clocks clocks = {0};
	struct pp_display_clock_request clock_req;

	clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	clock_req.clock_type = amd_pp_dcf_clock;
	clock_req.clock_freq_in_khz = clocks.dcefClock * 10;

	PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr, &clock_req),
				"Attempt to set DCF Clock Failed!", return -EINVAL);

	return 0;
}

static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->need_min_deep_sleep_dcefclk &&
	    smu10_data->deep_sleep_dcefclk != clock) {
		smu10_data->deep_sleep_dcefclk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetMinDeepSleepDcefclk,
					smu10_data->deep_sleep_dcefclk);
	}
	return 0;
}

static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->dcf_actual_hard_min_freq &&
	    smu10_data->dcf_actual_hard_min_freq != clock) {
		smu10_data->dcf_actual_hard_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinDcefclkByFreq,
					smu10_data->dcf_actual_hard_min_freq);
	}
	return 0;
}

static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->f_actual_hard_min_freq &&
	    smu10_data->f_actual_hard_min_freq != clock) {
		smu10_data->f_actual_hard_min_freq = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinFclkByFreq,
					smu10_data->f_actual_hard_min_freq);
	}
	return 0;
}

static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (smu10_data->num_active_display != count) {
		smu10_data->num_active_display = count;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetDisplayCount,
				smu10_data->num_active_display);
	}

	return 0;
}

static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}

static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	smu10_data->vcn_power_gated = true;
	smu10_data->isp_tileA_power_gated = true;
	smu10_data->isp_tileB_power_gated = true;

	if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
							   PPSMC_MSG_SetGfxCGPG,
							   true);
	else
		return 0;
}


static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}

static int smu10_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	smu10_data->separation_time = 0;
	smu10_data->cc6_disable = false;
	smu10_data->pstate_disable = false;
	smu10_data->cc6_setting_changed = false;

	return 0;
}

static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}

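/*
 * Check whether the GFX core is out of GFXOFF: the two-bit
 * PWR_GFXOFF_STATUS field of mmPWR_MISC_CNTL_STATUS reads 2 when GFX
 * is fully on.
 */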
static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
{
	uint32_t reg;
	struct amdgpu_device *adev = hwmgr->adev;

	reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);
	if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
	    (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT))
		return true;

	return false;
}

static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);

		/* confirm gfx is back to "on" state */
		while (!smu10_is_gfx_on(hwmgr))
			msleep(1);
	}

	return 0;
}

static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);

	return 0;
}

static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}

static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (enable)
		return smu10_enable_gfx_off(hwmgr);
	else
		return smu10_disable_gfx_off(hwmgr);
}

static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}

/* temporary hardcoded clock voltage breakdown tables */
static const DpmClock_t VddDcfClk[] = {
	{ 300, 2600},
	{ 600, 3200},
	{ 600, 3600},
};

static const DpmClock_t VddSocClk[] = {
	{ 478, 2600},
	{ 722, 3200},
	{ 722, 3600},
};

static const DpmClock_t VddFClk[] = {
	{ 400, 2600},
	{1200, 3200},
	{1200, 3600},
};

static const DpmClock_t VddDispClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{1086, 3600},
};

static const DpmClock_t VddDppClk[] = {
	{ 435, 2600},
	{ 661, 3200},
	{ 661, 3600},
};

static const DpmClock_t VddPhyClk[] = {
	{ 540, 2600},
	{ 810, 3200},
	{ 810, 3600},
};

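/*
 * Copy a firmware DpmClock_t array into a driver-allocated dependency
 * table.  Given the 10 kHz convention used elsewhere in this file, the
 * firmware frequencies appear to be in MHz, hence the multiply by 100.
 */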
static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
			struct smu10_voltage_dependency_table **pptable,
			uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
	uint32_t table_size, i;
	struct smu10_voltage_dependency_table *ptable;

	table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
	ptable = kzalloc(table_size, GFP_KERNEL);

	if (NULL == ptable)
		return -ENOMEM;

	ptable->count = num_entry;

	for (i = 0; i < ptable->count; i++) {
		ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
		ptable->entries[i].vol = pclk_dependency_table->Vol;
		pclk_dependency_table++;
	}

	*pptable = ptable;

	return 0;
}

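/*
 * Fetch the DPM clock table from the SMC and build the per-domain
 * clock/voltage dependency tables, falling back to the hardcoded
 * tables above when the firmware reports an empty DCEFCLK table.
 * Also cache the min/max GFXCLK limits reported by the SMC.
 */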
static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
{
	uint32_t result;

	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	DpmClocks_t *table = &(smu10_data->clock_table);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	result = smum_smc_table_manager(hwmgr, (uint8_t *)table, SMU10_CLOCKTABLE, true);

	PP_ASSERT_WITH_CODE((0 == result),
			"Attempt to copy clock table from smc failed",
			return result);

	if (0 == result && table->DcefClocks[0].Freq != 0) {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						NUM_DCEFCLK_DPM_LEVELS,
						&smu10_data->clock_table.DcefClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						NUM_SOCCLK_DPM_LEVELS,
						&smu10_data->clock_table.SocClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						NUM_FCLK_DPM_LEVELS,
						&smu10_data->clock_table.FClocks[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_mclk,
						NUM_MEMCLK_DPM_LEVELS,
						&smu10_data->clock_table.MemClocks[0]);
	} else {
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dcefclk,
						ARRAY_SIZE(VddDcfClk),
						&VddDcfClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_socclk,
						ARRAY_SIZE(VddSocClk),
						&VddSocClk[0]);
		smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_fclk,
						ARRAY_SIZE(VddFClk),
						&VddFClk[0]);
	}
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dispclk,
					ARRAY_SIZE(VddDispClk),
					&VddDispClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_dppclk,
					ARRAY_SIZE(VddDppClk), &VddDppClk[0]);
	smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
					ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
	result = smum_get_argument(hwmgr);
	smu10_data->gfx_min_freq_limit = result / 10 * 1000;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
	result = smum_get_argument(hwmgr);
	smu10_data->gfx_max_freq_limit = result / 10 * 1000;

	return 0;
}

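/*
 * Allocate the smu10 backend and initialize DPM defaults, the SMC
 * clock tables, system info and the platform descriptor limits.
 */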
static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu10_hwmgr *data;

	data = kzalloc(sizeof(struct smu10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu10_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu10_initialize_dpm_defaults failed\n");
		return result;
	}

	smu10_populate_clock_table(hwmgr);

	result = smu10_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu10_get_system_info_data failed\n");
		return result;
	}

	smu10_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.hardwarePerformanceLevels =
						SMU10_MAX_HARDWARE_POWERLEVELS;

	hwmgr->platform_descriptor.vbiosInterruptId = 0;

	hwmgr->platform_descriptor.clockStep.engineClock = 500;

	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
	hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;

	return result;
}

static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);

	kfree(pinfo->vdd_dep_on_dcefclk);
	pinfo->vdd_dep_on_dcefclk = NULL;
	kfree(pinfo->vdd_dep_on_socclk);
	pinfo->vdd_dep_on_socclk = NULL;
	kfree(pinfo->vdd_dep_on_fclk);
	pinfo->vdd_dep_on_fclk = NULL;
	kfree(pinfo->vdd_dep_on_dispclk);
	pinfo->vdd_dep_on_dispclk = NULL;
	kfree(pinfo->vdd_dep_on_dppclk);
	pinfo->vdd_dep_on_dppclk = NULL;
	kfree(pinfo->vdd_dep_on_phyclk);
	pinfo->vdd_dep_on_phyclk = NULL;

	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}

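/*
 * Apply a forced DPM level by sending paired hard-minimum and
 * soft-maximum frequency messages (in MHz) for GFXCLK, FCLK, SOCCLK
 * and VCN.  Needs SMU firmware 0x1E3700 or newer.
 */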
static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
	uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;

	if (hwmgr->smu_version < 0x1E3700) {
		pr_info("smu firmware version too old, cannot set dpm level\n");
		return 0;
	}

	if (min_sclk < data->gfx_min_freq_limit)
		min_sclk = data->gfx_min_freq_limit;

	min_sclk /= 100; /* convert from 10 kHz units to MHz */
	if (min_mclk < data->clock_table.FClocks[0].Freq)
		min_mclk = data->clock_table.FClocks[0].Freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						min_sclk);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						SMU10_UMD_PSTATE_GFXCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						min_sclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						hwmgr->display_config->num_display > 3 ?
						SMU10_UMD_PSTATE_PEAK_FCLK :
						min_mclk);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinSocclkByFreq,
						SMU10_UMD_PSTATE_MIN_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinVcn,
						SMU10_UMD_PSTATE_MIN_VCE);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_max_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						SMU10_UMD_PSTATE_PEAK_FCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxSocclkByFreq,
						SMU10_UMD_PSTATE_PEAK_SOCCLK);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxVcn,
						SMU10_UMD_PSTATE_VCE);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						data->gfx_min_freq_limit/100);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						min_mclk);
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						min_mclk);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}
	return 0;
}

static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
	else
		return data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}

static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu10_hwmgr *data;

	if (hwmgr == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (low)
		return data->gfx_min_freq_limit;
	else
		return data->gfx_max_freq_limit;
}

static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}

static int smu10_dpm_get_pp_table_entry_callback(
						struct pp_hwmgr *hwmgr,
						struct pp_hw_power_state *hw_ps,
						unsigned int index,
						const void *clock_info)
{
	struct smu10_power_state *smu10_ps = cast_smu10_ps(hw_ps);

	smu10_ps->levels[index].engine_clock = 0;

	smu10_ps->levels[index].vddc_index = 0;
	smu10_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu10_ps->levels[index].ds_divider_index = 5;
		smu10_ps->levels[index].ss_divider_index = 5;
	}

	return 0;
}

static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}

static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		    unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu10_power_state *smu10_ps;

	ps->hardware.magic = SMU10_Magic;

	smu10_ps = cast_smu10_ps(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu10_dpm_get_pp_table_entry_callback);

	smu10_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu10_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu10_power_state);
}

static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}


static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (separation_time != data->separation_time ||
	    cc6_disable != data->cc6_disable ||
	    pstate_disable != data->pstate_disable) {
		data->separation_time = separation_time;
		data->cc6_disable = cc6_disable;
		data->pstate_disable = pstate_disable;
		data->cc6_setting_changed = true;
	}
	return 0;
}

static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	return -EINVAL;
}

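/*
 * Clamp the sclk/mclk range to the levels selected in the mask: the
 * lowest set bit becomes the hard minimum and the highest set bit the
 * soft maximum.  Only three sclk levels (min, UMD pstate, max) exist
 * on this chip.
 */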
static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct smu10_voltage_dependency_table *mclk_table =
					data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t low, high;

	low = mask ? (ffs(mask) - 1) : 0;
	high = mask ? (fls(mask) - 1) : 0;

	switch (type) {
	case PP_SCLK:
		if (low > 2 || high > 2) {
			pr_info("Currently sclk only supports 3 levels on RV\n");
			return -EINVAL;
		}

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinGfxClk,
						low == 2 ? data->gfx_max_freq_limit/100 :
						low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_min_freq_limit/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxGfxClk,
						high == 0 ? data->gfx_min_freq_limit/100 :
						high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
						data->gfx_max_freq_limit/100);
		break;

	case PP_MCLK:
		if (low > mclk_table->count - 1 || high > mclk_table->count - 1)
			return -EINVAL;

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetHardMinFclkByFreq,
						mclk_table->entries[low].clk/100);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMaxFclkByFreq,
						mclk_table->entries[high].clk/100);
		break;

	case PP_PCIE:
	default:
		break;
	}
	return 0;
}

static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_voltage_dependency_table *mclk_table =
			data->clock_vol_info.vdd_dep_on_fclk;
	uint32_t i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
		now = smum_get_argument(hwmgr);

		/* driver only knows min/max gfx_clk; report level 1 for all other gfx clks */
		if (now == data->gfx_max_freq_limit/100)
			i = 2;
		else if (now == data->gfx_min_freq_limit/100)
			i = 0;
		else
			i = 1;

		size += sprintf(buf + size, "0: %uMhz %s\n",
					data->gfx_min_freq_limit/100,
					i == 0 ? "*" : "");
		size += sprintf(buf + size, "1: %uMhz %s\n",
					i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
					i == 1 ? "*" : "");
		size += sprintf(buf + size, "2: %uMhz %s\n",
					data->gfx_max_freq_limit/100,
					i == 2 ? "*" : "");
		break;
	case PP_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
		now = smum_get_argument(hwmgr);

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i,
					mclk_table->entries[i].clk / 100,
					((mclk_table->entries[i].clk / 100)
					 == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

static int smu10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	struct smu10_hwmgr *data;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (index == 0) {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
		level->coreClock = data->gfx_min_freq_limit;
	} else {
		level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
			data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
		level->coreClock = data->gfx_max_freq_limit;
	}

	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}

static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct smu10_power_state *ps = cast_const_smu10_ps(state);

	clock_info->min_eng_clk = ps->levels[0].engine_clock / (1 << (ps->levels[0].ss_divider_index));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engine_clock / (1 << (ps->levels[ps->level - 1].ss_divider_index));

	return 0;
}

#define MEM_FREQ_LOW_LATENCY	25000
#define MEM_FREQ_HIGH_LATENCY	80000
#define MEM_LATENCY_HIGH	245
#define MEM_LATENCY_LOW		35
#define MEM_LATENCY_ERR		0xFFFF


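/*
 * Rough memory latency lookup keyed on the clock in the driver's
 * 10 kHz units, i.e. below roughly 250 MHz the latency is unknown,
 * 250-800 MHz is the high-latency band and 800 MHz or above the
 * low-latency band.
 */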
static uint32_t smu10_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	if (clock >= MEM_FREQ_LOW_LATENCY &&
			clock < MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_HIGH;
	else if (clock >= MEM_FREQ_HIGH_LATENCY)
		return MEM_LATENCY_LOW;
	else
		return MEM_LATENCY_ERR;
}

static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	uint32_t i;
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
	struct smu10_voltage_dependency_table *pclk_vol_table;
	bool latency_required = false;

	if (pinfo == NULL)
		return -EINVAL;

	switch (type) {
	case amd_pp_mem_clock:
		pclk_vol_table = pinfo->vdd_dep_on_mclk;
		latency_required = true;
		break;
	case amd_pp_f_clock:
		pclk_vol_table = pinfo->vdd_dep_on_fclk;
		latency_required = true;
		break;
	case amd_pp_dcf_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
		break;
	case amd_pp_disp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
		break;
	case amd_pp_phy_clock:
		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
		break;
	case amd_pp_dpp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dppclk;
		break;
	default:
		return -EINVAL;
	}

	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
		return -EINVAL;

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		if (pclk_vol_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz =
				pclk_vol_table->entries[i].clk * 10;
			clocks->data[clocks->num_levels].latency_in_us = latency_required ?
				smu10_get_mem_latency(hwmgr,
						      pclk_vol_table->entries[i].clk) :
				0;
			clocks->num_levels++;
		}
	}

	return 0;
}

static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	uint32_t i;
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	struct smu10_clock_voltage_information *pinfo = &(smu10_data->clock_vol_info);
	struct smu10_voltage_dependency_table *pclk_vol_table = NULL;

	if (pinfo == NULL)
		return -EINVAL;

	switch (type) {
	case amd_pp_mem_clock:
		pclk_vol_table = pinfo->vdd_dep_on_mclk;
		break;
	case amd_pp_f_clock:
		pclk_vol_table = pinfo->vdd_dep_on_fclk;
		break;
	case amd_pp_dcf_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dcefclk;
		break;
	case amd_pp_soc_clock:
		pclk_vol_table = pinfo->vdd_dep_on_socclk;
		break;
	case amd_pp_disp_clock:
		pclk_vol_table = pinfo->vdd_dep_on_dispclk;
		break;
	case amd_pp_phy_clock:
		pclk_vol_table = pinfo->vdd_dep_on_phyclk;
		break;
	default:
		return -EINVAL;
	}

	if (pclk_vol_table == NULL || pclk_vol_table->count == 0)
		return -EINVAL;

	clocks->num_levels = 0;
	for (i = 0; i < pclk_vol_table->count; i++) {
		if (pclk_vol_table->entries[i].clk) {
			clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
			clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
			clocks->num_levels++;
		}
	}

	return 0;
}



static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	clocks->engine_max_clock = 80000; /* the driver can't query the engine clock; temporarily hard-coded to 800 MHz */
	return 0;
}

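/*
 * Decode the on-die temperature from THM_TCON_CUR_TMP: the raw field
 * is in 0.125 degC steps, offset by -49 degC when the range-select
 * bit is set, and scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */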
static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP);
	int cur_temp =
		(reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT;

	if (cur_temp & THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK)
		cur_temp = ((cur_temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		cur_temp = (cur_temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return cur_temp;
}

static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			  void *value, int *size)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
	uint32_t sclk, mclk;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
		sclk = smum_get_argument(hwmgr);
		/* in units of 10 kHz */
		*((uint32_t *)value) = sclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
		mclk = smum_get_argument(hwmgr);
		/* in units of 10 kHz */
		*((uint32_t *)value) = mclk * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		void *clock_ranges)
{
	struct smu10_hwmgr *data = hwmgr->backend;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
	Watermarks_t *table = &(data->water_marks_table);

	smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
	smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false);
	data->water_marks_exist = true;
	return 0;
}

static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
}

static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}

static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
	if (gate)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
	else
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
}

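/*
 * Gate or ungate the VCN block.  Gating sets the IP block to
 * AMD_PG_STATE_GATE before powering it down through the SMC; ungating
 * powers it up first and then ungates the IP block.
 */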
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_GATE);
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_PowerDownVcn, 0);
		smu10_data->vcn_power_gated = true;
	} else {
		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_PowerUpVcn, 0);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCN,
						AMD_PG_STATE_UNGATE);
		smu10_data->vcn_power_gated = false;
	}
}

static int conv_power_profile_to_pplib_workload(int power_profile)
{
	int pplib_workload = 0;

	switch (power_profile) {
	case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
		pplib_workload = WORKLOAD_DEFAULT_BIT;
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
		break;
	case PP_SMC_POWER_PROFILE_POWERSAVING:
		pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VIDEO:
		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VR:
		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
		break;
	case PP_SMC_POWER_PROFILE_COMPUTE:
		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
		break;
	}

	return pplib_workload;
}

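/*
 * Print the available power profile modes.  Each hardcoded
 * profile_mode_setting row lists busy-set-point, FPS, use-RLC-busy
 * and min-active-level, in PP_SMC_POWER_PROFILE_* order.
 */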
static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	uint32_t i, size = 0;
	static const uint8_t
		profile_mode_setting[6][4] = {{70, 60, 0, 0,},
						{70, 60, 1, 3,},
						{90, 60, 0, 0,},
						{70, 60, 0, 0,},
						{70, 90, 0, 0,},
						{30, 60, 0, 6,},
						};
	static const char *profile_name[6] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE"};
	static const char *title[6] = {"NUM",
			"MODE_NAME",
			"BUSY_SET_POINT",
			"FPS",
			"USE_RLC_BUSY",
			"MIN_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
			title[1], title[2], title[3], title[4], title[5]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
		size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
			profile_mode_setting[i][0], profile_mode_setting[i][1],
			profile_mode_setting[i][2], profile_mode_setting[i][3]);

	return size;
}

static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	if ((adev->asic_type == CHIP_RAVEN) &&
	    (adev->rev_id != 0x15d8) &&
	    (hwmgr->smu_version >= 0x41e2b))
		return true;
	else
		return false;
}

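/*
 * Select a power profile.  On Raven1 refresh parts GFXOFF is
 * temporarily disabled around the PPSMC_MSG_ActiveProcessNotify
 * message when switching to a non-default workload.
 */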
static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	int workload_type = 0;
	int result = 0;

	if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
		pr_err("Invalid power profile mode %ld\n", input[size]);
		return -EINVAL;
	}
	if (hwmgr->power_profile_mode == input[size])
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type =
		conv_power_profile_to_pplib_workload(input[size]);
	if (workload_type &&
	    smu10_is_raven1_refresh(hwmgr) &&
	    !hwmgr->gfxoff_state_changed_by_workload) {
		smu10_gfx_off_control(hwmgr, false);
		hwmgr->gfxoff_state_changed_by_workload = true;
	}
	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
						1 << workload_type);
	if (!result)
		hwmgr->power_profile_mode = input[size];
	if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
		smu10_gfx_off_control(hwmgr, true);
		hwmgr->gfxoff_state_changed_by_workload = false;
	}

	return 0;
}

static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr,
						   PPSMC_MSG_DeviceDriverReset,
						   mode);
}

static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
	.backend_init = smu10_hwmgr_backend_init,
	.backend_fini = smu10_hwmgr_backend_fini,
	.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
	.force_dpm_level = smu10_dpm_force_dpm_level,
	.get_power_state_size = smu10_get_power_state_size,
	.powerdown_uvd = NULL,
	.powergate_uvd = smu10_powergate_vcn,
	.powergate_vce = NULL,
	.get_mclk = smu10_dpm_get_mclk,
	.get_sclk = smu10_dpm_get_sclk,
	.patch_boot_state = smu10_dpm_patch_boot_state,
	.get_pp_table_entry = smu10_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu10_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu10_set_cpu_power_state,
	.store_cc6_data = smu10_store_cc6_data,
	.force_clock_level = smu10_force_clock_level,
	.print_clock_levels = smu10_print_clock_levels,
	.get_dal_power_level = smu10_get_dal_power_level,
	.get_performance_level = smu10_get_performance_level,
	.get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks,
	.get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges,
	.get_max_high_clocks = smu10_get_max_high_clocks,
	.read_sensor = smu10_read_sensor,
	.set_active_display_count = smu10_set_active_display_count,
	.set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
	.dynamic_state_management_enable = smu10_enable_dpm_tasks,
	.power_off_asic = smu10_power_off_asic,
	.asic_setup = smu10_setup_asic_task,
	.power_state_set = smu10_set_power_state_tasks,
	.dynamic_state_management_disable = smu10_disable_dpm_tasks,
	.powergate_mmhub = smu10_powergate_mmhub,
	.smus_notify_pwe = smu10_smus_notify_pwe,
	.display_clock_voltage_request = smu10_display_clock_voltage_request,
	.powergate_gfx = smu10_gfx_off_control,
	.powergate_sdma = smu10_powergate_sdma,
	.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
	.get_power_profile_mode = smu10_get_power_profile_mode,
	.set_power_profile_mode = smu10_set_power_profile_mode,
	.asic_reset = smu10_asic_reset,
};

int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu10_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}