/*	$NetBSD: amdgpu_ci_smumgr.c,v 1.4 2021/12/19 10:59:37 riastradh Exp $	*/

/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_ci_smumgr.c,v 1.4 2021/12/19 10:59:37 riastradh Exp $");

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "linux/delay.h"
#include <linux/types.h>
#include <linux/pci.h>

#include "smumgr.h"
#include "pp_debug.h"
#include "ci_smumgr.h"
#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "processpptables.h"

#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define SMC_RAM_END 0x40000

#define CISLAND_MINIMUM_ENGINE_CLOCK 800
#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5

static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
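
/*
 * Point the SMC indirect-access index register at a dword-aligned SMC
 * SRAM address, with auto-increment disabled.  The address must be
 * 4-byte aligned and the whole dword must lie below 'limit'.
 */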
static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
					uint32_t smc_addr, uint32_t limit)
{
	if ((0 != (3 & smc_addr))
		|| ((smc_addr + 3) >= limit)) {
		pr_err("smc_addr invalid\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
	return 0;
}
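
/*
 * Copy a byte buffer into SMC SRAM.  Whole dwords are packed MSB-first
 * (e.g. the bytes 0x12 0x34 0x56 0x78 become the dword 0x12345678); a
 * trailing partial dword is merged into the existing SRAM word with a
 * read-modify-write so the bytes beyond the end of 'src' are preserved.
 */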
static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	if ((3 & smc_start_address)
		|| ((smc_start_address + byte_count) >= limit)) {
		pr_err("smc_start_address invalid\n");
		return -EINVAL;
	}

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = ci_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
	}

	return 0;
}


static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}
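
/*
 * The SMC is treated as running when its clock is not gated off and
 * its program counter has advanced past 0x20100, i.e. presumably out
 * of its boot code.
 */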
bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_PC_C)));
}

static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
				uint32_t *value, uint32_t limit)
{
	int result;

	result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
	return 0;
}
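
/*
 * Post a message to the SMC mailbox and busy-wait for the response
 * register to become non-zero.  A response other than 1 is only
 * logged; the call still returns success.
 */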
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret != 1)
		pr_info("\n failed to send message %x ret is %d\n", msg, ret);

	return 0;
}

static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(hwmgr, msg);
}
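
/*
 * Select the per-ASIC power-tune defaults by PCI device ID: Hawaii
 * PRO/XT and the Saturn parts get their own tables, and every other
 * ID (including all the Bonaire IDs listed explicitly) falls through
 * to the Bonaire XT defaults.
 */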
static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	dev_id = adev->pdev->device;

	switch (dev_id) {
	case 0x67BA:
	case 0x66B1:
		smu_data->power_tune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67B8:
	case 0x66B0:
		smu_data->power_tune_defaults = &defaults_hawaii_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		smu_data->power_tune_defaults = &defaults_saturn_xt;
		break;
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
	default:
		smu_data->power_tune_defaults = &defaults_bonaire_xt;
		break;
	}
}
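
/*
 * Look up the voltage required for a given clock in a clock/voltage
 * dependency table: return the voltage of the first entry whose clock
 * is >= the requested clock, or that of the highest entry when the
 * request exceeds the whole table.
 */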
static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
	struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
	uint32_t clock, uint32_t *vol)
{
	uint32_t i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*vol = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*vol = allowed_clock_voltage_table->entries[i - 1].v;
	return 0;
}
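
/*
 * Derive the SPLL register values (reference and post dividers, the
 * feedback divider, and optional spread spectrum) for one engine
 * clock level from the dividers reported by the VBIOS.
 */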
static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct pp_atomctrl_clock_dividers_vi dividers;
	uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
	uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
	uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
	uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
	uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
	uint32_t ref_clock;
	uint32_t ref_divider;
	uint32_t fbdiv;
	int result;

	/* get the engine clock dividers for this clock value */
	result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);

	PP_ASSERT_WITH_CODE(result == 0,
			"Error retrieving Engine Clock dividers from VBIOS.",
			return result);

	/* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
	ref_clock = atomctrl_get_reference_clock(hwmgr);
	ref_divider = 1 + dividers.uc_pll_ref_div;

	/* low 14 bits is fraction and high 12 bits is divider */
	fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;

	/* SPLL_FUNC_CNTL setup */
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_REF_DIV, dividers.uc_pll_ref_div);
	spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
			SPLL_PDIV_A, dividers.uc_pll_post_div);

	/* SPLL_FUNC_CNTL_3 setup */
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_FB_DIV, fbdiv);

	/* set to use fractional accumulation */
	spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
			SPLL_DITHEN, 1);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
		struct pp_atomctrl_internal_ss_info ss_info;
		uint32_t vco_freq = clock * dividers.uc_pll_post_div;

		if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
				vco_freq, &ss_info)) {
			uint32_t clk_s = ref_clock * 5 /
					(ref_divider * ss_info.speed_spectrum_rate);
			uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
					fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
			cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
					CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
			cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
					CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
		}
	}

	sclk->SclkFrequency = clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (uint8_t)dividers.pll_post_divider;

	return 0;
}

static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
				const struct phm_phase_shedding_limits_table *pl,
					uint32_t sclk, uint32_t *p_shed)
{
	unsigned int i;

	/* use the minimum phase shedding */
	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (sclk < pl->entries[i].Sclk) {
			*p_shed = i;
			break;
		}
	}
}
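
/*
 * Pick the largest deep-sleep divider ID such that (clock >> id)
 * still meets the minimum engine clock; e.g. with clock = 3200 and a
 * minimum of 800 (both in 10 kHz units) this returns 2.
 */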
static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
			uint32_t clock_insr)
{
	uint8_t i;
	uint32_t temp;
	uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);

	if (clock < min) {
		pr_info("Engine clock can't satisfy stutter requirement!\n");
		return 0;
	}
	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		temp = clock >> i;

		if (temp >= min || i == 0)
			break;
	}
	return i;
}
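
/*
 * Fill in one SMU7_Discrete_GraphicsLevel: SPLL parameters, minimum
 * VDDC (plus the phase count when phase shedding is enabled),
 * activity and hysteresis settings, and the deep-sleep divider, then
 * byte-swap the multi-byte fields into SMC (big-endian) order.
 */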
static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
		uint32_t clock, struct SMU7_Discrete_GraphicsLevel *level)
{
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	result = ci_calculate_sclk_params(hwmgr, clock, level);

	/* populate graphics levels */
	result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
			(uint32_t *)(&level->MinVddc));
	if (result) {
		pr_err("vdd_dep_on_sclk table is NULL\n");
		return result;
	}

	level->SclkFrequency = clock;
	level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(hwmgr,
				hwmgr->dyn_state.vddc_phase_shed_limits_table,
				clock,
				&level->MinVddcPhases);

	level->ActivityLevel = data->current_profile_setting.sclk_activity;
	level->CcPwrDynRm = 0;
	level->CcPwrDynRm1 = 0;
	level->EnabledForActivity = 0;
	/* this level can be used for throttling. */
	level->EnabledForThrottle = 1;
	level->UpH = data->current_profile_setting.sclk_up_hyst;
	level->DownH = data->current_profile_setting.sclk_down_hyst;
	level->VoltageDownH = 0;
	level->PowerThrottle = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep))
		level->DeepSleepDivId =
				ci_get_sleep_divider_id_from_clock(clock,
						CISLAND_MINIMUM_ENGINE_CLOCK);

	/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later. */
	level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (0 == result) {
		level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
		CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
		CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
		CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
	}

	return result;
}
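
/*
 * Build the complete graphics (SCLK) DPM level array from the
 * hardware manager's sclk table and upload it into SMC SRAM at the
 * GraphicsLevel offset of the firmware's DPM table.
 */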
static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result = 0;
	uint32_t array = smu_data->dpm_table_start +
			offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
			SMU7_MAX_LEVELS_GRAPHICS;
	struct SMU7_Discrete_GraphicsLevel *levels =
			smu_data->smc_state_table.GraphicsLevel;
	uint32_t i;

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		result = ci_populate_single_graphic_level(hwmgr,
				dpm_table->sclk_table.dpm_levels[i].value,
				&levels[i]);
		if (result)
			return result;
		if (i > 1)
			smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	result = ci_copy_bytes_to_smc(hwmgr, array,
				(u8 *)levels, array_size,
				SMC_RAM_END);

	return result;
}

static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
	smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
	smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
	smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
	uint16_t tdc_limit;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;

	tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
	smu_data->power_tune_table.TDC_VDDC_PkgLimit =
			CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
	smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
			defaults->tdc_vddc_throttle_release_limit_perc;
	smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	uint32_t temp;

	if (ci_read_smc_sram_dword(hwmgr,
			fuse_table_offset +
			offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
			(uint32_t *)&temp, SMC_RAM_END))
		PP_ASSERT_WITH_CODE(false,
				"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
				return -EINVAL);
	else
		smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
	uint16_t tmp;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);

	if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
		|| 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
	else
		tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;

	smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);

	return 0;
}

static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
			"The CAC Leakage table does not exist!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
			"There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
	PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
			"CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);

	for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
			hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
		} else {
			lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
			hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
		}
	}

	return 0;
}

static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
	int i;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint8_t *vid = smu_data->power_tune_table.VddCVid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
		"There should never be more than 8 entries for VddcVid!!!",
		return -EINVAL);

	for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
		vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
	smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint16_t HiSidd;
	uint16_t LoSidd;
	struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;

	HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
	LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);

	smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
			CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
	smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
			CONVERT_FROM_HOST_TO_SMC_US(LoSidd);

	return 0;
}
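
/*
 * Populate the PM fuse table (BAPM VIDs, SVI load line, TDC limit,
 * fuzzy-fan sensitivity, base leakage) and, when power containment is
 * enabled, upload it to the SMC at the offset advertised in the
 * firmware header.
 */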
static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t pm_fuse_table_offset;
	int ret = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment)) {
		if (ci_read_smc_sram_dword(hwmgr,
				SMU7_FIRMWARE_HEADER_LOCATION +
				offsetof(SMU7_Firmware_Header, PmFuseTable),
				&pm_fuse_table_offset, SMC_RAM_END)) {
			pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
			return -EINVAL;
		}

		/* DW0 - DW3 */
		ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
		/* DW4 - DW5 */
		ret |= ci_populate_vddc_vid(hwmgr);
		/* DW6 */
		ret |= ci_populate_svi_load_line(hwmgr);
		/* DW7 */
		ret |= ci_populate_tdc_limit(hwmgr);
		/* DW8 */
		ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);

		ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);

		ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);

		ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
		if (ret)
			return ret;

		ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
				(uint8_t *)&smu_data->power_tune_table,
				sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
	}
	return ret;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
	struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
	struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
	const uint16_t *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
	dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));

	dpm_table->DTETjOffset = 0;
	dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
		dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
	} else {
		dpm_table->PPM_PkgPwrLimit = 0;
		dpm_table->PPM_TemperatureLimit = 0;
	}

	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
	CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);

	dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
	def1 = defaults->bapmti_r;
	def2 = defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
	pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
	uint16_t *lo)
{
	uint16_t v_index;
	bool vol_found = false;
	*hi = tab->value * VOLTAGE_SCALE;
	*lo = tab->value * VOLTAGE_SCALE;

	PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
			"The SCLK/VDDC Dependency Table does not exist.\n",
			return -EINVAL);

	if (NULL == hwmgr->dyn_state.cac_leakage_table) {
		pr_warn("CAC Leakage Table does not exist, using vddc.\n");
		return 0;
	}

	for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
		if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
			vol_found = true;
			if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
			} else {
				pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
				*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
				*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
			}
			break;
		}
	}

	if (!vol_found) {
		for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
			if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
				vol_found = true;
				if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
				} else {
					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
					*lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
					*hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
				}
				break;
			}
		}

		if (!vol_found)
			pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
	}

	return 0;
}

static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
		pp_atomctrl_voltage_table_entry *tab,
		SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
	int result;

	result = ci_get_std_voltage_value_sidd(hwmgr, tab,
			&smc_voltage_tab->StdVoltageHiSidd,
			&smc_voltage_tab->StdVoltageLoSidd);
	if (result) {
		smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
		smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
	}

	smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
	CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);

	return 0;
}

static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	unsigned int count;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	table->VddcLevelCount = data->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddc_voltage_table.entries[count]),
				&(table->VddcLevel[count]));
		PP_ASSERT_WITH_CODE(0 == result, "failed to populate SMC VDDC voltage table", return -EINVAL);

		/* GPIO voltage control */
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
			table->VddcLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low;
			table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low;
		} else {
			table->VddcLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->VddciLevelCount = data->vddci_voltage_table.count;

	for (count = 0; count < table->VddciLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->vddci_voltage_table.entries[count]),
				&(table->VddciLevel[count]));
		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC VDDCI voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
			table->VddciLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low;
			table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low;
		} else {
			table->VddciLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
			SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t count;
	int result;

	table->MvddLevelCount = data->mvdd_voltage_table.count;

	for (count = 0; count < table->MvddLevelCount; count++) {
		result = ci_populate_smc_voltage_table(hwmgr,
				&(data->mvdd_voltage_table.entries[count]),
				&table->MvddLevel[count]);
		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC MVDD voltage table", return -EINVAL);
		if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
			table->MvddLevel[count].Smio = (uint8_t) count;
			table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low;
			table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low;
		} else {
			table->MvddLevel[count].Smio = 0;
		}
	}

	CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);

	return 0;
}


static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
	SMU7_Discrete_DpmTable *table)
{
	int result;

	result = ci_populate_smc_vddc_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDC voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_vdd_ci_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate VDDCI voltage table to SMC", return -EINVAL);

	result = ci_populate_smc_mvdd_table(hwmgr, table);
	PP_ASSERT_WITH_CODE(0 == result,
			"can not populate MVDD voltage table to SMC", return -EINVAL);

	return 0;
}
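
/*
 * Program the ultra-low-voltage (ULV) state.  The VDDC offset is the
 * gap between the lowest SCLK/VDDC dependency entry and the ULV
 * voltage from the powerplay table, expressed either directly (SMIO
 * control) or as a VID offset (SVI2 control).
 */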
static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
		struct SMU7_Discrete_Ulv *state)
{
	uint32_t voltage_response_time, ulv_voltage;
	int result;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
	PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);

	if (ulv_voltage == 0) {
		data->ulv_supported = false;
		return 0;
	}

	if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
		/* use the minimum voltage if the ULV voltage in the pptable is larger than the minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffset = 0;
		else
			/* used in SMIO mode; not implemented for now. This is a backup path for CI only. */
			state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
	} else {
		/* use the minimum voltage if the ULV voltage in the pptable is larger than the minimum voltage */
		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
			state->VddcOffsetVid = 0;
		else  /* used in SVI2 mode */
			state->VddcOffsetVid = (uint8_t)(
				(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
					* VOLTAGE_VID_OFFSET_SCALE2
					/ VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = 1;

	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
	CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
	CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);

	return 0;
}

static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
		SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(hwmgr, ulv_level);
}

static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	uint32_t i;

	/* Index dpm_table->pcie_speed_table.count is reserved for the PCIE boot level. */
	for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
		table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
	}

	smu_data->smc_state_table.LinkLevelCount =
		(uint8_t)dpm_table->pcie_speed_table.count;
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
		phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);

	return 0;
}
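
/*
 * Derive the MPLL register values for one memory-clock level from the
 * VBIOS dividers, including the GDDR5-specific DQ settings, optional
 * memory spread spectrum, and the DLL power-down bits.
 */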
static int ci_calculate_mclk_params(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *mclk,
		bool strobe_mode,
		bool dllStateOn
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
	uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
	uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
	uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
	uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
	uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
	uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
	uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
	uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;

	pp_atomctrl_memory_clock_param mpll_param;
	int result;

	result = atomctrl_get_memory_pll_dividers_si(hwmgr,
				memory_clock, &mpll_param, strobe_mode);
	PP_ASSERT_WITH_CODE(0 == result,
		"Error retrieving Memory Clock Parameters from VBIOS.", return result);

	mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);

	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
	mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
			MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);

	mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
			MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);

	if (data->is_memory_gddr5) {
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
		mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
				MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
		pp_atomctrl_internal_ss_info ss_info;
		uint32_t freq_nom;
		uint32_t tmp;
		uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);

		/* for GDDR5 for all modes and DDR3 */
		if (1 == mpll_param.qdr)
			freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);

		/* tmp = (freq_nom / reference_clock * reference_divider) ^ 2  Note: S.I. reference_divider = 1 */
		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;

		if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
			uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
			uint32_t clkv =
				(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
							ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);

			mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
			mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
		}
	}

	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
	mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
		MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
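
/*
 * Map a GDDR5 memory clock (in 10 kHz units) onto the 16-entry
 * frequency-ratio index used by the MC sequencer, with separate
 * ranges for strobe and non-strobe mode.
 */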
static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
		bool strobe_mode)
{
	uint8_t mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
	}

	return mc_para_index;
}

static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
	uint8_t mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);

	return mc_para_index;
}

static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
					uint32_t memory_clock, uint32_t *p_shed)
{
	unsigned int i;

	*p_shed = 1;

	for (i = 0; i < pl->count; i++) {
		if (memory_clock < pl->entries[i].Mclk) {
			*p_shed = i;
			break;
		}
	}

	return 0;
}
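
/*
 * Fill in one SMU7_Discrete_MemoryLevel: minimum VDDC/VDDCI/MVDD from
 * the dependency tables, strobe and EDC thresholds, the DLL state
 * (taken from the MC_SEQ_MISC registers in strobe mode), and the MPLL
 * register values, then byte-swap the fields into SMC order.
 */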
static int ci_populate_single_memory_level(
		struct pp_hwmgr *hwmgr,
		uint32_t memory_clock,
		SMU7_Discrete_MemoryLevel *memory_level
		)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;
	bool dll_state_on;
	uint32_t mclk_edc_wr_enable_threshold = 40000;
	uint32_t mclk_edc_enable_threshold = 40000;
	uint32_t mclk_strobe_mode_threshold = 40000;

	if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
			hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.vddci_dependency_on_mclk,
				memory_clock,
				&memory_level->MinVddci);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
	}

	if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
		result = ci_get_dependency_volt_by_clk(hwmgr,
				hwmgr->dyn_state.mvdd_dependency_on_mclk,
				memory_clock,
				&memory_level->MinMvdd);
		PP_ASSERT_WITH_CODE((0 == result),
			"can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
	}

	memory_level->MinVddcPhases = 1;

	if (data->vddc_phase_shed_control) {
		ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
				memory_clock, &memory_level->MinVddcPhases);
	}

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = data->current_profile_setting.mclk_up_hyst;
	memory_level->DownH = data->current_profile_setting.mclk_down_hyst;
	memory_level->VoltageDownH = 0;

	/* Indicates maximum activity level for this performance level. */
	memory_level->ActivityLevel = data->current_profile_setting.mclk_activity;
	memory_level->StutterEnable = 0;
	memory_level->StrobeEnable = 0;
	memory_level->EdcReadEnable = 0;
	memory_level->EdcWriteEnable = 0;
	memory_level->RttEnable = 0;

	/* default set to low watermark. Highest level will be set to high later. */
	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	data->display_timing.num_existing_displays = hwmgr->display_config->num_display;
	data->display_timing.vrefresh = hwmgr->display_config->vrefresh;

	/* stutter mode is not supported on CI */

	/* decide strobe mode */
	memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
			(memory_clock <= mclk_strobe_mode_threshold);

	/* decide EDC mode and memory clock ratio */
	if (data->is_memory_gddr5) {
		memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
					memory_level->StrobeEnable);

		if ((mclk_edc_enable_threshold != 0) &&
				(memory_clock > mclk_edc_enable_threshold)) {
			memory_level->EdcReadEnable = 1;
		}

		if ((mclk_edc_wr_enable_threshold != 0) &&
				(memory_clock > mclk_edc_wr_enable_threshold)) {
			memory_level->EdcWriteEnable = 1;
		}

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
					((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
			else
				dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
		} else
			dll_state_on = data->dll_default_on;
	} else {
		memory_level->StrobeRatio =
			ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
	}

	result = ci_calculate_mclk_params(hwmgr,
		memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);

	if (0 == result) {
		memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
		memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
		memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
		/* MCLK frequency in units of 10KHz */
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
		/* Indicates maximum activity level for this performance level. */
		CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
		CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
	}

	return result;
}

static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	int result;
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t dev_id;

	uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
	uint32_t i;

	memset(levels, 0x00, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
			"can not populate memory level as memory clock is zero", return -EINVAL);
		result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
			&(smu_data->smc_state_table.MemoryLevel[i]));
		if (0 != result)
			return result;
	}

	smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

	dev_id = adev->pdev->device;

	if ((dpm_table->mclk_table.count >= 2)
		&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
		smu_data->smc_state_table.MemoryLevel[1].MinVddci =
				smu_data->smc_state_table.MemoryLevel[0].MinVddci;
		smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
				smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
	}
	smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
	CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

	smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
	data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
	smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;

	result = ci_copy_bytes_to_smc(hwmgr,
		level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
		SMC_RAM_END);

	return result;
}

static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
					SMU7_Discrete_VoltageLevel *voltage)
{
	const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t i = 0;

	if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
		/* find the first MVDD entry whose clock is at least the requested clock */
		for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
			if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
				/* Always round to the higher voltage. */
				voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
			"MVDD Voltage is outside the supported range.", return -EINVAL);

	} else {
		return -EINVAL;
	}

	return 0;
}
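
/*
 * Program the ACPI level: minimum VDDC/VDDCI/MVDD, the SPLL powered
 * down and held in reset, and the memory DLLs reset and powered down,
 * with the DLL bypass bits programmed.
 */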
ci_populate_smc_acpi_level(struct pp_hwmgr * hwmgr,SMU7_Discrete_DpmTable * table)1380 static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1381 SMU7_Discrete_DpmTable *table)
1382 {
1383 int result = 0;
1384 const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1385 struct pp_atomctrl_clock_dividers_vi dividers;
1386
1387 SMU7_Discrete_VoltageLevel voltage_level;
1388 uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
1389 uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
1390 uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
1391 uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
1392
1393
1394 /* The ACPI state should not do DPM on DC (or ever).*/
1395 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
1396
1397 if (data->acpi_vddc)
1398 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
1399 else
1400 table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
1401
1402 table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
1403 /* assign zero for now*/
1404 table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
1405
1406 /* get the engine clock dividers for this clock value*/
1407 result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
1408 table->ACPILevel.SclkFrequency, ÷rs);
1409
1410 PP_ASSERT_WITH_CODE(result == 0,
1411 "Error retrieving Engine Clock dividers from VBIOS.", return result);
1412
1413 /* divider ID for required SCLK*/
1414 table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
1415 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
1416 table->ACPILevel.DeepSleepDivId = 0;
1417
1418 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1419 CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
1420 spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
1421 CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
1422 spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
1423 CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
1424
1425 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
1426 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
1427 table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
1428 table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
1429 table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
1430 table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
1431 table->ACPILevel.CcPwrDynRm = 0;
1432 table->ACPILevel.CcPwrDynRm1 = 0;
1433
1434 /* For various features to be enabled/disabled while this level is active.*/
1435 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
1436 /* SCLK frequency in units of 10KHz*/
1437 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
1438 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
1439 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
1440 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
1441 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
1442 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
1443 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
1444 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
1445 CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
1446
1447
1448 /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
1449 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
1450 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
1451
1452 if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
1453 table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
1454 else {
1455 if (data->acpi_vddci != 0)
1456 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
1457 else
1458 table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
1459 }
1460
1461 if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
1462 table->MemoryACPILevel.MinMvdd =
1463 PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
1464 else
1465 table->MemoryACPILevel.MinMvdd = 0;
1466
1467 /* Force reset on DLL*/
1468 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1469 MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
1470 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1471 MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
1472
1473 /* Disable DLL in ACPIState*/
1474 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1475 MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
1476 mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
1477 MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
1478
1479 /* Enable DLL bypass signal*/
1480 dll_cntl = PHM_SET_FIELD(dll_cntl,
1481 DLL_CNTL, MRDCK0_BYPASS, 0);
1482 dll_cntl = PHM_SET_FIELD(dll_cntl,
1483 DLL_CNTL, MRDCK1_BYPASS, 0);
1484
1485 table->MemoryACPILevel.DllCntl =
1486 PP_HOST_TO_SMC_UL(dll_cntl);
1487 table->MemoryACPILevel.MclkPwrmgtCntl =
1488 PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
1489 table->MemoryACPILevel.MpllAdFuncCntl =
1490 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
1491 table->MemoryACPILevel.MpllDqFuncCntl =
1492 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
1493 table->MemoryACPILevel.MpllFuncCntl =
1494 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
1495 table->MemoryACPILevel.MpllFuncCntl_1 =
1496 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
1497 table->MemoryACPILevel.MpllFuncCntl_2 =
1498 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
1499 table->MemoryACPILevel.MpllSs1 =
1500 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
1501 table->MemoryACPILevel.MpllSs2 =
1502 PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
1503
1504 table->MemoryACPILevel.EnabledForThrottle = 0;
1505 table->MemoryACPILevel.EnabledForActivity = 0;
1506 table->MemoryACPILevel.UpH = 0;
1507 table->MemoryACPILevel.DownH = 100;
1508 table->MemoryACPILevel.VoltageDownH = 0;
1509 /* Indicates maximum activity level for this performance level.*/
1510 table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity);
1511
1512 table->MemoryACPILevel.StutterEnable = 0;
1513 table->MemoryACPILevel.StrobeEnable = 0;
1514 table->MemoryACPILevel.EdcReadEnable = 0;
1515 table->MemoryACPILevel.EdcWriteEnable = 0;
1516 table->MemoryACPILevel.RttEnable = 0;
1517
1518 return result;
1519 }
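/*
 * Illustrative sketch (not driver code): the SMC consumes its tables in
 * big-endian byte order, so every multi-byte field populated above is
 * byte-swapped before upload.  A stand-alone model of what the
 * PP_HOST_TO_SMC_UL()/CONVERT_FROM_HOST_TO_SMC_UL() conversions achieve on
 * a little-endian host is sketched below; the real macros live in the
 * powerplay headers and may be implemented differently.
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t example_host_to_smc_ul(uint32_t v)
{
	/* byte-swap a 32-bit host value into big-endian SMC order */
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) <<  8) |
	       ((v & 0x00ff0000u) >>  8) |
	       ((v & 0xff000000u) >> 24);
}
#endif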
1520
1521 static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
1522 SMU7_Discrete_DpmTable *table)
1523 {
1524 int result = 0;
1525 uint8_t count;
1526 struct pp_atomctrl_clock_dividers_vi dividers;
1527 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1528 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1529
1530 table->UvdLevelCount = (uint8_t)(uvd_table->count);
1531
1532 for (count = 0; count < table->UvdLevelCount; count++) {
1533 table->UvdLevel[count].VclkFrequency =
1534 uvd_table->entries[count].vclk;
1535 table->UvdLevel[count].DclkFrequency =
1536 uvd_table->entries[count].dclk;
1537 table->UvdLevel[count].MinVddc =
1538 uvd_table->entries[count].v * VOLTAGE_SCALE;
1539 table->UvdLevel[count].MinVddcPhases = 1;
1540
1541 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1542 table->UvdLevel[count].VclkFrequency, &dividers);
1543 PP_ASSERT_WITH_CODE((0 == result),
1544 "can not find divide id for Vclk clock", return result);
1545
1546 table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
1547
1548 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1549 table->UvdLevel[count].DclkFrequency, &dividers);
1550 PP_ASSERT_WITH_CODE((0 == result),
1551 "can not find divide id for Dclk clock", return result);
1552
1553 table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
1554 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
1555 CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
1556 CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
1557 }
1558
1559 return result;
1560 }
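/*
 * Illustrative sketch (not driver code): the UVD routine above and the VCE
 * and ACP routines below all repeat one pattern -- for each
 * dependency-table entry, ask the VBIOS for the DFS post divider that
 * realizes the requested clock, then record the divider and byte-swap the
 * frequency for the SMC.  A hypothetical helper expressing the shared step
 * (ci_lookup_dfs_divider is a made-up name, not a driver symbol):
 */
#if 0	/* example only */
static int ci_lookup_dfs_divider(struct pp_hwmgr *hwmgr,
		uint32_t clock, uint8_t *divider)
{
	struct pp_atomctrl_clock_dividers_vi dividers;
	int result;

	result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, clock, &dividers);
	if (result == 0)
		*divider = (uint8_t)dividers.pll_post_divider;
	return result;
}
#endif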
1561
1562 static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
1563 SMU7_Discrete_DpmTable *table)
1564 {
1565 int result = -EINVAL;
1566 uint8_t count;
1567 struct pp_atomctrl_clock_dividers_vi dividers;
1568 struct phm_vce_clock_voltage_dependency_table *vce_table =
1569 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1570
1571 table->VceLevelCount = (uint8_t)(vce_table->count);
1572 table->VceBootLevel = 0;
1573
1574 for (count = 0; count < table->VceLevelCount; count++) {
1575 table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
1576 table->VceLevel[count].MinVoltage =
1577 vce_table->entries[count].v * VOLTAGE_SCALE;
1578 table->VceLevel[count].MinPhases = 1;
1579
1580 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1581 table->VceLevel[count].Frequency, &dividers);
1582 PP_ASSERT_WITH_CODE((0 == result),
1583 "can not find divide id for VCE engine clock",
1584 return result);
1585
1586 table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1587
1588 CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
1589 CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
1590 }
1591 return result;
1592 }
1593
1594 static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
1595 SMU7_Discrete_DpmTable *table)
1596 {
1597 int result = -EINVAL;
1598 uint8_t count;
1599 struct pp_atomctrl_clock_dividers_vi dividers;
1600 struct phm_acp_clock_voltage_dependency_table *acp_table =
1601 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
1602
1603 table->AcpLevelCount = (uint8_t)(acp_table->count);
1604 table->AcpBootLevel = 0;
1605
1606 for (count = 0; count < table->AcpLevelCount; count++) {
1607 table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
1608 table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
1609 table->AcpLevel[count].MinPhases = 1;
1610
1611 result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
1612 table->AcpLevel[count].Frequency, &dividers);
1613 PP_ASSERT_WITH_CODE((0 == result),
1614 "can not find divide id for engine clock", return result);
1615
1616 table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
1617
1618 CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
1619 CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
1620 }
1621 return result;
1622 }
1623
1624 static int ci_populate_memory_timing_parameters(
1625 struct pp_hwmgr *hwmgr,
1626 uint32_t engine_clock,
1627 uint32_t memory_clock,
1628 struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
1629 )
1630 {
1631 uint32_t dramTiming;
1632 uint32_t dramTiming2;
1633 uint32_t burstTime;
1634 int result;
1635
1636 result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
1637 engine_clock, memory_clock);
1638
1639 PP_ASSERT_WITH_CODE(result == 0,
1640 "Error calling VBIOS to set DRAM_TIMING.", return result);
1641
1642 dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
1643 dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
1644 burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
1645
1646 arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
1647 arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
1648 arb_regs->McArbBurstTime = (uint8_t)burstTime;
1649
1650 return 0;
1651 }
1652
1653 static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
1654 {
1655 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1656 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1657 int result = 0;
1658 SMU7_Discrete_MCArbDramTimingTable arb_regs;
1659 uint32_t i, j;
1660
1661 memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
1662
1663 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
1664 for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
1665 result = ci_populate_memory_timing_parameters
1666 (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
1667 data->dpm_table.mclk_table.dpm_levels[j].value,
1668 &arb_regs.entries[i][j]);
1669
1670 if (0 != result)
1671 break;
1672 }
1673 }
1674
1675 if (0 == result) {
1676 result = ci_copy_bytes_to_smc(
1677 hwmgr,
1678 smu_data->arb_table_start,
1679 (uint8_t *)&arb_regs,
1680 sizeof(SMU7_Discrete_MCArbDramTimingTable),
1681 SMC_RAM_END
1682 );
1683 }
1684
1685 return result;
1686 }
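/*
 * Illustrative note (not driver code): the ARB timing table uploaded above
 * is a full cross product -- entries[i][j] holds the DRAM timing derived
 * for SCLK level i paired with MCLK level j, so the SMC can index the table
 * by whichever pair of levels it is switching to.  With, say, 4 SCLK and
 * 3 MCLK levels the double loop fills 12 entries.
 */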
1687
1688 static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
1689 SMU7_Discrete_DpmTable *table)
1690 {
1691 int result = 0;
1692 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1693 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1694
1695 table->GraphicsBootLevel = 0;
1696 table->MemoryBootLevel = 0;
1697
1698 /* find boot level from dpm table*/
1699 result = phm_find_boot_level(&(data->dpm_table.sclk_table),
1700 data->vbios_boot_state.sclk_bootup_value,
1701 (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
1702
1703 if (0 != result) {
1704 smu_data->smc_state_table.GraphicsBootLevel = 0;
1705 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Graphics DPM level 0!\n");
1706 result = 0;
1707 }
1708
1709 result = phm_find_boot_level(&(data->dpm_table.mclk_table),
1710 data->vbios_boot_state.mclk_bootup_value,
1711 (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
1712
1713 if (0 != result) {
1714 smu_data->smc_state_table.MemoryBootLevel = 0;
1715 pr_err("VBIOS did not find boot engine clock value in dependency table. Using Memory DPM level 0!\n");
1716 result = 0;
1717 }
1718
1719 table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
1720 table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
1721 table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
1722
1723 return result;
1724 }
1725
1726 static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
1727 SMU7_Discrete_MCRegisters *mc_reg_table)
1728 {
1729 const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
1730
1731 uint32_t i, j;
1732
1733 for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
1734 if (smu_data->mc_reg_table.validflag & 1<<j) {
1735 PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
1736 "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
1737 mc_reg_table->address[i].s0 =
1738 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
1739 mc_reg_table->address[i].s1 =
1740 PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
1741 i++;
1742 }
1743 }
1744
1745 mc_reg_table->last = (uint8_t)i;
1746
1747 return 0;
1748 }
1749
1750 static void ci_convert_mc_registers(
1751 const struct ci_mc_reg_entry *entry,
1752 SMU7_Discrete_MCRegisterSet *data,
1753 uint32_t num_entries, uint32_t valid_flag)
1754 {
1755 uint32_t i, j;
1756
1757 for (i = 0, j = 0; j < num_entries; j++) {
1758 if (valid_flag & 1<<j) {
1759 data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
1760 i++;
1761 }
1762 }
1763 }
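/*
 * Illustrative note (not driver code): ci_convert_mc_registers() compacts a
 * sparse register set through the validity bitmask -- only positions whose
 * bit is set in valid_flag are copied, and the destination index advances
 * only for those.  With valid_flag = 0x5, for example, source entries 0 and
 * 2 land in destination slots 0 and 1.
 */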
1764
1765 static int ci_convert_mc_reg_table_entry_to_smc(
1766 struct pp_hwmgr *hwmgr,
1767 const uint32_t memory_clock,
1768 SMU7_Discrete_MCRegisterSet *mc_reg_table_data
1769 )
1770 {
1771 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1772 uint32_t i = 0;
1773
1774 for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
1775 if (memory_clock <=
1776 smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
1777 break;
1778 }
1779 }
1780
1781 if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
1782 --i;
1783
1784 ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
1785 mc_reg_table_data, smu_data->mc_reg_table.last,
1786 smu_data->mc_reg_table.validflag);
1787
1788 return 0;
1789 }
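/*
 * Illustrative note (not driver code): the search above picks the first
 * mc_reg_table entry whose mclk_max covers the requested memory clock; if
 * the clock exceeds every entry's range, the last entry is reused as a
 * clamp instead of failing.
 */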
1790
1791 static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
1792 SMU7_Discrete_MCRegisters *mc_regs)
1793 {
1794 int result = 0;
1795 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1796 int res;
1797 uint32_t i;
1798
1799 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
1800 res = ci_convert_mc_reg_table_entry_to_smc(
1801 hwmgr,
1802 data->dpm_table.mclk_table.dpm_levels[i].value,
1803 &mc_regs->data[i]
1804 );
1805
1806 if (0 != res)
1807 result = res;
1808 }
1809
1810 return result;
1811 }
1812
1813 static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
1814 {
1815 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1816 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1817 uint32_t address;
1818 int32_t result;
1819
1820 if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
1821 return 0;
1822
1823
1824 memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
1825
1826 result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
1827
1828 if (result != 0)
1829 return result;
1830
1831 address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
1832
1833 return ci_copy_bytes_to_smc(hwmgr, address,
1834 (uint8_t *)&smu_data->mc_regs.data[0],
1835 sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
1836 SMC_RAM_END);
1837 }
1838
1839 static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
1840 {
1841 int result;
1842 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1843
1844 memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
1845 result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
1846 PP_ASSERT_WITH_CODE(0 == result,
1847 "Failed to initialize MCRegTable for the MC register addresses!", return result;);
1848
1849 result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
1850 PP_ASSERT_WITH_CODE(0 == result,
1851 "Failed to initialize MCRegTable for driver state!", return result;);
1852
1853 return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
1854 (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
1855 }
1856
1857 static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
1858 {
1859 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1860 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1861 uint8_t count, level;
1862
1863 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
1864
1865 for (level = 0; level < count; level++) {
1866 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
1867 >= data->vbios_boot_state.sclk_bootup_value) {
1868 smu_data->smc_state_table.GraphicsBootLevel = level;
1869 break;
1870 }
1871 }
1872
1873 count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
1874
1875 for (level = 0; level < count; level++) {
1876 if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
1877 >= data->vbios_boot_state.mclk_bootup_value) {
1878 smu_data->smc_state_table.MemoryBootLevel = level;
1879 break;
1880 }
1881 }
1882
1883 return 0;
1884 }
1885
1886 static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
1887 SMU7_Discrete_DpmTable *table)
1888 {
1889 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1890
1891 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
1892 table->SVI2Enable = 1;
1893 else
1894 table->SVI2Enable = 0;
1895 return 0;
1896 }
1897
1898 static int ci_start_smc(struct pp_hwmgr *hwmgr)
1899 {
1900 /* set the SMC instruction start point at 0x0 */
1901 ci_program_jump_on_start(hwmgr);
1902
1903 /* enable smc clock */
1904 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
1905
1906 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
1907
1908 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
1909 INTERRUPTS_ENABLED, 1);
1910
1911 return 0;
1912 }
1913
1914 static int ci_populate_vr_config(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
1915 {
1916 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1917 uint16_t config;
1918
1919 config = VR_SVI2_PLANE_1;
1920 table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
1921
1922 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
1923 config = VR_SVI2_PLANE_2;
1924 table->VRConfig |= config;
1925 } else {
1926 pr_info("VDDCshould be on SVI2 controller!");
1927 }
1928
1929 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
1930 config = VR_SVI2_PLANE_2;
1931 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1932 } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
1933 config = VR_SMIO_PATTERN_1;
1934 table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
1935 }
1936
1937 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
1938 config = VR_SMIO_PATTERN_2;
1939 table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
1940 }
1941
1942 return 0;
1943 }
1944
1945 static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
1946 {
1947 int result;
1948 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1949 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
1950 SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
1951 struct pp_atomctrl_gpio_pin_assignment gpio_pin;
1952 u32 i;
1953
1954 ci_initialize_power_tune_defaults(hwmgr);
1955 memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
1956
1957 if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
1958 ci_populate_smc_voltage_tables(hwmgr, table);
1959
1960 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1961 PHM_PlatformCaps_AutomaticDCTransition))
1962 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1963
1964
1965 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1966 PHM_PlatformCaps_StepVddc))
1967 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1968
1969 if (data->is_memory_gddr5)
1970 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1971
1972 if (data->ulv_supported) {
1973 result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
1974 PP_ASSERT_WITH_CODE(0 == result,
1975 "Failed to initialize ULV state!", return result);
1976
1977 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1978 ixCG_ULV_PARAMETER, 0x40035);
1979 }
1980
1981 result = ci_populate_all_graphic_levels(hwmgr);
1982 PP_ASSERT_WITH_CODE(0 == result,
1983 "Failed to initialize Graphics Level!", return result);
1984
1985 result = ci_populate_all_memory_levels(hwmgr);
1986 PP_ASSERT_WITH_CODE(0 == result,
1987 "Failed to initialize Memory Level!", return result);
1988
1989 result = ci_populate_smc_link_level(hwmgr, table);
1990 PP_ASSERT_WITH_CODE(0 == result,
1991 "Failed to initialize Link Level!", return result);
1992
1993 result = ci_populate_smc_acpi_level(hwmgr, table);
1994 PP_ASSERT_WITH_CODE(0 == result,
1995 "Failed to initialize ACPI Level!", return result);
1996
1997 result = ci_populate_smc_vce_level(hwmgr, table);
1998 PP_ASSERT_WITH_CODE(0 == result,
1999 "Failed to initialize VCE Level!", return result);
2000
2001 result = ci_populate_smc_acp_level(hwmgr, table);
2002 PP_ASSERT_WITH_CODE(0 == result,
2003 "Failed to initialize ACP Level!", return result);
2004
2005 /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
2006 /* need to populate the ARB settings for the initial state. */
2007 result = ci_program_memory_timing_parameters(hwmgr);
2008 PP_ASSERT_WITH_CODE(0 == result,
2009 "Failed to Write ARB settings for the initial state.", return result);
2010
2011 result = ci_populate_smc_uvd_level(hwmgr, table);
2012 PP_ASSERT_WITH_CODE(0 == result,
2013 "Failed to initialize UVD Level!", return result);
2014
2015 table->UvdBootLevel = 0;
2016 table->VceBootLevel = 0;
2017 table->AcpBootLevel = 0;
2018 table->SamuBootLevel = 0;
2019
2020 table->GraphicsBootLevel = 0;
2021 table->MemoryBootLevel = 0;
2022
2023 result = ci_populate_smc_boot_level(hwmgr, table);
2024 PP_ASSERT_WITH_CODE(0 == result,
2025 "Failed to initialize Boot Level!", return result);
2026
2027 result = ci_populate_smc_initial_state(hwmgr);
2028 PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
2029
2030 result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
2031 PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
2032
2033 table->UVDInterval = 1;
2034 table->VCEInterval = 1;
2035 table->ACPInterval = 1;
2036 table->SAMUInterval = 1;
2037 table->GraphicsVoltageChangeEnable = 1;
2038 table->GraphicsThermThrottleEnable = 1;
2039 table->GraphicsInterval = 1;
2040 table->VoltageInterval = 1;
2041 table->ThermalInterval = 1;
2042
2043 table->TemperatureLimitHigh =
2044 (data->thermal_temp_setting.temperature_high *
2045 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2046 table->TemperatureLimitLow =
2047 (data->thermal_temp_setting.temperature_low *
2048 SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2049
2050 table->MemoryVoltageChangeEnable = 1;
2051 table->MemoryInterval = 1;
2052 table->VoltageResponseTime = 0;
2053 table->VddcVddciDelta = 4000;
2054 table->PhaseResponseTime = 0;
2055 table->MemoryThermThrottleEnable = 1;
2056
2057 PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
2058 "There must be 1 or more PCIE levels defined in PPTable.",
2059 return -EINVAL);
2060
2061 table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
2062 table->PCIeGenInterval = 1;
2063
2064 result = ci_populate_vr_config(hwmgr, table);
2065 PP_ASSERT_WITH_CODE(0 == result,
2066 "Failed to populate VRConfig setting!", return result);
2067 data->vr_config = table->VRConfig;
2068
2069 ci_populate_smc_svi2_config(hwmgr, table);
2070
2071 for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
2072 CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
2073
2074 table->ThermGpio = 17;
2075 table->SclkStepSize = 0x4000;
2076 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
2077 table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
2078 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2079 PHM_PlatformCaps_RegulatorHot);
2080 } else {
2081 table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
2082 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2083 PHM_PlatformCaps_RegulatorHot);
2084 }
2085
2086 table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
2087
2088 CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
2089 CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
2090 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
2091 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
2092 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
2093 CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
2094 CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
2095 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
2096 CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
2097 table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
2098 CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
2099 CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
2100
2101 table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
2102 table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
2103 table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
2104
2105 /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
2106 result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
2107 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
2108 (uint8_t *)&(table->SystemFlags),
2109 sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
2110 SMC_RAM_END);
2111
2112 PP_ASSERT_WITH_CODE(0 == result,
2113 "Failed to upload dpm data to SMC memory!", return result;);
2114
2115 result = ci_populate_initial_mc_reg_table(hwmgr);
2116 PP_ASSERT_WITH_CODE((0 == result),
2117 "Failed to populate initialize MC Reg table!", return result);
2118
2119 result = ci_populate_pm_fuses(hwmgr);
2120 PP_ASSERT_WITH_CODE(0 == result,
2121 "Failed to populate PM fuses to SMC memory!", return result);
2122
2123 ci_start_smc(hwmgr);
2124
2125 return 0;
2126 }
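/*
 * Illustrative sketch (not driver code): the temperature limits above are
 * converted from the power-play table's units into the SMC's Q8.8
 * fixed-point format.  Assuming SMU7_Q88_FORMAT_CONVERSION_UNIT is 256 and
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (their usual header values),
 * a high limit of 99000 milli-degrees becomes 99000 * 256 / 1000 = 25344,
 * i.e. 99.0 C in Q8.8.
 */
#if 0	/* example only */
#include <stdint.h>

static uint16_t example_temp_to_q88(uint32_t milli_degrees)
{
	/* hypothetical helper mirroring the TemperatureLimitHigh math */
	return (uint16_t)((milli_degrees * 256u) / 1000u);
}
#endif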
2127
2128 static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
2129 {
2130 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2131 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
2132 uint32_t duty100;
2133 uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
2134 uint16_t fdo_min, slope1, slope2;
2135 uint32_t reference_clock;
2136 int res __unused;
2137 uint64_t tmp64;
2138
2139 if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
2140 return 0;
2141
2142 if (hwmgr->thermal_controller.fanInfo.bNoFan) {
2143 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2144 PHM_PlatformCaps_MicrocodeFanControl);
2145 return 0;
2146 }
2147
2148 if (0 == ci_data->fan_table_start) {
2149 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2150 return 0;
2151 }
2152
2153 duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
2154
2155 if (0 == duty100) {
2156 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
2157 return 0;
2158 }
2159
2160 tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
2161 do_div(tmp64, 10000);
2162 fdo_min = (uint16_t)tmp64;
2163
2164 t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
2165 t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
2166
2167 pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
2168 pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
2169
2170 slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
2171 slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
2172
2173 fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
2174 fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
2175 fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
2176
2177 fan_table.Slope1 = cpu_to_be16(slope1);
2178 fan_table.Slope2 = cpu_to_be16(slope2);
2179
2180 fan_table.FdoMin = cpu_to_be16(fdo_min);
2181
2182 fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
2183
2184 fan_table.HystUp = cpu_to_be16(1);
2185
2186 fan_table.HystSlope = cpu_to_be16(1);
2187
2188 fan_table.TempRespLim = cpu_to_be16(5);
2189
2190 reference_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
2191
2192 fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
2193
2194 fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
2195
2196 fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
2197
2198 res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
2199
2200 return 0;
2201 }
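/*
 * Illustrative sketch (not driver code): the fan table math above works in
 * integer fixed point -- do_div() for the 64-bit PWM-minimum scaling, and a
 * "+50 then /100" step that rounds the slope computations to the nearest
 * unit instead of truncating.  A stand-alone model of that rounding idiom:
 */
#if 0	/* example only */
#include <stdint.h>

static uint16_t example_rounded_div100(uint32_t numerator)
{
	/* round-to-nearest divide by 100, as in the slope1/slope2 math */
	return (uint16_t)((numerator + 50u) / 100u);
}
#endif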
2202
2203 static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
2204 {
2205 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2206
2207 if (data->need_update_smu7_dpm_table &
2208 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
2209 return ci_program_memory_timing_parameters(hwmgr);
2210
2211 return 0;
2212 }
2213
2214 static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
2215 {
2216 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2217 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2218
2219 int result = 0;
2220 uint32_t low_sclk_interrupt_threshold = 0;
2221
2222 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2223 PHM_PlatformCaps_SclkThrottleLowNotification)
2224 && (data->low_sclk_interrupt_threshold != 0)) {
2225 low_sclk_interrupt_threshold =
2226 data->low_sclk_interrupt_threshold;
2227
2228 CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
2229
2230 result = ci_copy_bytes_to_smc(
2231 hwmgr,
2232 smu_data->dpm_table_start +
2233 offsetof(SMU7_Discrete_DpmTable,
2234 LowSclkInterruptT),
2235 (uint8_t *)&low_sclk_interrupt_threshold,
2236 sizeof(uint32_t),
2237 SMC_RAM_END);
2238 }
2239
2240 result = ci_update_and_upload_mc_reg_table(hwmgr);
2241
2242 PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
2243
2244 result = ci_program_mem_timing_parameters(hwmgr);
2245 PP_ASSERT_WITH_CODE((result == 0),
2246 "Failed to program memory timing parameters!",
2247 );
2248
2249 return result;
2250 }
2251
2252 static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
2253 {
2254 switch (type) {
2255 case SMU_SoftRegisters:
2256 switch (member) {
2257 case HandshakeDisables:
2258 return offsetof(SMU7_SoftRegisters, HandshakeDisables);
2259 case VoltageChangeTimeout:
2260 return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
2261 case AverageGraphicsActivity:
2262 return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
2263 case AverageMemoryActivity:
2264 return offsetof(SMU7_SoftRegisters, AverageMemoryA);
2265 case PreVBlankGap:
2266 return offsetof(SMU7_SoftRegisters, PreVBlankGap);
2267 case VBlankTimeout:
2268 return offsetof(SMU7_SoftRegisters, VBlankTimeout);
2269 case DRAM_LOG_ADDR_H:
2270 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
2271 case DRAM_LOG_ADDR_L:
2272 return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
2273 case DRAM_LOG_PHY_ADDR_H:
2274 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
2275 case DRAM_LOG_PHY_ADDR_L:
2276 return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
2277 case DRAM_LOG_BUFF_SIZE:
2278 return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2279 }
2280 break;
2281 case SMU_Discrete_DpmTable:
2282 switch (member) {
2283 case LowSclkInterruptThreshold:
2284 return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
2285 }
2286 break;
2287 }
2288 pr_debug("can't get the offset of type %x member %x\n", type, member);
2289 return 0;
2290 }
2291
2292 static uint32_t ci_get_mac_definition(uint32_t value)
2293 {
2294 switch (value) {
2295 case SMU_MAX_LEVELS_GRAPHICS:
2296 return SMU7_MAX_LEVELS_GRAPHICS;
2297 case SMU_MAX_LEVELS_MEMORY:
2298 return SMU7_MAX_LEVELS_MEMORY;
2299 case SMU_MAX_LEVELS_LINK:
2300 return SMU7_MAX_LEVELS_LINK;
2301 case SMU_MAX_ENTRIES_SMIO:
2302 return SMU7_MAX_ENTRIES_SMIO;
2303 case SMU_MAX_LEVELS_VDDC:
2304 return SMU7_MAX_LEVELS_VDDC;
2305 case SMU_MAX_LEVELS_VDDCI:
2306 return SMU7_MAX_LEVELS_VDDCI;
2307 case SMU_MAX_LEVELS_MVDD:
2308 return SMU7_MAX_LEVELS_MVDD;
2309 }
2310
2311 pr_debug("can't get the mac of %x\n", value);
2312 return 0;
2313 }
2314
2315 static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
2316 {
2317 uint32_t byte_count, start_addr;
2318 uint8_t *src;
2319 uint32_t data;
2320
2321 struct cgs_firmware_info info = {0};
2322
2323 cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
2324
2325 hwmgr->is_kicker = info.is_kicker;
2326 hwmgr->smu_version = info.version;
2327 byte_count = info.image_size;
2328 src = (uint8_t *)info.kptr;
2329 start_addr = info.ucode_start_address;
2330
2331 if (byte_count > SMC_RAM_END) {
2332 pr_err("SMC address is beyond the SMC RAM area.\n");
2333 return -EINVAL;
2334 }
2335
2336 cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
2337 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
2338
2339 for (; byte_count >= 4; byte_count -= 4) {
2340 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
2341 cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
2342 src += 4;
2343 }
2344 PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
2345
2346 if (0 != byte_count) {
2347 pr_err("SMC size must be divisible by 4\n");
2348 return -EINVAL;
2349 }
2350
2351 return 0;
2352 }
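/*
 * Illustrative sketch (not driver code): the upload loop above packs four
 * firmware bytes into one register write, most significant byte first, and
 * relies on the auto-increment window so only the first address has to be
 * programmed.  A stand-alone model of the byte packing:
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t example_pack_be32(const uint8_t *src)
{
	/* combine src[0..3] big-endian, as in the ucode upload loop */
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] <<  8) |  (uint32_t)src[3];
}
#endif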
2353
2354 static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
2355 {
2356 if (ci_is_smc_ram_running(hwmgr)) {
2357 pr_info("smc is running, no need to load smc firmware\n");
2358 return 0;
2359 }
2360 PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
2361 boot_seq_done, 1);
2362 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
2363 pre_fetcher_en, 1);
2364
2365 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
2366 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
2367 return ci_load_smc_ucode(hwmgr);
2368 }
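/*
 * Illustrative note (not driver code): the sequence above gates the SMC
 * clock (ck_disable = 1) and asserts reset (rst_reg = 1) before the ucode
 * is written, and ci_start_smc() later re-enables the clock and deasserts
 * reset once the jump-on-start vector has been programmed.
 */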
2369
2370 static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
2371 {
2372 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2373 struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2374
2375 uint32_t tmp = 0;
2376 int result;
2377 bool error = false;
2378
2379 if (ci_upload_firmware(hwmgr))
2380 return -EINVAL;
2381
2382 result = ci_read_smc_sram_dword(hwmgr,
2383 SMU7_FIRMWARE_HEADER_LOCATION +
2384 offsetof(SMU7_Firmware_Header, DpmTable),
2385 &tmp, SMC_RAM_END);
2386
2387 if (0 == result)
2388 ci_data->dpm_table_start = tmp;
2389
2390 error |= (0 != result);
2391
2392 result = ci_read_smc_sram_dword(hwmgr,
2393 SMU7_FIRMWARE_HEADER_LOCATION +
2394 offsetof(SMU7_Firmware_Header, SoftRegisters),
2395 &tmp, SMC_RAM_END);
2396
2397 if (0 == result) {
2398 data->soft_regs_start = tmp;
2399 ci_data->soft_regs_start = tmp;
2400 }
2401
2402 error |= (0 != result);
2403
2404 result = ci_read_smc_sram_dword(hwmgr,
2405 SMU7_FIRMWARE_HEADER_LOCATION +
2406 offsetof(SMU7_Firmware_Header, mcRegisterTable),
2407 &tmp, SMC_RAM_END);
2408
2409 if (0 == result)
2410 ci_data->mc_reg_table_start = tmp;
2411
2412 result = ci_read_smc_sram_dword(hwmgr,
2413 SMU7_FIRMWARE_HEADER_LOCATION +
2414 offsetof(SMU7_Firmware_Header, FanTable),
2415 &tmp, SMC_RAM_END);
2416
2417 if (0 == result)
2418 ci_data->fan_table_start = tmp;
2419
2420 error |= (0 != result);
2421
2422 result = ci_read_smc_sram_dword(hwmgr,
2423 SMU7_FIRMWARE_HEADER_LOCATION +
2424 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
2425 &tmp, SMC_RAM_END);
2426
2427 if (0 == result)
2428 ci_data->arb_table_start = tmp;
2429
2430 error |= (0 != result);
2431
2432 result = ci_read_smc_sram_dword(hwmgr,
2433 SMU7_FIRMWARE_HEADER_LOCATION +
2434 offsetof(SMU7_Firmware_Header, Version),
2435 &tmp, SMC_RAM_END);
2436
2437 if (0 == result)
2438 hwmgr->microcode_version_info.SMC = tmp;
2439
2440 error |= (0 != result);
2441
2442 return error ? 1 : 0;
2443 }
2444
2445 static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
2446 {
2447 return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
2448 }
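/*
 * Illustrative note (not driver code): the memory module index above is
 * bits 23:16 of BIOS_SCRATCH_4 -- shift right by 16, then mask the result
 * down to one byte.
 */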
2449
2450 static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
2451 {
2452 bool result = true;
2453
2454 switch (in_reg) {
2455 case mmMC_SEQ_RAS_TIMING:
2456 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
2457 break;
2458
2459 case mmMC_SEQ_DLL_STBY:
2460 *out_reg = mmMC_SEQ_DLL_STBY_LP;
2461 break;
2462
2463 case mmMC_SEQ_G5PDX_CMD0:
2464 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
2465 break;
2466
2467 case mmMC_SEQ_G5PDX_CMD1:
2468 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
2469 break;
2470
2471 case mmMC_SEQ_G5PDX_CTRL:
2472 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
2473 break;
2474
2475 case mmMC_SEQ_CAS_TIMING:
2476 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
2477 break;
2478
2479 case mmMC_SEQ_MISC_TIMING:
2480 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
2481 break;
2482
2483 case mmMC_SEQ_MISC_TIMING2:
2484 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
2485 break;
2486
2487 case mmMC_SEQ_PMG_DVS_CMD:
2488 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
2489 break;
2490
2491 case mmMC_SEQ_PMG_DVS_CTL:
2492 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
2493 break;
2494
2495 case mmMC_SEQ_RD_CTL_D0:
2496 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
2497 break;
2498
2499 case mmMC_SEQ_RD_CTL_D1:
2500 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
2501 break;
2502
2503 case mmMC_SEQ_WR_CTL_D0:
2504 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
2505 break;
2506
2507 case mmMC_SEQ_WR_CTL_D1:
2508 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
2509 break;
2510
2511 case mmMC_PMG_CMD_EMRS:
2512 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
2513 break;
2514
2515 case mmMC_PMG_CMD_MRS:
2516 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
2517 break;
2518
2519 case mmMC_PMG_CMD_MRS1:
2520 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
2521 break;
2522
2523 case mmMC_SEQ_PMG_TIMING:
2524 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
2525 break;
2526
2527 case mmMC_PMG_CMD_MRS2:
2528 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
2529 break;
2530
2531 case mmMC_SEQ_WR_CTL_2:
2532 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
2533 break;
2534
2535 default:
2536 result = false;
2537 break;
2538 }
2539
2540 return result;
2541 }
2542
2543 static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
2544 {
2545 uint32_t i;
2546 uint16_t address;
2547
2548 for (i = 0; i < table->last; i++) {
2549 table->mc_reg_address[i].s0 =
2550 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
2551 ? address : table->mc_reg_address[i].s1;
2552 }
2553 return 0;
2554 }
2555
2556 static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
2557 struct ci_mc_reg_table *ni_table)
2558 {
2559 uint8_t i, j;
2560
2561 PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2562 "Invalid VramInfo table.", return -EINVAL);
2563 PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
2564 "Invalid VramInfo table.", return -EINVAL);
2565
2566 for (i = 0; i < table->last; i++)
2567 ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
2568
2569 ni_table->last = table->last;
2570
2571 for (i = 0; i < table->num_entries; i++) {
2572 ni_table->mc_reg_table_entry[i].mclk_max =
2573 table->mc_reg_table_entry[i].mclk_max;
2574 for (j = 0; j < table->last; j++) {
2575 ni_table->mc_reg_table_entry[i].mc_data[j] =
2576 table->mc_reg_table_entry[i].mc_data[j];
2577 }
2578 }
2579
2580 ni_table->num_entries = table->num_entries;
2581
2582 return 0;
2583 }
2584
2585 static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
2586 struct ci_mc_reg_table *table)
2587 {
2588 uint8_t i, j, k;
2589 uint32_t temp_reg;
2590 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2591
2592 for (i = 0, j = table->last; i < table->last; i++) {
2593 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2594 "Invalid VramInfo table.", return -EINVAL);
2595
2596 switch (table->mc_reg_address[i].s1) {
2597
2598 case mmMC_SEQ_MISC1:
2599 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
2600 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
2601 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
2602 for (k = 0; k < table->num_entries; k++) {
2603 table->mc_reg_table_entry[k].mc_data[j] =
2604 ((temp_reg & 0xffff0000)) |
2605 ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
2606 }
2607 j++;
2608
2609 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2610 "Invalid VramInfo table.", return -EINVAL);
2611 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
2612 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
2613 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
2614 for (k = 0; k < table->num_entries; k++) {
2615 table->mc_reg_table_entry[k].mc_data[j] =
2616 (temp_reg & 0xffff0000) |
2617 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2618
2619 if (!data->is_memory_gddr5)
2620 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
2621 }
2622 j++;
2623
2624 if (!data->is_memory_gddr5) {
2625 PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
2626 "Invalid VramInfo table.", return -EINVAL);
2627 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
2628 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
2629 for (k = 0; k < table->num_entries; k++) {
2630 table->mc_reg_table_entry[k].mc_data[j] =
2631 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
2632 }
2633 j++;
2634 }
2635
2636 break;
2637
2638 case mmMC_SEQ_RESERVE_M:
2639 temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
2640 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
2641 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
2642 for (k = 0; k < table->num_entries; k++) {
2643 table->mc_reg_table_entry[k].mc_data[j] =
2644 (temp_reg & 0xffff0000) |
2645 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
2646 }
2647 j++;
2648 break;
2649
2650 default:
2651 break;
2652 }
2653
2654 }
2655
2656 table->last = j;
2657
2658 return 0;
2659 }
2660
2661 static int ci_set_valid_flag(struct ci_mc_reg_table *table)
2662 {
2663 uint8_t i, j;
2664
2665 for (i = 0; i < table->last; i++) {
2666 for (j = 1; j < table->num_entries; j++) {
2667 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
2668 table->mc_reg_table_entry[j].mc_data[i]) {
2669 table->validflag |= (1 << i);
2670 break;
2671 }
2672 }
2673 }
2674
2675 return 0;
2676 }
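/*
 * Illustrative note (not driver code): a register position i earns a
 * validflag bit only if its value differs between at least two adjacent
 * AC-timing entries; registers that stay constant across all memory clock
 * ranges carry no information and are skipped when the compact SMC table
 * is built (see ci_populate_mc_reg_address() and ci_convert_mc_registers()).
 */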
2677
2678 static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
2679 {
2680 int result;
2681 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
2682 pp_atomctrl_mc_reg_table *table;
2683 struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
2684 uint8_t module_index = ci_get_memory_modile_index(hwmgr);
2685
2686 table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
2687
2688 if (NULL == table)
2689 return -ENOMEM;
2690
2691 /* Program additional LP registers that are no longer programmed by VBIOS */
2692 cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
2693 cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
2694 cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
2695 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
2696 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
2697 cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
2698 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
2699 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
2700 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
2701 cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
2702 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
2703 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
2704 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
2705 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
2706 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
2707 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
2708 cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
2709 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
2710 cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
2711 cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
2712
2713 result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
2714
2715 if (0 == result)
2716 result = ci_copy_vbios_smc_reg_table(table, ni_table);
2717
2718 if (0 == result) {
2719 ci_set_s0_mc_reg_index(ni_table);
2720 result = ci_set_mc_special_registers(hwmgr, ni_table);
2721 }
2722
2723 if (0 == result)
2724 ci_set_valid_flag(ni_table);
2725
2726 kfree(table);
2727
2728 return result;
2729 }
2730
2731 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
2732 {
2733 return ci_is_smc_ram_running(hwmgr);
2734 }
2735
2736 static int ci_smu_init(struct pp_hwmgr *hwmgr)
2737 {
2738 struct ci_smumgr *ci_priv = NULL;
2739
2740 ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
2741
2742 if (ci_priv == NULL)
2743 return -ENOMEM;
2744
2745 hwmgr->smu_backend = ci_priv;
2746
2747 return 0;
2748 }
2749
2750 static int ci_smu_fini(struct pp_hwmgr *hwmgr)
2751 {
2752 kfree(hwmgr->smu_backend);
2753 hwmgr->smu_backend = NULL;
2754 return 0;
2755 }
2756
2757 static int ci_start_smu(struct pp_hwmgr *hwmgr)
2758 {
2759 return 0;
2760 }
2761
2762 static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
2763 void *profile_setting)
2764 {
2765 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2766 struct ci_smumgr *smu_data = (struct ci_smumgr *)
2767 (hwmgr->smu_backend);
2768 struct profile_mode_setting *setting;
2769 struct SMU7_Discrete_GraphicsLevel *levels =
2770 smu_data->smc_state_table.GraphicsLevel;
2771 uint32_t array = smu_data->dpm_table_start +
2772 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2773
2774 uint32_t mclk_array = smu_data->dpm_table_start +
2775 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2776 struct SMU7_Discrete_MemoryLevel *mclk_levels =
2777 smu_data->smc_state_table.MemoryLevel;
2778 uint32_t i;
2779 uint32_t offset, up_hyst_offset, down_hyst_offset, clk_activity_offset, tmp;
2780
2781 if (profile_setting == NULL)
2782 return -EINVAL;
2783
2784 setting = (struct profile_mode_setting *)profile_setting;
2785
2786 if (setting->bupdate_sclk) {
2787 if (!data->sclk_dpm_key_disabled)
2788 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
2789 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
2790 if (levels[i].ActivityLevel !=
2791 cpu_to_be16(setting->sclk_activity)) {
2792 levels[i].ActivityLevel = cpu_to_be16(setting->sclk_activity);
2793
2794 clk_activity_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2795 + offsetof(SMU7_Discrete_GraphicsLevel, ActivityLevel);
2796 offset = clk_activity_offset & ~0x3;
2797 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2798 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, levels[i].ActivityLevel, sizeof(uint16_t));
2799 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2800
2801 }
2802 if (levels[i].UpH != setting->sclk_up_hyst ||
2803 levels[i].DownH != setting->sclk_down_hyst) {
2804 levels[i].UpH = setting->sclk_up_hyst;
2805 levels[i].DownH = setting->sclk_down_hyst;
2806 up_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2807 + offsetof(SMU7_Discrete_GraphicsLevel, UpH);
2808 down_hyst_offset = array + (sizeof(SMU7_Discrete_GraphicsLevel) * i)
2809 + offsetof(SMU7_Discrete_GraphicsLevel, DownH);
2810 offset = up_hyst_offset & ~0x3;
2811 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2812 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, levels[i].UpH, sizeof(uint8_t));
2813 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, levels[i].DownH, sizeof(uint8_t));
2814 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2815 }
2816 }
2817 if (!data->sclk_dpm_key_disabled)
2818 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
2819 }
2820
2821 if (setting->bupdate_mclk) {
2822 if (!data->mclk_dpm_key_disabled)
2823 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
2824 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
2825 if (mclk_levels[i].ActivityLevel !=
2826 cpu_to_be16(setting->mclk_activity)) {
2827 mclk_levels[i].ActivityLevel = cpu_to_be16(setting->mclk_activity);
2828
2829 clk_activity_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2830 + offsetof(SMU7_Discrete_MemoryLevel, ActivityLevel);
2831 offset = clk_activity_offset & ~0x3;
2832 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2833 tmp = phm_set_field_to_u32(clk_activity_offset, tmp, mclk_levels[i].ActivityLevel, sizeof(uint16_t));
2834 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2835
2836 }
2837 if (mclk_levels[i].UpH != setting->mclk_up_hyst ||
2838 mclk_levels[i].DownH != setting->mclk_down_hyst) {
2839 mclk_levels[i].UpH = setting->mclk_up_hyst;
2840 mclk_levels[i].DownH = setting->mclk_down_hyst;
2841 up_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2842 + offsetof(SMU7_Discrete_MemoryLevel, UpH);
2843 down_hyst_offset = mclk_array + (sizeof(SMU7_Discrete_MemoryLevel) * i)
2844 + offsetof(SMU7_Discrete_MemoryLevel, DownH);
2845 offset = up_hyst_offset & ~0x3;
2846 tmp = PP_HOST_TO_SMC_UL(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset));
2847 tmp = phm_set_field_to_u32(up_hyst_offset, tmp, mclk_levels[i].UpH, sizeof(uint8_t));
2848 tmp = phm_set_field_to_u32(down_hyst_offset, tmp, mclk_levels[i].DownH, sizeof(uint8_t));
2849 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset, PP_HOST_TO_SMC_UL(tmp));
2850 }
2851 }
2852 if (!data->mclk_dpm_key_disabled)
2853 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
2854 }
2855 return 0;
2856 }
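/*
 * Illustrative sketch (not driver code): the SMC indirect window is read
 * and written in aligned 32-bit units, so updating a one- or two-byte field
 * of a level entry above is a read-modify-write -- align the byte offset
 * down with "& ~0x3", patch the field inside the dword, and write the dword
 * back.  phm_set_field_to_u32() does the patching; a simplified model for a
 * single-byte field:
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t example_patch_byte(uint32_t byte_offset, uint32_t dword,
		uint8_t value)
{
	/* replace the byte at (byte_offset & 3) within an aligned dword */
	uint32_t shift = (byte_offset & 0x3u) * 8u;

	return (dword & ~(0xffu << shift)) | ((uint32_t)value << shift);
}
#endif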
2857
2858 static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
2859 {
2860 struct amdgpu_device *adev = hwmgr->adev;
2861 struct smu7_hwmgr *data = hwmgr->backend;
2862 struct ci_smumgr *smu_data = hwmgr->smu_backend;
2863 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
2864 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
2865 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2866 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2867 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2868 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2869 uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2870 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2871 int32_t i;
2872
2873 if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
2874 smu_data->smc_state_table.UvdBootLevel = 0;
2875 else
2876 smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
2877
2878 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2879 UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
2880
2881 data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
2882
2883 for (i = uvd_table->count - 1; i >= 0; i--) {
2884 if (uvd_table->entries[i].v <= max_vddc)
2885 data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
2886 if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
2887 break;
2888 }
2889 ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
2890 data->dpm_level_enable_mask.uvd_dpm_enable_mask);
2891
2892 return 0;
2893 }
2894
2895 static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
2896 {
2897 struct amdgpu_device *adev = hwmgr->adev;
2898 struct smu7_hwmgr *data = hwmgr->backend;
2899 struct phm_vce_clock_voltage_dependency_table *vce_table =
2900 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
2901 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2902 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2903 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2904 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2905 uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
2906 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
2907 int32_t i;
2908
2909 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
2910 VceBootLevel, 0); /* temporarily hard-coded to level 0; VCE can set the minimum EVCLK */
2911
2912 data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
2913
2914 for (i = vce_table->count - 1; i >= 0; i--) {
2915 if (vce_table->entries[i].v <= max_vddc)
2916 data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
2917 if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
2918 break;
2919 }
2920 ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
2921 data->dpm_level_enable_mask.vce_dpm_enable_mask);
2922
2923 return 0;
2924 }
2925
2926 static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
2927 {
2928 switch (type) {
2929 case SMU_UVD_TABLE:
2930 ci_update_uvd_smc_table(hwmgr);
2931 break;
2932 case SMU_VCE_TABLE:
2933 ci_update_vce_smc_table(hwmgr);
2934 break;
2935 default:
2936 break;
2937 }
2938 return 0;
2939 }
2940
2941 const struct pp_smumgr_func ci_smu_funcs = {
2942 .name = "ci_smu",
2943 .smu_init = ci_smu_init,
2944 .smu_fini = ci_smu_fini,
2945 .start_smu = ci_start_smu,
2946 .check_fw_load_finish = NULL,
2947 .request_smu_load_fw = NULL,
2948 .request_smu_load_specific_fw = NULL,
2949 .send_msg_to_smc = ci_send_msg_to_smc,
2950 .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
2951 .download_pptable_settings = NULL,
2952 .upload_pptable_settings = NULL,
2953 .get_offsetof = ci_get_offsetof,
2954 .process_firmware_header = ci_process_firmware_header,
2955 .init_smc_table = ci_init_smc_table,
2956 .update_sclk_threshold = ci_update_sclk_threshold,
2957 .thermal_setup_fan_table = ci_thermal_setup_fan_table,
2958 .populate_all_graphic_levels = ci_populate_all_graphic_levels,
2959 .populate_all_memory_levels = ci_populate_all_memory_levels,
2960 .get_mac_definition = ci_get_mac_definition,
2961 .initialize_mc_reg_table = ci_initialize_mc_reg_table,
2962 .is_dpm_running = ci_is_dpm_running,
2963 .update_dpm_settings = ci_update_dpm_settings,
2964 .update_smc_table = ci_update_smc_table,
2965 };
2966