1 /* $NetBSD: amdgpu_smu7_hwmgr.c,v 1.5 2024/04/16 14:34:01 riastradh Exp $ */
2
3 /*
4 * Copyright 2015 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_smu7_hwmgr.c,v 1.5 2024/04/16 14:34:01 riastradh Exp $");
27
28 #include "pp_debug.h"
29 #include <linux/delay.h>
30 #include <linux/fb.h>
31 #include <linux/module.h>
32 #include <linux/pci.h>
33 #include <linux/slab.h>
34 #include <linux/acpi.h>
35 #include <asm/div64.h>
36 #include <drm/amdgpu_drm.h>
37 #include "ppatomctrl.h"
38 #include "atombios.h"
39 #include "pptable_v1_0.h"
40 #include "pppcielanes.h"
41 #include "amd_pcie_helpers.h"
42 #include "hardwaremanager.h"
43 #include "process_pptables_v1_0.h"
44 #include "cgs_common.h"
45
46 #include "smu7_common.h"
47
48 #include "hwmgr.h"
49 #include "smu7_hwmgr.h"
50 #include "smu_ucode_xfer_vi.h"
51 #include "smu7_powertune.h"
52 #include "smu7_dyn_defaults.h"
53 #include "smu7_thermal.h"
54 #include "smu7_clockpowergating.h"
55 #include "processpptables.h"
56 #include "pp_thermal.h"
57 #include "smu7_baco.h"
58
59 #include "ivsrcid/ivsrcid_vislands30.h"
60
61 #include <linux/nbsd-namespace.h>
62
63 #define MC_CG_ARB_FREQ_F0 0x0a
64 #define MC_CG_ARB_FREQ_F1 0x0b
65 #define MC_CG_ARB_FREQ_F2 0x0c
66 #define MC_CG_ARB_FREQ_F3 0x0d
67
68 #define MC_CG_SEQ_DRAMCONF_S0 0x05
69 #define MC_CG_SEQ_DRAMCONF_S1 0x06
70 #define MC_CG_SEQ_YCLK_SUSPEND 0x04
71 #define MC_CG_SEQ_YCLK_RESUME 0x0a
72
73 #define SMC_CG_IND_START 0xc0030000
74 #define SMC_CG_IND_END 0xc0040000
75
76 #define MEM_FREQ_LOW_LATENCY 25000
77 #define MEM_FREQ_HIGH_LATENCY 80000
78
79 #define MEM_LATENCY_HIGH 45
80 #define MEM_LATENCY_LOW 35
81 #define MEM_LATENCY_ERR 0xFFFF
82
83 #define MC_SEQ_MISC0_GDDR5_SHIFT 28
84 #define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
85 #define MC_SEQ_MISC0_GDDR5_VALUE 5
86
87 #define PCIE_BUS_CLK 10000
88 #define TCLK (PCIE_BUS_CLK / 10)
89
90 static struct profile_mode_setting smu7_profiling[7] =
91 {{0, 0, 0, 0, 0, 0, 0, 0},
92 {1, 0, 100, 30, 1, 0, 100, 10},
93 {1, 10, 0, 30, 0, 0, 0, 0},
94 {0, 0, 0, 0, 1, 10, 16, 31},
95 {1, 0, 11, 50, 1, 0, 100, 10},
96 {1, 0, 5, 30, 0, 0, 0, 0},
97 {0, 0, 0, 0, 0, 0, 0, 0},
98 };
99
100 #define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310)
101
102 #define ixPWR_SVI2_PLANE1_LOAD 0xC0200280
103 #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L
104 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L
105 #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005
106 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006
107
108 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
109 enum DPM_EVENT_SRC {
110 DPM_EVENT_SRC_ANALOG = 0,
111 DPM_EVENT_SRC_EXTERNAL = 1,
112 DPM_EVENT_SRC_DIGITAL = 2,
113 DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
114 DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
115 };
116
117 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
118 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
119 enum pp_clock_type type, uint32_t mask);
120
121 static struct smu7_power_state *cast_phw_smu7_power_state(
122 struct pp_hw_power_state *hw_ps)
123 {
124 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
125 "Invalid Powerstate Type!",
126 return NULL);
127
128 return (struct smu7_power_state *)hw_ps;
129 }
130
131 static const struct smu7_power_state *cast_const_phw_smu7_power_state(
132 const struct pp_hw_power_state *hw_ps)
133 {
134 PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
135 "Invalid Powerstate Type!",
136 return NULL);
137
138 return (const struct smu7_power_state *)hw_ps;
139 }
140
141 /**
142 * Find the MC microcode version and store it in the HwMgr struct
143 *
144 * @param hwmgr the address of the powerplay hardware manager.
145 * @return always 0
146 */
147 static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
148 {
149 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
150
151 hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
152
153 return 0;
154 }
155
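/**
 * Read the current PCIe link speed from the PCIE indirect register space.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return the LC_CURRENT_DATA_RATE field of PCIE_LC_SPEED_CNTL.
 */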
156 static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
157 {
158 uint32_t speedCntl = 0;
159
160 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
161 speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
162 ixPCIE_LC_SPEED_CNTL);
163 return((uint16_t)PHM_GET_FIELD(speedCntl,
164 PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
165 }
166
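/**
 * Read the current PCIe link width and decode it into a lane count.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return the decoded lane count, or 0 if the encoded width is invalid.
 */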
167 static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
168 {
169 uint32_t link_width;
170
171 /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
172 link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
173 PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
174
175 PP_ASSERT_WITH_CODE((7 >= link_width),
176 "Invalid PCIe lane width!", return 0);
177
178 return decode_pcie_lane_width(link_width);
179 }
180
181 /**
182 * Enable the SMC voltage controller
183 *
184 * @param hwmgr the address of the powerplay hardware manager.
185 * @return always 0
186 */
187 static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
188 {
189 if (hwmgr->chip_id == CHIP_VEGAM) {
190 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
191 CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
192 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
193 CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
194 }
195
196 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
197 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
198
199 return 0;
200 }
201
202 /**
203 * Checks if we want to support voltage control
204 *
205 * @param hwmgr the address of the powerplay hardware manager.
206 */
207 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
208 {
209 const struct smu7_hwmgr *data =
210 (const struct smu7_hwmgr *)(hwmgr->backend);
211
212 return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
213 }
214
215 /**
216 * Enable voltage control
217 *
218 * @param hwmgr the address of the powerplay hardware manager.
219 * @return always 0
220 */
221 static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
222 {
223 /* enable voltage control */
224 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
225 GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
226
227 return 0;
228 }
229
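/**
 * Populate an SVI2 voltage table from a v0 clock/voltage dependency table.
 *
 * @param voltage_table the voltage table to fill in.
 * @param voltage_dependency_table the dependency table to copy voltages from.
 * @return 0 on success, -EINVAL if voltage_table is NULL.
 */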
230 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
231 struct phm_clock_voltage_dependency_table *voltage_dependency_table
232 )
233 {
234 uint32_t i;
235
236 PP_ASSERT_WITH_CODE((NULL != voltage_table),
237 "Voltage Dependency Table empty.", return -EINVAL;);
238
239 voltage_table->mask_low = 0;
240 voltage_table->phase_delay = 0;
241 voltage_table->count = voltage_dependency_table->count;
242
243 for (i = 0; i < voltage_dependency_table->count; i++) {
244 voltage_table->entries[i].value =
245 voltage_dependency_table->entries[i].v;
246 voltage_table->entries[i].smio_low = 0;
247 }
248
249 return 0;
250 }
251
252
253 /**
254 * Create Voltage Tables.
255 *
256 * @param hwmgr the address of the powerplay hardware manager.
257 * @return 0 on success, otherwise the error from the failed voltage table lookup.
258 */
259 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
260 {
261 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
262 struct phm_ppt_v1_information *table_info =
263 (struct phm_ppt_v1_information *)hwmgr->pptable;
264 int result = 0;
265 uint32_t tmp;
266
267 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
268 result = atomctrl_get_voltage_table_v3(hwmgr,
269 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
270 &(data->mvdd_voltage_table));
271 PP_ASSERT_WITH_CODE((0 == result),
272 "Failed to retrieve MVDD table.",
273 return result);
274 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
275 if (hwmgr->pp_table_version == PP_TABLE_V1)
276 result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
277 table_info->vdd_dep_on_mclk);
278 else if (hwmgr->pp_table_version == PP_TABLE_V0)
279 result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
280 hwmgr->dyn_state.mvdd_dependency_on_mclk);
281
282 PP_ASSERT_WITH_CODE((0 == result),
283 "Failed to retrieve SVI2 MVDD table from dependency table.",
284 return result;);
285 }
286
287 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
288 result = atomctrl_get_voltage_table_v3(hwmgr,
289 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
290 &(data->vddci_voltage_table));
291 PP_ASSERT_WITH_CODE((0 == result),
292 "Failed to retrieve VDDCI table.",
293 return result);
294 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
295 if (hwmgr->pp_table_version == PP_TABLE_V1)
296 result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
297 table_info->vdd_dep_on_mclk);
298 else if (hwmgr->pp_table_version == PP_TABLE_V0)
299 result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
300 hwmgr->dyn_state.vddci_dependency_on_mclk);
301 PP_ASSERT_WITH_CODE((0 == result),
302 "Failed to retrieve SVI2 VDDCI table from dependency table.",
303 return result);
304 }
305
306 if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
307 /* VDDGFX has only SVI2 voltage control */
308 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
309 table_info->vddgfx_lookup_table);
310 PP_ASSERT_WITH_CODE((0 == result),
311 "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
312 }
313
314
315 if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
316 result = atomctrl_get_voltage_table_v3(hwmgr,
317 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
318 &data->vddc_voltage_table);
319 PP_ASSERT_WITH_CODE((0 == result),
320 "Failed to retrieve VDDC table.", return result;);
321 } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
322
323 if (hwmgr->pp_table_version == PP_TABLE_V0)
324 result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
325 hwmgr->dyn_state.vddc_dependency_on_mclk);
326 else if (hwmgr->pp_table_version == PP_TABLE_V1)
327 result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
328 table_info->vddc_lookup_table);
329
330 PP_ASSERT_WITH_CODE((0 == result),
331 "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
332 }
333
334 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
335 PP_ASSERT_WITH_CODE(
336 (data->vddc_voltage_table.count <= tmp),
337 "Too many voltage values for VDDC. Trimming to fit state table.",
338 phm_trim_voltage_table_to_fit_state_table(tmp,
339 &(data->vddc_voltage_table)));
340
341 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
342 PP_ASSERT_WITH_CODE(
343 (data->vddgfx_voltage_table.count <= tmp),
344 "Too many voltage values for VDDGFX. Trimming to fit state table.",
345 phm_trim_voltage_table_to_fit_state_table(tmp,
346 &(data->vddgfx_voltage_table)));
347
348 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
349 PP_ASSERT_WITH_CODE(
350 (data->vddci_voltage_table.count <= tmp),
351 "Too many voltage values for VDDCI. Trimming to fit state table.",
352 phm_trim_voltage_table_to_fit_state_table(tmp,
353 &(data->vddci_voltage_table)));
354
355 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
356 PP_ASSERT_WITH_CODE(
357 (data->mvdd_voltage_table.count <= tmp),
358 "Too many voltage values for MVDD. Trimming to fit state table.",
359 phm_trim_voltage_table_to_fit_state_table(tmp,
360 &(data->mvdd_voltage_table)));
361
362 return 0;
363 }
364
365 /**
366 * Programs static screen threshold parameters
367 *
368 * @param hwmgr the address of the powerplay hardware manager.
369 * @return always 0
370 */
371 static int smu7_program_static_screen_threshold_parameters(
372 struct pp_hwmgr *hwmgr)
373 {
374 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
375
376 /* Set static screen threshold unit */
377 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
378 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
379 data->static_screen_threshold_unit);
380 /* Set static screen threshold */
381 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
382 CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
383 data->static_screen_threshold);
384
385 return 0;
386 }
387
388 /**
389 * Setup display gap for glitch free memory clock switching.
390 *
391 * @param hwmgr the address of the powerplay hardware manager.
392 * @return always 0
393 */
394 static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
395 {
396 uint32_t display_gap =
397 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
398 ixCG_DISPLAY_GAP_CNTL);
399
400 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
401 DISP_GAP, DISPLAY_GAP_IGNORE);
402
403 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
404 DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
405
406 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
407 ixCG_DISPLAY_GAP_CNTL, display_gap);
408
409 return 0;
410 }
411
412 /**
413 * Programs activity state transition voting clients
414 *
415 * @param hwmgr the address of the powerplay hardware manager.
416 * @return always 0
417 */
418 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
419 {
420 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
421 int i;
422
423 /* Clear reset for voting clients before enabling DPM */
424 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
425 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
426 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
427 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
428
429 for (i = 0; i < 8; i++)
430 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
431 ixCG_FREQ_TRAN_VOTING_0 + i * 4,
432 data->voting_rights_clients[i]);
433 return 0;
434 }
435
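/**
 * Reset the frequency transition voting clients before DPM is disabled.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */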
436 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
437 {
438 int i;
439
440 /* Reset voting clients before disabling DPM */
441 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
442 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
443 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
444 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
445
446 for (i = 0; i < 8; i++)
447 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
448 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
449
450 return 0;
451 }
452
453 /* Copy one arb setting to another and then switch the active set.
454 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
455 */
456 static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
457 uint32_t arb_src, uint32_t arb_dest)
458 {
459 uint32_t mc_arb_dram_timing;
460 uint32_t mc_arb_dram_timing2;
461 uint32_t burst_time;
462 uint32_t mc_cg_config;
463
464 switch (arb_src) {
465 case MC_CG_ARB_FREQ_F0:
466 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
467 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
468 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
469 break;
470 case MC_CG_ARB_FREQ_F1:
471 mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
472 mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
473 burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
474 break;
475 default:
476 return -EINVAL;
477 }
478
479 switch (arb_dest) {
480 case MC_CG_ARB_FREQ_F0:
481 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
482 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
483 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
484 break;
485 case MC_CG_ARB_FREQ_F1:
486 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
487 cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
488 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
489 break;
490 default:
491 return -EINVAL;
492 }
493
494 mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
495 mc_cg_config |= 0x0000000F;
496 cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
497 PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
498
499 return 0;
500 }
501
502 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
503 {
504 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
505 }
506
507 /**
508 * Initial switch from ARB F0->F1
509 *
510 * @param hwmgr the address of the powerplay hardware manager.
511 * @return always 0
512 * This function is to be called from the SetPowerState table.
513 */
514 static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
515 {
516 return smu7_copy_and_switch_arb_sets(hwmgr,
517 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
518 }
519
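/**
 * Read the currently active MC arbitration set from the SMC scratch
 * register and switch back to F0 if it is not already active.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success or if F0 is already active, -EINVAL on an invalid set.
 */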
520 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
521 {
522 uint32_t tmp;
523
524 tmp = (cgs_read_ind_register(hwmgr->device,
525 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
526 0x0000ff00) >> 8;
527
528 if (tmp == MC_CG_ARB_FREQ_F0)
529 return 0;
530
531 return smu7_copy_and_switch_arb_sets(hwmgr,
532 tmp, MC_CG_ARB_FREQ_F0);
533 }
534
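/**
 * Build the default PCIe DPM table from the PPTable PCIe table, or
 * hardcode one if no PCIe table is provided, reserving the last level
 * for the boot PCIe settings.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -EINVAL if no PCIe performance levels exist.
 */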
535 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
536 {
537 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
538
539 struct phm_ppt_v1_information *table_info =
540 (struct phm_ppt_v1_information *)(hwmgr->pptable);
541 struct phm_ppt_v1_pcie_table *pcie_table = NULL;
542
543 uint32_t i, max_entry;
544 uint32_t tmp;
545
546 PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
547 data->use_pcie_power_saving_levels), "No pcie performance levels!",
548 return -EINVAL);
549
550 if (table_info != NULL)
551 pcie_table = table_info->pcie_table;
552
553 if (data->use_pcie_performance_levels &&
554 !data->use_pcie_power_saving_levels) {
555 data->pcie_gen_power_saving = data->pcie_gen_performance;
556 data->pcie_lane_power_saving = data->pcie_lane_performance;
557 } else if (!data->use_pcie_performance_levels &&
558 data->use_pcie_power_saving_levels) {
559 data->pcie_gen_performance = data->pcie_gen_power_saving;
560 data->pcie_lane_performance = data->pcie_lane_power_saving;
561 }
562 tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
563 phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
564 tmp,
565 MAX_REGULAR_DPM_NUMBER);
566
567 if (pcie_table != NULL) {
568 /* max_entry is used to make sure we reserve one PCIE level
569 * for boot level (fix for A+A PSPP issue).
570 * If the PCIE table from the PPTable has a ULV entry + 8 entries,
571 * then ignore the last entry. */
572 max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
573 for (i = 1; i < max_entry; i++) {
574 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
575 get_pcie_gen_support(data->pcie_gen_cap,
576 pcie_table->entries[i].gen_speed),
577 get_pcie_lane_support(data->pcie_lane_cap,
578 pcie_table->entries[i].lane_width));
579 }
580 data->dpm_table.pcie_speed_table.count = max_entry - 1;
581 smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
582 } else {
583 /* Hardcode Pcie Table */
584 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
585 get_pcie_gen_support(data->pcie_gen_cap,
586 PP_Min_PCIEGen),
587 get_pcie_lane_support(data->pcie_lane_cap,
588 PP_Max_PCIELane));
589 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
590 get_pcie_gen_support(data->pcie_gen_cap,
591 PP_Min_PCIEGen),
592 get_pcie_lane_support(data->pcie_lane_cap,
593 PP_Max_PCIELane));
594 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
595 get_pcie_gen_support(data->pcie_gen_cap,
596 PP_Max_PCIEGen),
597 get_pcie_lane_support(data->pcie_lane_cap,
598 PP_Max_PCIELane));
599 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
600 get_pcie_gen_support(data->pcie_gen_cap,
601 PP_Max_PCIEGen),
602 get_pcie_lane_support(data->pcie_lane_cap,
603 PP_Max_PCIELane));
604 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
605 get_pcie_gen_support(data->pcie_gen_cap,
606 PP_Max_PCIEGen),
607 get_pcie_lane_support(data->pcie_lane_cap,
608 PP_Max_PCIELane));
609 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
610 get_pcie_gen_support(data->pcie_gen_cap,
611 PP_Max_PCIEGen),
612 get_pcie_lane_support(data->pcie_lane_cap,
613 PP_Max_PCIELane));
614
615 data->dpm_table.pcie_speed_table.count = 6;
616 }
617 /* Populate last level for boot PCIE level, but do not increment count. */
618 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
619 for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
620 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
621 get_pcie_gen_support(data->pcie_gen_cap,
622 PP_Max_PCIEGen),
623 data->vbios_boot_state.pcie_lane_bootup_value);
624 } else {
625 phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
626 data->dpm_table.pcie_speed_table.count,
627 get_pcie_gen_support(data->pcie_gen_cap,
628 PP_Min_PCIEGen),
629 get_pcie_lane_support(data->pcie_lane_cap,
630 PP_Max_PCIELane));
631 }
632 return 0;
633 }
634
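/**
 * Clear the DPM table and reset each single DPM table (SCLK, MCLK,
 * VDDC, VDDCI, MVDD) to the SMU-defined maximum number of levels.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */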
635 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
636 {
637 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
638
639 memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
640
641 phm_reset_single_dpm_table(
642 &data->dpm_table.sclk_table,
643 smum_get_mac_definition(hwmgr,
644 SMU_MAX_LEVELS_GRAPHICS),
645 MAX_REGULAR_DPM_NUMBER);
646 phm_reset_single_dpm_table(
647 &data->dpm_table.mclk_table,
648 smum_get_mac_definition(hwmgr,
649 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
650
651 phm_reset_single_dpm_table(
652 &data->dpm_table.vddc_table,
653 smum_get_mac_definition(hwmgr,
654 SMU_MAX_LEVELS_VDDC),
655 MAX_REGULAR_DPM_NUMBER);
656 phm_reset_single_dpm_table(
657 &data->dpm_table.vddci_table,
658 smum_get_mac_definition(hwmgr,
659 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
660
661 phm_reset_single_dpm_table(
662 &data->dpm_table.mvdd_table,
663 smum_get_mac_definition(hwmgr,
664 SMU_MAX_LEVELS_MVDD),
665 MAX_REGULAR_DPM_NUMBER);
666 return 0;
667 }
668 /*
669 * This function initializes all DPM state tables
670 * for SMU7 based on the dependency tables.
671 * The dynamic state patching functions will then trim these
672 * state tables to the allowed range based
673 * on the power policy or external client requests,
674 * such as UVD requests, etc.
675 */
676
677 static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
678 {
679 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
680 struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
681 hwmgr->dyn_state.vddc_dependency_on_sclk;
682 struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
683 hwmgr->dyn_state.vddc_dependency_on_mclk;
684 struct phm_cac_leakage_table *std_voltage_table =
685 hwmgr->dyn_state.cac_leakage_table;
686 uint32_t i;
687
688 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
689 "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
690 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
691 "SCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
692
693 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
694 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
695 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
696 "MCLK dependency table has to have at least one entry. This table is mandatory", return -EINVAL);
697
698
699 /* Initialize Sclk DPM table based on allowed Sclk values */
700 data->dpm_table.sclk_table.count = 0;
701
702 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
703 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
704 allowed_vdd_sclk_table->entries[i].clk) {
705 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
706 allowed_vdd_sclk_table->entries[i].clk;
707 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
708 data->dpm_table.sclk_table.count++;
709 }
710 }
711
712 PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
713 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
714 /* Initialize Mclk DPM table based on allowed Mclk values */
715 data->dpm_table.mclk_table.count = 0;
716 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
717 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
718 allowed_vdd_mclk_table->entries[i].clk) {
719 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
720 allowed_vdd_mclk_table->entries[i].clk;
721 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
722 data->dpm_table.mclk_table.count++;
723 }
724 }
725
726 /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */
727 for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
728 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
729 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
730 /* param1 is for corresponding std voltage */
731 data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
732 }
733
734 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
735 allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
736
737 if (NULL != allowed_vdd_mclk_table) {
738 /* Initialize Vddci DPM table based on allowed Mclk values */
739 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
740 data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
741 data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
742 }
743 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
744 }
745
746 allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
747
748 if (NULL != allowed_vdd_mclk_table) {
749 /*
750 * Initialize MVDD DPM table based on allowed Mclk
751 * values
752 */
753 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
754 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
755 data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
756 }
757 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
758 }
759
760 return 0;
761 }
762
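/**
 * Initialize the SCLK and MCLK DPM tables from the v1 PPTable
 * dependency tables, skipping duplicate clock values, and derive the
 * overdrive clock limits when they are unset.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -EINVAL if a dependency table is missing or empty.
 */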
763 static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
764 {
765 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
766 struct phm_ppt_v1_information *table_info =
767 (struct phm_ppt_v1_information *)(hwmgr->pptable);
768 uint32_t i;
769
770 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
771 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
772
773 if (table_info == NULL)
774 return -EINVAL;
775
776 dep_sclk_table = table_info->vdd_dep_on_sclk;
777 dep_mclk_table = table_info->vdd_dep_on_mclk;
778
779 PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
780 "SCLK dependency table is missing.",
781 return -EINVAL);
782 PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
783 "SCLK dependency table count is 0.",
784 return -EINVAL);
785
786 PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
787 "MCLK dependency table is missing.",
788 return -EINVAL);
789 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
790 "MCLK dependency table count is 0",
791 return -EINVAL);
792
793 /* Initialize Sclk DPM table based on allowed Sclk values */
794 data->dpm_table.sclk_table.count = 0;
795 for (i = 0; i < dep_sclk_table->count; i++) {
796 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
797 dep_sclk_table->entries[i].clk) {
798
799 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
800 dep_sclk_table->entries[i].clk;
801
802 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
803 (i == 0) ? true : false;
804 data->dpm_table.sclk_table.count++;
805 }
806 }
807 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
808 hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
809 /* Initialize Mclk DPM table based on allowed Mclk values */
810 data->dpm_table.mclk_table.count = 0;
811 for (i = 0; i < dep_mclk_table->count; i++) {
812 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
813 [data->dpm_table.mclk_table.count - 1].value !=
814 dep_mclk_table->entries[i].clk) {
815 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
816 dep_mclk_table->entries[i].clk;
817 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
818 (i == 0) ? true : false;
819 data->dpm_table.mclk_table.count++;
820 }
821 }
822
823 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
824 hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
825 return 0;
826 }
827
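/**
 * Seed the overdrive (ODN) clock levels from the golden DPM tables and
 * copy the PPTable voltage dependency tables into the ODN tables.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -EINVAL if the PPTable is missing.
 */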
828 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
829 {
830 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
831 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
832 struct phm_ppt_v1_information *table_info =
833 (struct phm_ppt_v1_information *)(hwmgr->pptable);
834 uint32_t i;
835
836 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
837 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
838 struct phm_odn_performance_level *entries;
839
840 if (table_info == NULL)
841 return -EINVAL;
842
843 dep_sclk_table = table_info->vdd_dep_on_sclk;
844 dep_mclk_table = table_info->vdd_dep_on_mclk;
845
846 odn_table->odn_core_clock_dpm_levels.num_of_pl =
847 data->golden_dpm_table.sclk_table.count;
848 entries = odn_table->odn_core_clock_dpm_levels.entries;
849 for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
850 entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
851 entries[i].enabled = true;
852 entries[i].vddc = dep_sclk_table->entries[i].vddc;
853 }
854
855 smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
856 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
857
858 odn_table->odn_memory_clock_dpm_levels.num_of_pl =
859 data->golden_dpm_table.mclk_table.count;
860 entries = odn_table->odn_memory_clock_dpm_levels.entries;
861 for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
862 entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
863 entries[i].enabled = true;
864 entries[i].vddc = dep_mclk_table->entries[i].vddc;
865 }
866
867 smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
868 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
869
870 return 0;
871 }
872
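/**
 * Determine the ODN min/max VDDC range from the VBIOS, falling back to
 * the SCLK dependency table when the VBIOS values are missing or out
 * of range.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 */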
873 static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
874 {
875 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
876 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
877 struct phm_ppt_v1_information *table_info =
878 (struct phm_ppt_v1_information *)(hwmgr->pptable);
879 uint32_t min_vddc = 0;
880 uint32_t max_vddc = 0;
881
882 if (!table_info)
883 return;
884
885 dep_sclk_table = table_info->vdd_dep_on_sclk;
886
887 atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
888
889 if (min_vddc == 0 || min_vddc > 2000
890 || min_vddc > dep_sclk_table->entries[0].vddc)
891 min_vddc = dep_sclk_table->entries[0].vddc;
892
893 if (max_vddc == 0 || max_vddc > 2000
894 || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
895 max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
896
897 data->odn_dpm_table.min_vddc = min_vddc;
898 data->odn_dpm_table.max_vddc = max_vddc;
899 }
900
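/**
 * Compare the current DPM tables against the ODN tables and set the
 * DPMTABLE_OD_UPDATE_* flags for any user overclocking changes.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 */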
901 static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
902 {
903 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
904 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
905 struct phm_ppt_v1_information *table_info =
906 (struct phm_ppt_v1_information *)(hwmgr->pptable);
907 uint32_t i;
908
909 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
910 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
911
912 if (table_info == NULL)
913 return;
914
915 for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
916 if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
917 data->dpm_table.sclk_table.dpm_levels[i].value) {
918 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
919 break;
920 }
921 }
922
923 for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
924 if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
925 data->dpm_table.mclk_table.dpm_levels[i].value) {
926 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
927 break;
928 }
929 }
930
931 dep_table = table_info->vdd_dep_on_mclk;
932 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
933
934 for (i = 0; i < dep_table->count; i++) {
935 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
936 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
937 return;
938 }
939 }
940
941 dep_table = table_info->vdd_dep_on_sclk;
942 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
943 for (i = 0; i < dep_table->count; i++) {
944 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
945 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
946 return;
947 }
948 }
949 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
950 data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
951 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
952 }
953 }
954
955 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
956 {
957 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
958
959 smu7_reset_dpm_tables(hwmgr);
960
961 if (hwmgr->pp_table_version == PP_TABLE_V1)
962 smu7_setup_dpm_tables_v1(hwmgr);
963 else if (hwmgr->pp_table_version == PP_TABLE_V0)
964 smu7_setup_dpm_tables_v0(hwmgr);
965
966 smu7_setup_default_pcie_table(hwmgr);
967
968 /* save a copy of the default DPM table */
969 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
970 sizeof(struct smu7_dpm_table));
971
972 /* initialize ODN table */
973 if (hwmgr->od_enabled) {
974 if (data->odn_dpm_table.max_vddc) {
975 smu7_check_dpm_table_updated(hwmgr);
976 } else {
977 smu7_setup_voltage_range_from_vbios(hwmgr);
978 smu7_odn_initial_default_setting(hwmgr);
979 }
980 }
981 return 0;
982 }
983
984 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
985 {
986
987 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
988 PHM_PlatformCaps_RegulatorHot))
989 return smum_send_msg_to_smc(hwmgr,
990 PPSMC_MSG_EnableVRHotGPIOInterrupt);
991
992 return 0;
993 }
994
995 static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
996 {
997 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
998 SCLK_PWRMGT_OFF, 0);
999 return 0;
1000 }
1001
1002 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
1003 {
1004 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1005
1006 if (data->ulv_supported)
1007 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
1008
1009 return 0;
1010 }
1011
1012 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1013 {
1014 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1015
1016 if (data->ulv_supported)
1017 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
1018
1019 return 0;
1020 }
1021
1022 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1023 {
1024 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1025 PHM_PlatformCaps_SclkDeepSleep)) {
1026 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
1027 PP_ASSERT_WITH_CODE(false,
1028 "Attempt to enable Master Deep Sleep switch failed!",
1029 return -EINVAL);
1030 } else {
1031 if (smum_send_msg_to_smc(hwmgr,
1032 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1033 PP_ASSERT_WITH_CODE(false,
1034 "Attempt to disable Master Deep Sleep switch failed!",
1035 return -EINVAL);
1036 }
1037 }
1038
1039 return 0;
1040 }
1041
1042 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1043 {
1044 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1045 PHM_PlatformCaps_SclkDeepSleep)) {
1046 if (smum_send_msg_to_smc(hwmgr,
1047 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
1048 PP_ASSERT_WITH_CODE(false,
1049 "Attempt to disable Master Deep Sleep switch failed!",
1050 return -EINVAL);
1051 }
1052 }
1053
1054 return 0;
1055 }
1056
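/**
 * Set the VCE SCLK handshake disable bit in the SMU HandshakeDisables
 * soft register.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return always 0
 */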
1057 static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1058 {
1059 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1060 uint32_t soft_register_value = 0;
1061 uint32_t handshake_disables_offset = data->soft_regs_start
1062 + smum_get_offsetof(hwmgr,
1063 SMU_SoftRegisters, HandshakeDisables);
1064
1065 soft_register_value = cgs_read_ind_register(hwmgr->device,
1066 CGS_IND_REG__SMC, handshake_disables_offset);
1067 soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1068 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1069 handshake_disables_offset, soft_register_value);
1070 return 0;
1071 }
1072
1073 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1074 {
1075 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1076 uint32_t soft_register_value = 0;
1077 uint32_t handshake_disables_offset = data->soft_regs_start
1078 + smum_get_offsetof(hwmgr,
1079 SMU_SoftRegisters, HandshakeDisables);
1080
1081 soft_register_value = cgs_read_ind_register(hwmgr->device,
1082 CGS_IND_REG__SMC, handshake_disables_offset);
1083 soft_register_value |= smum_get_mac_definition(hwmgr,
1084 SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1085 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1086 handshake_disables_offset, soft_register_value);
1087 return 0;
1088 }
1089
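/**
 * Enable SCLK and MCLK DPM in the SMC (unless disabled by the feature
 * mask) and program the LCAC registers for memory CAC.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return 0 on success, -EINVAL if an SMC enable message fails.
 */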
1090 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1091 {
1092 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1093
1094 /* enable SCLK dpm */
1095 if (!data->sclk_dpm_key_disabled) {
1096 if (hwmgr->chip_id == CHIP_VEGAM)
1097 smu7_disable_sclk_vce_handshake(hwmgr);
1098
1099 PP_ASSERT_WITH_CODE(
1100 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
1101 "Failed to enable SCLK DPM during DPM Start Function!",
1102 return -EINVAL);
1103 }
1104
1105 /* enable MCLK dpm */
1106 if (0 == data->mclk_dpm_key_disabled) {
1107 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1108 smu7_disable_handshake_uvd(hwmgr);
1109
1110 PP_ASSERT_WITH_CODE(
1111 (0 == smum_send_msg_to_smc(hwmgr,
1112 PPSMC_MSG_MCLKDPM_Enable)),
1113 "Failed to enable MCLK DPM during DPM Start Function!",
1114 return -EINVAL);
1115
1116 if (hwmgr->chip_family != CHIP_VEGAM)
1117 PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1118
1119
1120 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1121 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1122 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1123 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1124 udelay(10);
1125 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1126 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1127 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1128 } else {
1129 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1130 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1131 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1132 udelay(10);
1133 if (hwmgr->chip_id == CHIP_VEGAM) {
1134 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1135 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1136 } else {
1137 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1138 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1139 }
1140 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1141 }
1142 }
1143
1144 return 0;
1145 }
1146
1147 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1148 {
1149 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1150
1151 /* enable general power management */
1152
1153 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1154 GLOBAL_PWRMGT_EN, 1);
1155
1156 /* enable sclk deep sleep */
1157
1158 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1159 DYNAMIC_PM_EN, 1);
1160
1161 /* prepare for PCIE DPM */
1162
1163 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1164 data->soft_regs_start +
1165 smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1166 VoltageChangeTimeout), 0x1000);
1167 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1168 SWRST_COMMAND_1, RESETLC, 0x0);
1169
1170 if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1171 cgs_write_register(hwmgr->device, 0x1488,
1172 (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1173
1174 if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1175 pr_err("Failed to enable Sclk DPM and Mclk DPM!");
1176 return -EINVAL;
1177 }
1178
1179 /* enable PCIE dpm */
1180 if (0 == data->pcie_dpm_key_disabled) {
1181 PP_ASSERT_WITH_CODE(
1182 (0 == smum_send_msg_to_smc(hwmgr,
1183 PPSMC_MSG_PCIeDPM_Enable)),
1184 "Failed to enable pcie DPM during DPM Start Function!",
1185 return -EINVAL);
1186 }
1187
1188 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1189 PHM_PlatformCaps_Falcon_QuickTransition)) {
1190 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1191 PPSMC_MSG_EnableACDCGPIOInterrupt)),
1192 "Failed to enable AC DC GPIO Interrupt!",
1193 );
1194 }
1195
1196 return 0;
1197 }
1198
1199 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1200 {
1201 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1202
1203 /* disable SCLK dpm */
1204 if (!data->sclk_dpm_key_disabled) {
1205 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1206 "Trying to disable SCLK DPM when DPM is disabled",
1207 return 0);
1208 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
1209 }
1210
1211 /* disable MCLK dpm */
1212 if (!data->mclk_dpm_key_disabled) {
1213 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1214 "Trying to disable MCLK DPM when DPM is disabled",
1215 return 0);
1216 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
1217 }
1218
1219 return 0;
1220 }
1221
1222 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1223 {
1224 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1225
1226 /* disable general power management */
1227 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1228 GLOBAL_PWRMGT_EN, 0);
1229 /* disable sclk deep sleep */
1230 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1231 DYNAMIC_PM_EN, 0);
1232
1233 /* disable PCIE dpm */
1234 if (!data->pcie_dpm_key_disabled) {
1235 PP_ASSERT_WITH_CODE(
1236 (smum_send_msg_to_smc(hwmgr,
1237 PPSMC_MSG_PCIeDPM_Disable) == 0),
1238 "Failed to disable pcie DPM during DPM Stop Function!",
1239 return -EINVAL);
1240 }
1241
1242 smu7_disable_sclk_mclk_dpm(hwmgr);
1243
1244 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1245 "Trying to disable voltage DPM when DPM is disabled",
1246 return 0);
1247
1248 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
1249
1250 return 0;
1251 }
1252
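/**
 * Select the thermal protection event source and enable or disable
 * thermal protection according to the active throttle sources.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param sources bitmask of PHM_AutoThrottleSource values.
 */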
1253 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1254 {
1255 bool protection;
1256 enum DPM_EVENT_SRC src;
1257
1258 switch (sources) {
1259 default:
1260 pr_err("Unknown throttling event sources.");
1261 /* fall through */
1262 case 0:
1263 protection = false;
1264 /* src is unused */
1265 break;
1266 case (1 << PHM_AutoThrottleSource_Thermal):
1267 protection = true;
1268 src = DPM_EVENT_SRC_DIGITAL;
1269 break;
1270 case (1 << PHM_AutoThrottleSource_External):
1271 protection = true;
1272 src = DPM_EVENT_SRC_EXTERNAL;
1273 break;
1274 case (1 << PHM_AutoThrottleSource_External) |
1275 (1 << PHM_AutoThrottleSource_Thermal):
1276 protection = true;
1277 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1278 break;
1279 }
1280 /* Order matters - don't enable thermal protection for the wrong source. */
1281 if (protection) {
1282 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1283 DPM_EVENT_SRC, src);
1284 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1285 THERMAL_PROTECTION_DIS,
1286 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1287 PHM_PlatformCaps_ThermalController));
1288 } else
1289 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1290 THERMAL_PROTECTION_DIS, 1);
1291 }
1292
1293 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1294 PHM_AutoThrottleSource source)
1295 {
1296 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1297
1298 if (!(data->active_auto_throttle_sources & (1 << source))) {
1299 data->active_auto_throttle_sources |= 1 << source;
1300 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1301 }
1302 return 0;
1303 }
1304
1305 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1306 {
1307 return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1308 }
1309
1310 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1311 PHM_AutoThrottleSource source)
1312 {
1313 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1314
1315 if (data->active_auto_throttle_sources & (1 << source)) {
1316 data->active_auto_throttle_sources &= ~(1 << source);
1317 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1318 }
1319 return 0;
1320 }
1321
1322 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1323 {
1324 return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1325 }
1326
1327 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1328 {
1329 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1330 data->pcie_performance_request = true;
1331
1332 return 0;
1333 }
1334
1335 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1336 {
1337 int tmp_result = 0;
1338 int result = 0;
1339
1340 if (smu7_voltage_control(hwmgr)) {
1341 tmp_result = smu7_enable_voltage_control(hwmgr);
1342 PP_ASSERT_WITH_CODE(tmp_result == 0,
1343 "Failed to enable voltage control!",
1344 result = tmp_result);
1345
1346 tmp_result = smu7_construct_voltage_tables(hwmgr);
1347 PP_ASSERT_WITH_CODE((0 == tmp_result),
1348 "Failed to construct voltage tables!",
1349 result = tmp_result);
1350 }
1351 smum_initialize_mc_reg_table(hwmgr);
1352
1353 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1354 PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1355 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1356 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1357
1358 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1359 PHM_PlatformCaps_ThermalController))
1360 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1361 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1362
1363 tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1364 PP_ASSERT_WITH_CODE((0 == tmp_result),
1365 "Failed to program static screen threshold parameters!",
1366 result = tmp_result);
1367
1368 tmp_result = smu7_enable_display_gap(hwmgr);
1369 PP_ASSERT_WITH_CODE((0 == tmp_result),
1370 "Failed to enable display gap!", result = tmp_result);
1371
1372 tmp_result = smu7_program_voting_clients(hwmgr);
1373 PP_ASSERT_WITH_CODE((0 == tmp_result),
1374 "Failed to program voting clients!", result = tmp_result);
1375
1376 tmp_result = smum_process_firmware_header(hwmgr);
1377 PP_ASSERT_WITH_CODE((0 == tmp_result),
1378 "Failed to process firmware header!", result = tmp_result);
1379
1380 if (hwmgr->chip_id != CHIP_VEGAM) {
1381 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1382 PP_ASSERT_WITH_CODE((0 == tmp_result),
1383 "Failed to initialize switch from ArbF0 to F1!",
1384 result = tmp_result);
1385 }
1386
1387 result = smu7_setup_default_dpm_tables(hwmgr);
1388 PP_ASSERT_WITH_CODE(0 == result,
1389 "Failed to setup default DPM tables!", return result);
1390
1391 tmp_result = smum_init_smc_table(hwmgr);
1392 PP_ASSERT_WITH_CODE((0 == tmp_result),
1393 "Failed to initialize SMC table!", result = tmp_result);
1394
1395 tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1396 PP_ASSERT_WITH_CODE((0 == tmp_result),
1397 "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1398
1399 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
1400
1401 tmp_result = smu7_enable_sclk_control(hwmgr);
1402 PP_ASSERT_WITH_CODE((0 == tmp_result),
1403 "Failed to enable SCLK control!", result = tmp_result);
1404
1405 tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1406 PP_ASSERT_WITH_CODE((0 == tmp_result),
1407 "Failed to enable voltage control!", result = tmp_result);
1408
1409 tmp_result = smu7_enable_ulv(hwmgr);
1410 PP_ASSERT_WITH_CODE((0 == tmp_result),
1411 "Failed to enable ULV!", result = tmp_result);
1412
1413 tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1414 PP_ASSERT_WITH_CODE((0 == tmp_result),
1415 "Failed to enable deep sleep master switch!", result = tmp_result);
1416
1417 tmp_result = smu7_enable_didt_config(hwmgr);
1418 PP_ASSERT_WITH_CODE((tmp_result == 0),
1419 "Failed to enable DIDT config!", result = tmp_result);
1420
1421 tmp_result = smu7_start_dpm(hwmgr);
1422 PP_ASSERT_WITH_CODE((0 == tmp_result),
1423 "Failed to start DPM!", result = tmp_result);
1424
1425 tmp_result = smu7_enable_smc_cac(hwmgr);
1426 PP_ASSERT_WITH_CODE((0 == tmp_result),
1427 "Failed to enable SMC CAC!", result = tmp_result);
1428
1429 tmp_result = smu7_enable_power_containment(hwmgr);
1430 PP_ASSERT_WITH_CODE((0 == tmp_result),
1431 "Failed to enable power containment!", result = tmp_result);
1432
1433 tmp_result = smu7_power_control_set_level(hwmgr);
1434 PP_ASSERT_WITH_CODE((0 == tmp_result),
1435 "Failed to power control set level!", result = tmp_result);
1436
1437 tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1438 PP_ASSERT_WITH_CODE((0 == tmp_result),
1439 "Failed to enable thermal auto throttle!", result = tmp_result);
1440
1441 tmp_result = smu7_pcie_performance_request(hwmgr);
1442 PP_ASSERT_WITH_CODE((0 == tmp_result),
1443 "pcie performance request failed!", result = tmp_result);
1444
1445 return 0;
1446 }
1447
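/**
 * Enable or disable AVFS through the SMC, skipping the message when
 * the feature is already in the requested state or not supported.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param enable true to enable AVFS, false to disable it.
 * @return 0 on success, -EINVAL if the SMC message fails.
 */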
1448 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1449 {
1450 if (!hwmgr->avfs_supported)
1451 return 0;
1452
1453 if (enable) {
1454 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1455 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1456 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1457 hwmgr, PPSMC_MSG_EnableAvfs),
1458 "Failed to enable AVFS!",
1459 return -EINVAL);
1460 }
1461 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1462 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1463 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1464 hwmgr, PPSMC_MSG_DisableAvfs),
1465 "Failed to disable AVFS!",
1466 return -EINVAL);
1467 }
1468
1469 return 0;
1470 }
1471
1472 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1473 {
1474 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1475
1476 if (!hwmgr->avfs_supported)
1477 return 0;
1478
1479 if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1480 smu7_avfs_control(hwmgr, false);
1481 } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1482 smu7_avfs_control(hwmgr, false);
1483 smu7_avfs_control(hwmgr, true);
1484 } else {
1485 smu7_avfs_control(hwmgr, true);
1486 }
1487
1488 return 0;
1489 }
1490
1491 int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1492 {
1493 int tmp_result, result = 0;
1494
1495 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1496 PHM_PlatformCaps_ThermalController))
1497 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1498 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1499
1500 tmp_result = smu7_disable_power_containment(hwmgr);
1501 PP_ASSERT_WITH_CODE((tmp_result == 0),
1502 "Failed to disable power containment!", result = tmp_result);
1503
1504 tmp_result = smu7_disable_smc_cac(hwmgr);
1505 PP_ASSERT_WITH_CODE((tmp_result == 0),
1506 "Failed to disable SMC CAC!", result = tmp_result);
1507
1508 tmp_result = smu7_disable_didt_config(hwmgr);
1509 PP_ASSERT_WITH_CODE((tmp_result == 0),
1510 "Failed to disable DIDT!", result = tmp_result);
1511
1512 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1513 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1514 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1515 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1516
1517 tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1518 PP_ASSERT_WITH_CODE((tmp_result == 0),
1519 "Failed to disable thermal auto throttle!", result = tmp_result);
1520
1521 tmp_result = smu7_avfs_control(hwmgr, false);
1522 PP_ASSERT_WITH_CODE((tmp_result == 0),
1523 "Failed to disable AVFS!", result = tmp_result);
1524
1525 tmp_result = smu7_stop_dpm(hwmgr);
1526 PP_ASSERT_WITH_CODE((tmp_result == 0),
1527 "Failed to stop DPM!", result = tmp_result);
1528
1529 tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1530 PP_ASSERT_WITH_CODE((tmp_result == 0),
1531 "Failed to disable deep sleep master switch!", result = tmp_result);
1532
1533 tmp_result = smu7_disable_ulv(hwmgr);
1534 PP_ASSERT_WITH_CODE((tmp_result == 0),
1535 "Failed to disable ULV!", result = tmp_result);
1536
1537 tmp_result = smu7_clear_voting_clients(hwmgr);
1538 PP_ASSERT_WITH_CODE((tmp_result == 0),
1539 "Failed to clear voting clients!", result = tmp_result);
1540
1541 tmp_result = smu7_reset_to_default(hwmgr);
1542 PP_ASSERT_WITH_CODE((tmp_result == 0),
1543 "Failed to reset to default!", result = tmp_result);
1544
1545 tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1546 PP_ASSERT_WITH_CODE((tmp_result == 0),
1547 "Failed to force to switch arbf0!", result = tmp_result);
1548
1549 return result;
1550 }
1551
1552 int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr)
1553 {
1554
1555 return 0;
1556 }
1557
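/*
 * Fill the backend with default DPM parameters (voting rights clients,
 * hysteresis, thermal trip points) and derive the voltage control method
 * for VDDC/VDDGFX/VDDCI/MVDD from the VBIOS voltage objects.
 */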
1558 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1559 {
1560 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1561 struct phm_ppt_v1_information *table_info =
1562 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1563 struct amdgpu_device *adev = hwmgr->adev;
1564
1565 data->dll_default_on = false;
1566 data->mclk_dpm0_activity_target = 0xa;
1567 data->vddc_vddgfx_delta = 300;
1568 data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1569 data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1570 data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1571 data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1572 data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1573 data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1574 data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1575 data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1576 data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1577 data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1578
1579 data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1580 data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1581 data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1582 /* need to set voltage control types before EVV patching */
1583 data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1584 data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1585 data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1586 data->enable_tdc_limit_feature = true;
1587 data->enable_pkg_pwr_tracking_feature = true;
1588 data->force_pcie_gen = PP_PCIEGenInvalid;
1589 data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1590 data->current_profile_setting.bupdate_sclk = 1;
1591 data->current_profile_setting.sclk_up_hyst = 0;
1592 data->current_profile_setting.sclk_down_hyst = 100;
1593 data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1594 data->current_profile_setting.bupdate_mclk = 1;
1595 data->current_profile_setting.mclk_up_hyst = 0;
1596 data->current_profile_setting.mclk_down_hyst = 100;
1597 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1598 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1599 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1600 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1601
1602 if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
1603 uint8_t tmp1, tmp2;
1604 uint16_t tmp3 = 0;
1605 atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1606 &tmp3);
1607 tmp3 = (tmp3 >> 5) & 0x3;
1608 data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1609 } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1610 data->vddc_phase_shed_control = 1;
1611 } else {
1612 data->vddc_phase_shed_control = 0;
1613 }
1614
1615 if (hwmgr->chip_id == CHIP_HAWAII) {
1616 data->thermal_temp_setting.temperature_low = 94500;
1617 data->thermal_temp_setting.temperature_high = 95000;
1618 data->thermal_temp_setting.temperature_shutdown = 104000;
1619 } else {
1620 data->thermal_temp_setting.temperature_low = 99500;
1621 data->thermal_temp_setting.temperature_high = 100000;
1622 data->thermal_temp_setting.temperature_shutdown = 104000;
1623 }
1624
1625 data->fast_watermark_threshold = 100;
1626 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1627 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1628 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1629 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1630 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1631 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1632
1633 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1634 PHM_PlatformCaps_ControlVDDGFX)) {
1635 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1636 VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1637 data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1638 }
1639 }
1640
1641 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1642 PHM_PlatformCaps_EnableMVDDControl)) {
1643 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1644 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1645 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1646 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1647 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1648 data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1649 }
1650
1651 if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1652 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1653 PHM_PlatformCaps_ControlVDDGFX);
1654
1655 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1656 PHM_PlatformCaps_ControlVDDCI)) {
1657 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1658 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1659 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1660 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1661 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1662 data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1663 }
1664
1665 if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1666 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1667 PHM_PlatformCaps_EnableMVDDControl);
1668
1669 if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1670 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1671 PHM_PlatformCaps_ControlVDDCI);
1672
1673 if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1674 && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1675 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1676 PHM_PlatformCaps_ClockStretcher);
1677
1678 data->pcie_gen_performance.max = PP_PCIEGen1;
1679 data->pcie_gen_performance.min = PP_PCIEGen3;
1680 data->pcie_gen_power_saving.max = PP_PCIEGen1;
1681 data->pcie_gen_power_saving.min = PP_PCIEGen3;
1682 data->pcie_lane_performance.max = 0;
1683 data->pcie_lane_performance.min = 16;
1684 data->pcie_lane_power_saving.max = 0;
1685 data->pcie_lane_power_saving.min = 16;
1686
1687
1688 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1689 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1690 PHM_PlatformCaps_UVDPowerGating);
1691 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1692 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1693 PHM_PlatformCaps_VCEPowerGating);
1694 }
1695
1696 /**
1697 * Get Leakage VDDC based on leakage ID.
1698 *
1699 * @param hwmgr the address of the powerplay hardware manager.
1700 * @return always 0
1701 */
1702 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1703 {
1704 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1705 uint16_t vv_id;
1706 uint16_t vddc = 0;
1707 uint16_t vddgfx = 0;
1708 uint16_t i, j;
1709 uint32_t sclk = 0;
1710 struct phm_ppt_v1_information *table_info =
1711 (struct phm_ppt_v1_information *)hwmgr->pptable;
1712 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1713
1714
1715 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1716 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1717
1718 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1719 if ((hwmgr->pp_table_version == PP_TABLE_V1)
1720 && !phm_get_sclk_for_voltage_evv(hwmgr,
1721 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1722 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1723 PHM_PlatformCaps_ClockStretcher)) {
1724 sclk_table = table_info->vdd_dep_on_sclk;
1725
1726 for (j = 1; j < sclk_table->count; j++) {
1727 if (sclk_table->entries[j].clk == sclk &&
1728 sclk_table->entries[j].cks_enable == 0) {
1729 sclk += 5000;
1730 break;
1731 }
1732 }
1733 }
1734 if (0 == atomctrl_get_voltage_evv_on_sclk
1735 (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1736 vv_id, &vddgfx)) {
1737 /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
1738 PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1739
1740 /* the voltage should not be zero nor equal to leakage ID */
1741 if (vddgfx != 0 && vddgfx != vv_id) {
1742 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1743 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1744 data->vddcgfx_leakage.count++;
1745 }
1746 } else {
1747 pr_info("Error retrieving EVV voltage value!\n");
1748 }
1749 }
1750 } else {
1751 if ((hwmgr->pp_table_version == PP_TABLE_V0)
1752 || !phm_get_sclk_for_voltage_evv(hwmgr,
1753 table_info->vddc_lookup_table, vv_id, &sclk)) {
1754 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1755 PHM_PlatformCaps_ClockStretcher)) {
1756 if (table_info == NULL)
1757 return -EINVAL;
1758 sclk_table = table_info->vdd_dep_on_sclk;
1759
1760 for (j = 1; j < sclk_table->count; j++) {
1761 if (sclk_table->entries[j].clk == sclk &&
1762 sclk_table->entries[j].cks_enable == 0) {
1763 sclk += 5000;
1764 break;
1765 }
1766 }
1767 }
1768
1769 if (phm_get_voltage_evv_on_sclk(hwmgr,
1770 VOLTAGE_TYPE_VDDC,
1771 sclk, vv_id, &vddc) == 0) {
1772 if (vddc >= 2000 || vddc == 0)
1773 return -EINVAL;
1774 } else {
1775 pr_debug("failed to retrieve EVV voltage!\n");
1776 continue;
1777 }
1778
1779 /* the voltage should not be zero nor equal to leakage ID */
1780 if (vddc != 0 && vddc != vv_id) {
1781 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
1782 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
1783 data->vddc_leakage.count++;
1784 }
1785 }
1786 }
1787 }
1788
1789 return 0;
1790 }
1791
1792 /**
1793 * Change virtual leakage voltage to actual value.
1794 *
1795 * @param hwmgr the address of the powerplay hardware manager.
1796 * @param voltage pointer to the voltage value to patch in place
1797 * @param leakage_table pointer to the leakage table
1798 */
1799 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1800 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1801 {
1802 uint32_t index;
1803
1804 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1805 for (index = 0; index < leakage_table->count; index++) {
1806 /* if this voltage matches a leakage voltage ID */
1807 /* patch with actual leakage voltage */
1808 if (leakage_table->leakage_id[index] == *voltage) {
1809 *voltage = leakage_table->actual_voltage[index];
1810 break;
1811 }
1812 }
1813
1814 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1815 pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
1816 }
1817
1818 /**
1819 * Patch voltage lookup table by EVV leakages.
1820 *
1821 * @param hwmgr the address of the powerplay hardware manager.
1822 * @param lookup_table pointer to the voltage lookup table
1823 * @param leakage_table pointer to the leakage table
1824 * @return always 0
1825 */
1826 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
1827 phm_ppt_v1_voltage_lookup_table *lookup_table,
1828 struct smu7_leakage_voltage *leakage_table)
1829 {
1830 uint32_t i;
1831
1832 for (i = 0; i < lookup_table->count; i++)
1833 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
1834 &lookup_table->entries[i].us_vdd, leakage_table);
1835
1836 return 0;
1837 }
1838
1839 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
1840 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
1841 uint16_t *vddc)
1842 {
1843 struct phm_ppt_v1_information *table_info =
1844 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1845 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
1846 hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
1847 table_info->max_clock_voltage_on_dc.vddc;
1848 return 0;
1849 }
1850
1851 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
1852 struct pp_hwmgr *hwmgr)
1853 {
1854 uint8_t entry_id;
1855 uint8_t voltage_id;
1856 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1857 struct phm_ppt_v1_information *table_info =
1858 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1859
1860 struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
1861 table_info->vdd_dep_on_sclk;
1862 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
1863 table_info->vdd_dep_on_mclk;
1864 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
1865 table_info->mm_dep_table;
1866
1867 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1868 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1869 voltage_id = sclk_table->entries[entry_id].vddInd;
1870 sclk_table->entries[entry_id].vddgfx =
1871 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
1872 }
1873 } else {
1874 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1875 voltage_id = sclk_table->entries[entry_id].vddInd;
1876 sclk_table->entries[entry_id].vddc =
1877 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1878 }
1879 }
1880
1881 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1882 voltage_id = mclk_table->entries[entry_id].vddInd;
1883 mclk_table->entries[entry_id].vddc =
1884 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1885 }
1886
1887 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
1888 voltage_id = mm_table->entries[entry_id].vddcInd;
1889 mm_table->entries[entry_id].vddc =
1890 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
1891 }
1892
1893 return 0;
1894
1895 }
1896
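/*
 * Add a calculated voltage record to a lookup table; an existing entry with
 * the same us_vdd is reused instead of appending a duplicate.
 */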
1897 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
1898 phm_ppt_v1_voltage_lookup_table *look_up_table,
1899 phm_ppt_v1_voltage_lookup_record *record)
1900 {
1901 uint32_t i;
1902
1903 PP_ASSERT_WITH_CODE((NULL != look_up_table),
1904 "Lookup Table empty.", return -EINVAL);
1905 PP_ASSERT_WITH_CODE((0 != look_up_table->count),
1906 "Lookup Table empty.", return -EINVAL);
1907
1908 i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
1909 PP_ASSERT_WITH_CODE((i >= look_up_table->count),
1910 "Lookup Table is full.", return -EINVAL);
1911
1912 /* This is to avoid entering duplicate calculated records. */
1913 for (i = 0; i < look_up_table->count; i++) {
1914 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
1915 if (look_up_table->entries[i].us_calculated == 1)
1916 return 0;
1917 break;
1918 }
1919 }
1920
1921 look_up_table->entries[i].us_calculated = 1;
1922 look_up_table->entries[i].us_vdd = record->us_vdd;
1923 look_up_table->entries[i].us_cac_low = record->us_cac_low;
1924 look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
1925 look_up_table->entries[i].us_cac_high = record->us_cac_high;
1926 /* Only increment the count when we're appending, not replacing duplicate entry. */
1927 if (i == look_up_table->count)
1928 look_up_table->count++;
1929
1930 return 0;
1931 }
1932
1933
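/*
 * For SVID2-controlled VDDGFX, fold the signed 16-bit vdd_offset into the
 * SCLK/MCLK dependency tables and record the result in the vddc/vddgfx
 * lookup tables; an offset with bit 15 set is treated as negative, hence
 * the 0xFFFF correction.
 */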
1934 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
1935 {
1936 uint8_t entry_id;
1937 struct phm_ppt_v1_voltage_lookup_record v_record;
1938 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1939 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1940
1941 phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
1942 phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
1943
1944 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1945 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
1946 if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
1947 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1948 sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1949 else
1950 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
1951 sclk_table->entries[entry_id].vdd_offset;
1952
1953 sclk_table->entries[entry_id].vddc =
1954 v_record.us_cac_low = v_record.us_cac_mid =
1955 v_record.us_cac_high = v_record.us_vdd;
1956
1957 phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
1958 }
1959
1960 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
1961 if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
1962 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1963 mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
1964 else
1965 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
1966 mclk_table->entries[entry_id].vdd_offset;
1967
1968 mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1969 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1970 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1971 }
1972 }
1973 return 0;
1974 }
1975
1976 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
1977 {
1978 uint8_t entry_id;
1979 struct phm_ppt_v1_voltage_lookup_record v_record;
1980 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1981 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1982 phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
1983
1984 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1985 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
1986 if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
1987 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1988 mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
1989 else
1990 v_record.us_vdd = mm_table->entries[entry_id].vddc +
1991 mm_table->entries[entry_id].vddgfx_offset;
1992
1993 /* Add the calculated VDDGFX to the VDDGFX lookup table */
1994 mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
1995 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
1996 phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
1997 }
1998 }
1999 return 0;
2000 }
2001
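/* Insertion sort of a voltage lookup table by ascending us_vdd. */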
2002 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2003 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2004 {
2005 uint32_t table_size, i, j;
2006 table_size = lookup_table->count;
2007
2008 PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2009 "Lookup table is empty", return -EINVAL);
2010
2011 /* Sorting voltages */
2012 for (i = 0; i < table_size - 1; i++) {
2013 for (j = i + 1; j > 0; j--) {
2014 if (lookup_table->entries[j].us_vdd <
2015 lookup_table->entries[j - 1].us_vdd) {
2016 swap(lookup_table->entries[j - 1],
2017 lookup_table->entries[j]);
2018 }
2019 }
2020 }
2021
2022 return 0;
2023 }
2024
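/*
 * Patch the dependency and lookup tables with the measured leakage voltages,
 * add the calculated entries and sort both lookup tables.  The first failure
 * is remembered in 'result' but the remaining steps still run.
 */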
2025 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2026 {
2027 int result = 0;
2028 int tmp_result;
2029 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2030 struct phm_ppt_v1_information *table_info =
2031 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2032
2033 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2034 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2035 table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2036 if (tmp_result != 0)
2037 result = tmp_result;
2038
2039 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2040 &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2041 } else {
2042
2043 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2044 table_info->vddc_lookup_table, &(data->vddc_leakage));
2045 if (tmp_result)
2046 result = tmp_result;
2047
2048 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2049 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2050 if (tmp_result)
2051 result = tmp_result;
2052 }
2053
2054 tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2055 if (tmp_result)
2056 result = tmp_result;
2057
2058 tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2059 if (tmp_result)
2060 result = tmp_result;
2061
2062 tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2063 if (tmp_result)
2064 result = tmp_result;
2065
2066 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2067 if (tmp_result)
2068 result = tmp_result;
2069
2070 tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2071 if (tmp_result)
2072 result = tmp_result;
2073
2074 return result;
2075 }
2076
2077 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2078 {
2079 struct phm_ppt_v1_information *table_info =
2080 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2081
2082 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2083 table_info->vdd_dep_on_sclk;
2084 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2085 table_info->vdd_dep_on_mclk;
2086
2087 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2088 "VDD dependency on SCLK table is missing.",
2089 return -EINVAL);
2090 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2091 "VDD dependency on SCLK table has to have is missing.",
2092 return -EINVAL);
2093
2094 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2095 "VDD dependency on MCLK table is missing",
2096 return -EINVAL);
2097 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2098 "VDD dependency on MCLK table has to have is missing.",
2099 return -EINVAL);
2100
2101 table_info->max_clock_voltage_on_ac.sclk =
2102 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2103 table_info->max_clock_voltage_on_ac.mclk =
2104 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2105 table_info->max_clock_voltage_on_ac.vddc =
2106 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2107 table_info->max_clock_voltage_on_ac.vddci =
2108 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2109
2110 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2111 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2112 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2113 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2114
2115 return 0;
2116 }
2117
2118 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2119 {
2120 struct phm_ppt_v1_information *table_info =
2121 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2122 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2123 struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2124 uint32_t i;
2125 uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2126 struct amdgpu_device *adev = hwmgr->adev;
2127
2128 if (table_info != NULL) {
2129 dep_mclk_table = table_info->vdd_dep_on_mclk;
2130 lookup_table = table_info->vddc_lookup_table;
2131 } else
2132 return 0;
2133
2134 hw_revision = adev->pdev->revision;
2135 sub_sys_id = adev->pdev->subsystem_device;
2136 sub_vendor_id = adev->pdev->subsystem_vendor;
2137
2138 if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 &&
2139 ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2140 (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2141 (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2142 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2143 return 0;
2144
2145 for (i = 0; i < lookup_table->count; i++) {
2146 if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2147 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2148 return 0;
2149 }
2150 }
2151 }
2152 return 0;
2153 }
2154
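/*
 * Program CNB_PWRMGT_CNTL according to the pin the VDDC PCC GPIO is assigned
 * to, and seed the fan control and operating temperature limits from the
 * CAC/DTP table.
 */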
2155 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2156 {
2157 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2158 uint32_t temp_reg;
2159 struct phm_ppt_v1_information *table_info =
2160 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2161
2162
2163 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2164 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2165 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2166 case 0:
2167 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2168 break;
2169 case 1:
2170 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2171 break;
2172 case 2:
2173 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2174 break;
2175 case 3:
2176 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2177 break;
2178 case 4:
2179 temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2180 break;
2181 default:
2182 break;
2183 }
2184 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2185 }
2186
2187 if (table_info == NULL)
2188 return 0;
2189
2190 if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2191 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2192 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2193 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2194
2195 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2196 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2197
2198 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2199
2200 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2201
2202 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2203 (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2204
2205 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2206
2207 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2208 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2209
2210 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2211 table_info->cac_dtp_table->usOperatingTempStep = 1;
2212 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2213
2214 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2215 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2216
2217 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2218 hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2219
2220 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2221 table_info->cac_dtp_table->usOperatingTempMinLimit;
2222
2223 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2224 table_info->cac_dtp_table->usOperatingTempMaxLimit;
2225
2226 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2227 table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2228
2229 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2230 table_info->cac_dtp_table->usOperatingTempStep;
2231
2232 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2233 table_info->cac_dtp_table->usTargetOperatingTemp;
2234 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2235 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2236 PHM_PlatformCaps_ODFuzzyFanControlSupport);
2237 }
2238
2239 return 0;
2240 }
2241
2242 /**
2243 * Change virtual leakage voltage to actual value.
2244 *
2245 * @param hwmgr the address of the powerplay hardware manager.
2246 * @param voltage pointer to the voltage value to patch in place
2247 * @param leakage_table pointer to the leakage table
2248 */
2249 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2250 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2251 {
2252 uint32_t index;
2253
2254 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2255 for (index = 0; index < leakage_table->count; index++) {
2256 /* if this voltage matches a leakage voltage ID */
2257 /* patch with actual leakage voltage */
2258 if (leakage_table->leakage_id[index] == *voltage) {
2259 *voltage = leakage_table->actual_voltage[index];
2260 break;
2261 }
2262 }
2263
2264 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2265 pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2266 }
2267
2268
2269 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2270 struct phm_clock_voltage_dependency_table *tab)
2271 {
2272 uint16_t i;
2273 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2274
2275 if (tab)
2276 for (i = 0; i < tab->count; i++)
2277 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2278 &data->vddc_leakage);
2279
2280 return 0;
2281 }
2282
2283 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2284 struct phm_clock_voltage_dependency_table *tab)
2285 {
2286 uint16_t i;
2287 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2288
2289 if (tab)
2290 for (i = 0; i < tab->count; i++)
2291 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2292 &data->vddci_leakage);
2293
2294 return 0;
2295 }
2296
2297 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2298 struct phm_vce_clock_voltage_dependency_table *tab)
2299 {
2300 uint16_t i;
2301 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2302
2303 if (tab)
2304 for (i = 0; i < tab->count; i++)
2305 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2306 &data->vddc_leakage);
2307
2308 return 0;
2309 }
2310
2311
2312 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2313 struct phm_uvd_clock_voltage_dependency_table *tab)
2314 {
2315 uint16_t i;
2316 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2317
2318 if (tab)
2319 for (i = 0; i < tab->count; i++)
2320 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2321 &data->vddc_leakage);
2322
2323 return 0;
2324 }
2325
2326 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2327 struct phm_phase_shedding_limits_table *tab)
2328 {
2329 uint16_t i;
2330 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2331
2332 if (tab)
2333 for (i = 0; i < tab->count; i++)
2334 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2335 &data->vddc_leakage);
2336
2337 return 0;
2338 }
2339
2340 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2341 struct phm_samu_clock_voltage_dependency_table *tab)
2342 {
2343 uint16_t i;
2344 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2345
2346 if (tab)
2347 for (i = 0; i < tab->count; i++)
2348 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2349 &data->vddc_leakage);
2350
2351 return 0;
2352 }
2353
2354 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2355 struct phm_acp_clock_voltage_dependency_table *tab)
2356 {
2357 uint16_t i;
2358 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2359
2360 if (tab)
2361 for (i = 0; i < tab->count; i++)
2362 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2363 &data->vddc_leakage);
2364
2365 return 0;
2366 }
2367
2368 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2369 struct phm_clock_and_voltage_limits *tab)
2370 {
2371 uint32_t vddc, vddci;
2372 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2373
2374 if (tab) {
2375 vddc = tab->vddc;
2376 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2377 &data->vddc_leakage);
2378 tab->vddc = vddc;
2379 vddci = tab->vddci;
2380 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2381 &data->vddci_leakage);
2382 tab->vddci = vddci;
2383 }
2384
2385 return 0;
2386 }
2387
2388 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2389 {
2390 uint32_t i;
2391 uint32_t vddc;
2392 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2393
2394 if (tab) {
2395 for (i = 0; i < tab->count; i++) {
2396 vddc = (uint32_t)(tab->entries[i].Vddc);
2397 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2398 tab->entries[i].Vddc = (uint16_t)vddc;
2399 }
2400 }
2401
2402 return 0;
2403 }
2404
2405 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2406 {
2407 int tmp;
2408
2409 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2410 if (tmp)
2411 return -EINVAL;
2412
2413 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2414 if (tmp)
2415 return -EINVAL;
2416
2417 tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2418 if (tmp)
2419 return -EINVAL;
2420
2421 tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2422 if (tmp)
2423 return -EINVAL;
2424
2425 tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2426 if (tmp)
2427 return -EINVAL;
2428
2429 tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2430 if (tmp)
2431 return -EINVAL;
2432
2433 tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2434 if (tmp)
2435 return -EINVAL;
2436
2437 tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2438 if (tmp)
2439 return -EINVAL;
2440
2441 tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2442 if (tmp)
2443 return -EINVAL;
2444
2445 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2446 if (tmp)
2447 return -EINVAL;
2448
2449 tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2450 if (tmp)
2451 return -EINVAL;
2452
2453 tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2454 if (tmp)
2455 return -EINVAL;
2456
2457 return 0;
2458 }
2459
2460
2461 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2462 {
2463 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2464
2465 struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2466 struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2467 struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2468
2469 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2470 "VDDC dependency on SCLK table is missing. This table is mandatory",
2471 return -EINVAL);
2472 PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2473 "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2474 return -EINVAL);
2475
2476 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2477 "VDDC dependency on MCLK table is missing. This table is mandatory",
2478 return -EINVAL);
2479 PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2480 "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2481 return -EINVAL);
2482
2483 data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2484 data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2485
2486 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2487 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2488 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2489 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2490 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2491 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2492
2493 if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2494 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2495 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2496 }
2497
2498 if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2499 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2500
2501 return 0;
2502 }
2503
2504 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2505 {
2506 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2507 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2508 kfree(hwmgr->backend);
2509 hwmgr->backend = NULL;
2510
2511 return 0;
2512 }
2513
2514 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2515 {
2516 uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2517 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2518 int i;
2519
2520 if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2521 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2522 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2523 if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2524 virtual_voltage_id,
2525 efuse_voltage_id) == 0) {
2526 if (vddc != 0 && vddc != virtual_voltage_id) {
2527 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2528 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2529 data->vddc_leakage.count++;
2530 }
2531 if (vddci != 0 && vddci != virtual_voltage_id) {
2532 data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2533 data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2534 data->vddci_leakage.count++;
2535 }
2536 }
2537 }
2538 }
2539 return 0;
2540 }
2541
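/*
 * Allocate and initialize the smu7 backend: gather leakage voltages (EVV or
 * efuse based), complete the pptable-derived dependency tables and set up the
 * dynamic state adjustment rules and thermal parameters.
 */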
2542 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2543 {
2544 struct smu7_hwmgr *data;
2545 int result = 0;
2546
2547 data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2548 if (data == NULL)
2549 return -ENOMEM;
2550
2551 hwmgr->backend = data;
2552 smu7_patch_voltage_workaround(hwmgr);
2553 smu7_init_dpm_defaults(hwmgr);
2554
2555 /* Get leakage voltage based on leakage ID. */
2556 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2557 PHM_PlatformCaps_EVV)) {
2558 result = smu7_get_evv_voltages(hwmgr);
2559 if (result) {
2560 pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2561 return -EINVAL;
2562 }
2563 } else {
2564 smu7_get_elb_voltages(hwmgr);
2565 }
2566
2567 if (hwmgr->pp_table_version == PP_TABLE_V1) {
2568 smu7_complete_dependency_tables(hwmgr);
2569 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2570 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2571 smu7_patch_dependency_tables_with_leakage(hwmgr);
2572 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2573 }
2574
2575 /* Initialize Dynamic State Adjustment Rule Settings */
2576 result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2577
2578 if (0 == result) {
2579 struct amdgpu_device *adev = hwmgr->adev;
2580
2581 data->is_tlu_enabled = false;
2582
2583 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2584 SMU7_MAX_HARDWARE_POWERLEVELS;
2585 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2586 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2587
2588 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2589 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2590 data->pcie_spc_cap = 20;
2591 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2592
2593 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2594 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2595 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2596 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2597 smu7_thermal_parameter_init(hwmgr);
2598 } else {
2599 /* Ignore return value in here, we are cleaning up a mess. */
2600 smu7_hwmgr_backend_fini(hwmgr);
2601 }
2602
2603 return 0;
2604 }
2605
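/*
 * Force each DPM domain (PCIe, SCLK, MCLK) to its highest enabled level by
 * locating the most significant bit set in the corresponding enable mask.
 */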
2606 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2607 {
2608 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2609 uint32_t level, tmp;
2610
2611 if (!data->pcie_dpm_key_disabled) {
2612 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2613 level = 0;
2614 tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2615 while (tmp >>= 1)
2616 level++;
2617
2618 if (level)
2619 smum_send_msg_to_smc_with_parameter(hwmgr,
2620 PPSMC_MSG_PCIeDPM_ForceLevel, level);
2621 }
2622 }
2623
2624 if (!data->sclk_dpm_key_disabled) {
2625 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2626 level = 0;
2627 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2628 while (tmp >>= 1)
2629 level++;
2630
2631 if (level)
2632 smum_send_msg_to_smc_with_parameter(hwmgr,
2633 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2634 (1 << level));
2635 }
2636 }
2637
2638 if (!data->mclk_dpm_key_disabled) {
2639 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2640 level = 0;
2641 tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2642 while (tmp >>= 1)
2643 level++;
2644
2645 if (level)
2646 smum_send_msg_to_smc_with_parameter(hwmgr,
2647 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2648 (1 << level));
2649 }
2650 }
2651
2652 return 0;
2653 }
2654
2655 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2656 {
2657 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2658
2659 if (hwmgr->pp_table_version == PP_TABLE_V1)
2660 phm_apply_dal_min_voltage_request(hwmgr);
2661 /* TODO for v0 Iceland and CI */
2662
2663 if (!data->sclk_dpm_key_disabled) {
2664 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2665 smum_send_msg_to_smc_with_parameter(hwmgr,
2666 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2667 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2668 }
2669
2670 if (!data->mclk_dpm_key_disabled) {
2671 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2672 smum_send_msg_to_smc_with_parameter(hwmgr,
2673 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2674 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2675 }
2676
2677 return 0;
2678 }
2679
2680 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2681 {
2682 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2683
2684 if (!smum_is_dpm_running(hwmgr))
2685 return -EINVAL;
2686
2687 if (!data->pcie_dpm_key_disabled) {
2688 smum_send_msg_to_smc(hwmgr,
2689 PPSMC_MSG_PCIeDPM_UnForceLevel);
2690 }
2691
2692 return smu7_upload_dpm_level_enable_mask(hwmgr);
2693 }
2694
2695 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2696 {
2697 struct smu7_hwmgr *data =
2698 (struct smu7_hwmgr *)(hwmgr->backend);
2699 uint32_t level;
2700
2701 if (!data->sclk_dpm_key_disabled)
2702 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2703 level = phm_get_lowest_enabled_level(hwmgr,
2704 data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2705 smum_send_msg_to_smc_with_parameter(hwmgr,
2706 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2707 (1 << level));
2708
2709 }
2710
2711 if (!data->mclk_dpm_key_disabled) {
2712 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2713 level = phm_get_lowest_enabled_level(hwmgr,
2714 data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2715 smum_send_msg_to_smc_with_parameter(hwmgr,
2716 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2717 (1 << level));
2718 }
2719 }
2720
2721 if (!data->pcie_dpm_key_disabled) {
2722 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2723 level = phm_get_lowest_enabled_level(hwmgr,
2724 data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2725 smum_send_msg_to_smc_with_parameter(hwmgr,
2726 PPSMC_MSG_PCIeDPM_ForceLevel,
2727 (level));
2728 }
2729 }
2730
2731 return 0;
2732 }
2733
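/*
 * Derive the SCLK/MCLK/PCIe masks used for the profiling levels.  The SCLK
 * target is scaled from the chosen MCLK by the golden SCLK:MCLK ratio, or by
 * 70% when only a single MCLK level exists.
 */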
2734 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
2735 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
2736 {
2737 uint32_t percentage;
2738 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2739 struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
2740 int32_t tmp_mclk;
2741 int32_t tmp_sclk;
2742 int32_t count;
2743
2744 if (golden_dpm_table->mclk_table.count < 1)
2745 return -EINVAL;
2746
2747 percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
2748 golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2749
2750 if (golden_dpm_table->mclk_table.count == 1) {
2751 percentage = 70;
2752 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
2753 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2754 } else {
2755 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
2756 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
2757 }
2758
2759 tmp_sclk = tmp_mclk * percentage / 100;
2760
2761 if (hwmgr->pp_table_version == PP_TABLE_V0) {
2762 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2763 count >= 0; count--) {
2764 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
2765 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
2766 *sclk_mask = count;
2767 break;
2768 }
2769 }
2770 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2771 *sclk_mask = 0;
2772 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
2773 }
2774
2775 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2776 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
2777 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
2778 struct phm_ppt_v1_information *table_info =
2779 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2780
2781 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
2782 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
2783 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
2784 *sclk_mask = count;
2785 break;
2786 }
2787 }
2788 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
2789 *sclk_mask = 0;
2790 tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2791 }
2792
2793 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2794 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
2795 }
2796
2797 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
2798 *mclk_mask = 0;
2799 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2800 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
2801
2802 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
2803 hwmgr->pstate_sclk = tmp_sclk;
2804 hwmgr->pstate_mclk = tmp_mclk;
2805
2806 return 0;
2807 }
2808
2809 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
2810 enum amd_dpm_forced_level level)
2811 {
2812 int ret = 0;
2813 uint32_t sclk_mask = 0;
2814 uint32_t mclk_mask = 0;
2815 uint32_t pcie_mask = 0;
2816
2817 if (hwmgr->pstate_sclk == 0)
2818 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2819
2820 switch (level) {
2821 case AMD_DPM_FORCED_LEVEL_HIGH:
2822 ret = smu7_force_dpm_highest(hwmgr);
2823 break;
2824 case AMD_DPM_FORCED_LEVEL_LOW:
2825 ret = smu7_force_dpm_lowest(hwmgr);
2826 break;
2827 case AMD_DPM_FORCED_LEVEL_AUTO:
2828 ret = smu7_unforce_dpm_levels(hwmgr);
2829 break;
2830 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
2831 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
2832 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
2833 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
2834 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
2835 if (ret)
2836 return ret;
2837 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
2838 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
2839 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
2840 break;
2841 case AMD_DPM_FORCED_LEVEL_MANUAL:
2842 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
2843 default:
2844 break;
2845 }
2846
2847 if (!ret) {
2848 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2849 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
2850 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
2851 smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
2852 }
2853 return ret;
2854 }
2855
2856 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
2857 {
2858 return sizeof(struct smu7_power_state);
2859 }
2860
2861 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
2862 uint32_t vblank_time_us)
2863 {
2864 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2865 uint32_t switch_limit_us;
2866
2867 switch (hwmgr->chip_id) {
2868 case CHIP_POLARIS10:
2869 case CHIP_POLARIS11:
2870 case CHIP_POLARIS12:
2871 if (hwmgr->is_kicker)
2872 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2873 else
2874 switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
2875 break;
2876 case CHIP_VEGAM:
2877 switch_limit_us = 30;
2878 break;
2879 default:
2880 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
2881 break;
2882 }
2883
2884 if (vblank_time_us < switch_limit_us)
2885 return true;
2886 else
2887 return false;
2888 }
2889
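/*
 * Clamp the requested power state to the AC/DC limits, honour the stable
 * pstate and minimum display clocks, and disable MCLK switching when multiple
 * unsynchronized displays, frame lock, or a too-short vblank make it unsafe.
 */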
2890 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
2891 struct pp_power_state *request_ps,
2892 const struct pp_power_state *current_ps)
2893 {
2894 struct amdgpu_device *adev = hwmgr->adev;
2895 struct smu7_power_state *smu7_ps =
2896 cast_phw_smu7_power_state(&request_ps->hardware);
2897 uint32_t sclk;
2898 uint32_t mclk;
2899 struct PP_Clocks minimum_clocks = {0};
2900 bool disable_mclk_switching;
2901 bool disable_mclk_switching_for_frame_lock;
2902 const struct phm_clock_and_voltage_limits *max_limits;
2903 uint32_t i;
2904 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2905 struct phm_ppt_v1_information *table_info =
2906 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2907 int32_t count;
2908 int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
2909
2910 data->battery_state = (PP_StateUILabel_Battery ==
2911 request_ps->classification.ui_label);
2912
2913 PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
2914 "VI should always have 2 performance levels",
2915 );
2916
2917 max_limits = adev->pm.ac_power ?
2918 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
2919 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
2920
2921 /* Cap clock DPM tables at DC MAX if it is in DC. */
2922 if (!adev->pm.ac_power) {
2923 for (i = 0; i < smu7_ps->performance_level_count; i++) {
2924 if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
2925 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
2926 if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
2927 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
2928 }
2929 }
2930
2931 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
2932 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
2933
2934 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2935 PHM_PlatformCaps_StablePState)) {
2936 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
2937 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
2938
2939 for (count = table_info->vdd_dep_on_sclk->count - 1;
2940 count >= 0; count--) {
2941 if (stable_pstate_sclk >=
2942 table_info->vdd_dep_on_sclk->entries[count].clk) {
2943 stable_pstate_sclk =
2944 table_info->vdd_dep_on_sclk->entries[count].clk;
2945 break;
2946 }
2947 }
2948
2949 if (count < 0)
2950 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
2951
2952 stable_pstate_mclk = max_limits->mclk;
2953
2954 minimum_clocks.engineClock = stable_pstate_sclk;
2955 minimum_clocks.memoryClock = stable_pstate_mclk;
2956 }
2957
2958 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
2959 hwmgr->platform_descriptor.platformCaps,
2960 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
2961
2962
2963 if (hwmgr->display_config->num_display == 0)
2964 disable_mclk_switching = false;
2965 else
2966 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
2967 !hwmgr->display_config->multi_monitor_in_sync) ||
2968 disable_mclk_switching_for_frame_lock ||
2969 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
2970
2971 sclk = smu7_ps->performance_levels[0].engine_clock;
2972 mclk = smu7_ps->performance_levels[0].memory_clock;
2973
2974 if (disable_mclk_switching)
2975 mclk = smu7_ps->performance_levels
2976 [smu7_ps->performance_level_count - 1].memory_clock;
2977
2978 if (sclk < minimum_clocks.engineClock)
2979 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
2980 max_limits->sclk : minimum_clocks.engineClock;
2981
2982 if (mclk < minimum_clocks.memoryClock)
2983 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
2984 max_limits->mclk : minimum_clocks.memoryClock;
2985
2986 smu7_ps->performance_levels[0].engine_clock = sclk;
2987 smu7_ps->performance_levels[0].memory_clock = mclk;
2988
2989 smu7_ps->performance_levels[1].engine_clock =
2990 (smu7_ps->performance_levels[1].engine_clock >=
2991 smu7_ps->performance_levels[0].engine_clock) ?
2992 smu7_ps->performance_levels[1].engine_clock :
2993 smu7_ps->performance_levels[0].engine_clock;
2994
2995 if (disable_mclk_switching) {
2996 if (mclk < smu7_ps->performance_levels[1].memory_clock)
2997 mclk = smu7_ps->performance_levels[1].memory_clock;
2998
2999 smu7_ps->performance_levels[0].memory_clock = mclk;
3000 smu7_ps->performance_levels[1].memory_clock = mclk;
3001 } else {
3002 if (smu7_ps->performance_levels[1].memory_clock <
3003 smu7_ps->performance_levels[0].memory_clock)
3004 smu7_ps->performance_levels[1].memory_clock =
3005 smu7_ps->performance_levels[0].memory_clock;
3006 }
3007
3008 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3009 PHM_PlatformCaps_StablePState)) {
3010 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3011 smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3012 smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3013 smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3014 smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
3015 }
3016 }
3017 return 0;
3018 }
3019
3020
3021 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3022 {
3023 struct pp_power_state *ps;
3024 struct smu7_power_state *smu7_ps;
3025
3026 if (hwmgr == NULL)
3027 return -EINVAL;
3028
3029 ps = hwmgr->request_ps;
3030
3031 if (ps == NULL)
3032 return -EINVAL;
3033
3034 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3035
3036 if (low)
3037 return smu7_ps->performance_levels[0].memory_clock;
3038 else
3039 return smu7_ps->performance_levels
3040 [smu7_ps->performance_level_count-1].memory_clock;
3041 }
3042
3043 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3044 {
3045 struct pp_power_state *ps;
3046 struct smu7_power_state *smu7_ps;
3047
3048 if (hwmgr == NULL)
3049 return -EINVAL;
3050
3051 ps = hwmgr->request_ps;
3052
3053 if (ps == NULL)
3054 return -EINVAL;
3055
3056 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3057
3058 if (low)
3059 return smu7_ps->performance_levels[0].engine_clock;
3060 else
3061 return smu7_ps->performance_levels
3062 [smu7_ps->performance_level_count-1].engine_clock;
3063 }
3064
3065 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3066 struct pp_hw_power_state *hw_ps)
3067 {
3068 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3069 struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3070 ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3071 uint16_t size;
3072 uint8_t frev, crev;
3073 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3074
3075 /* First retrieve the Boot clocks and VDDC from the firmware info table.
3076 * We assume here that fw_info is unchanged if this call fails.
3077 */
3078 fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3079 &size, &frev, &crev);
3080 if (!fw_info)
3081 		/* During testing, there is no firmware info table. */
3082 return 0;
3083
3084 /* Patch the state. */
3085 data->vbios_boot_state.sclk_bootup_value =
3086 le32_to_cpu(fw_info->ulDefaultEngineClock);
3087 data->vbios_boot_state.mclk_bootup_value =
3088 le32_to_cpu(fw_info->ulDefaultMemoryClock);
3089 data->vbios_boot_state.mvdd_bootup_value =
3090 le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3091 data->vbios_boot_state.vddc_bootup_value =
3092 le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3093 data->vbios_boot_state.vddci_bootup_value =
3094 le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3095 data->vbios_boot_state.pcie_gen_bootup_value =
3096 smu7_get_current_pcie_speed(hwmgr);
3097
3098 data->vbios_boot_state.pcie_lane_bootup_value =
3099 (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3100
3101 /* set boot power state */
3102 ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3103 ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3104 ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3105 ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3106
3107 return 0;
3108 }
3109
3110 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3111 {
3112 int result;
3113 unsigned long ret = 0;
3114
3115 if (hwmgr->pp_table_version == PP_TABLE_V0) {
3116 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3117 return result ? 0 : ret;
3118 } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3119 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3120 return result;
3121 }
3122 return 0;
3123 }
3124
3125 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3126 void *state, struct pp_power_state *power_state,
3127 void *pp_table, uint32_t classification_flag)
3128 {
3129 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3130 struct smu7_power_state *smu7_power_state =
3131 (struct smu7_power_state *)(&(power_state->hardware));
3132 struct smu7_performance_level *performance_level;
3133 ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3134 ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3135 (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3136 PPTable_Generic_SubTable_Header *sclk_dep_table =
3137 (PPTable_Generic_SubTable_Header *)
3138 (((unsigned long)powerplay_table) +
3139 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3140
3141 ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3142 (ATOM_Tonga_MCLK_Dependency_Table *)
3143 (((unsigned long)powerplay_table) +
3144 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3145
3146 	/* The following fields are not initialized here: id, orderedList, allStatesList */
3147 power_state->classification.ui_label =
3148 (le16_to_cpu(state_entry->usClassification) &
3149 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3150 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3151 power_state->classification.flags = classification_flag;
3152 /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3153
3154 power_state->classification.temporary_state = false;
3155 power_state->classification.to_be_deleted = false;
3156
3157 power_state->validation.disallowOnDC =
3158 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3159 ATOM_Tonga_DISALLOW_ON_DC));
3160
3161 power_state->pcie.lanes = 0;
3162
3163 power_state->display.disableFrameModulation = false;
3164 power_state->display.limitRefreshrate = false;
3165 power_state->display.enableVariBright =
3166 (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3167 ATOM_Tonga_ENABLE_VARIBRIGHT));
3168
3169 power_state->validation.supportedPowerLevels = 0;
3170 power_state->uvd_clocks.VCLK = 0;
3171 power_state->uvd_clocks.DCLK = 0;
3172 power_state->temperatures.min = 0;
3173 power_state->temperatures.max = 0;
3174
3175 performance_level = &(smu7_power_state->performance_levels
3176 [smu7_power_state->performance_level_count++]);
3177
3178 PP_ASSERT_WITH_CODE(
3179 (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3180 		"Performance levels exceed SMC limit!",
3181 return -EINVAL);
3182
3183 PP_ASSERT_WITH_CODE(
3184 (smu7_power_state->performance_level_count <=
3185 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3186 		"Performance levels exceed Driver limit!",
3187 return -EINVAL);
3188
3189 /* Performance levels are arranged from low to high. */
3190 performance_level->memory_clock = mclk_dep_table->entries
3191 [state_entry->ucMemoryClockIndexLow].ulMclk;
3192 if (sclk_dep_table->ucRevId == 0)
3193 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3194 [state_entry->ucEngineClockIndexLow].ulSclk;
3195 else if (sclk_dep_table->ucRevId == 1)
3196 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3197 [state_entry->ucEngineClockIndexLow].ulSclk;
3198 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3199 state_entry->ucPCIEGenLow);
3200 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3201 state_entry->ucPCIELaneLow);
3202
3203 performance_level = &(smu7_power_state->performance_levels
3204 [smu7_power_state->performance_level_count++]);
3205 performance_level->memory_clock = mclk_dep_table->entries
3206 [state_entry->ucMemoryClockIndexHigh].ulMclk;
3207
3208 if (sclk_dep_table->ucRevId == 0)
3209 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3210 [state_entry->ucEngineClockIndexHigh].ulSclk;
3211 else if (sclk_dep_table->ucRevId == 1)
3212 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3213 [state_entry->ucEngineClockIndexHigh].ulSclk;
3214
3215 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3216 state_entry->ucPCIEGenHigh);
3217 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3218 state_entry->ucPCIELaneHigh);
3219
3220 return 0;
3221 }
3222
3223 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3224 unsigned long entry_index, struct pp_power_state *state)
3225 {
3226 int result;
3227 struct smu7_power_state *ps;
3228 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3229 struct phm_ppt_v1_information *table_info =
3230 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3231 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3232 table_info->vdd_dep_on_mclk;
3233
3234 state->hardware.magic = PHM_VIslands_Magic;
3235
3236 ps = (struct smu7_power_state *)(&state->hardware);
3237
3238 result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3239 smu7_get_pp_table_entry_callback_func_v1);
3240
3241 	/* This is the earliest time we have all the dependency table and the VBIOS boot state,
3242 	 * since PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
3243 	 * If there is only one VDDCI/MCLK level, check whether it matches the VBIOS boot state.
3244 	 */
3245 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3246 if (dep_mclk_table->entries[0].clk !=
3247 data->vbios_boot_state.mclk_bootup_value)
3248 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3249 "does not match VBIOS boot MCLK level");
3250 if (dep_mclk_table->entries[0].vddci !=
3251 data->vbios_boot_state.vddci_bootup_value)
3252 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3253 "does not match VBIOS boot VDDCI level");
3254 }
3255
3256 /* set DC compatible flag if this state supports DC */
3257 if (!state->validation.disallowOnDC)
3258 ps->dc_compatible = true;
3259
3260 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3261 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3262
3263 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3264 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3265
3266 if (!result) {
3267 uint32_t i;
3268
3269 switch (state->classification.ui_label) {
3270 case PP_StateUILabel_Performance:
3271 data->use_pcie_performance_levels = true;
3272 for (i = 0; i < ps->performance_level_count; i++) {
3273 if (data->pcie_gen_performance.max <
3274 ps->performance_levels[i].pcie_gen)
3275 data->pcie_gen_performance.max =
3276 ps->performance_levels[i].pcie_gen;
3277
3278 if (data->pcie_gen_performance.min >
3279 ps->performance_levels[i].pcie_gen)
3280 data->pcie_gen_performance.min =
3281 ps->performance_levels[i].pcie_gen;
3282
3283 if (data->pcie_lane_performance.max <
3284 ps->performance_levels[i].pcie_lane)
3285 data->pcie_lane_performance.max =
3286 ps->performance_levels[i].pcie_lane;
3287 if (data->pcie_lane_performance.min >
3288 ps->performance_levels[i].pcie_lane)
3289 data->pcie_lane_performance.min =
3290 ps->performance_levels[i].pcie_lane;
3291 }
3292 break;
3293 case PP_StateUILabel_Battery:
3294 data->use_pcie_power_saving_levels = true;
3295
3296 for (i = 0; i < ps->performance_level_count; i++) {
3297 if (data->pcie_gen_power_saving.max <
3298 ps->performance_levels[i].pcie_gen)
3299 data->pcie_gen_power_saving.max =
3300 ps->performance_levels[i].pcie_gen;
3301
3302 if (data->pcie_gen_power_saving.min >
3303 ps->performance_levels[i].pcie_gen)
3304 data->pcie_gen_power_saving.min =
3305 ps->performance_levels[i].pcie_gen;
3306
3307 if (data->pcie_lane_power_saving.max <
3308 ps->performance_levels[i].pcie_lane)
3309 data->pcie_lane_power_saving.max =
3310 ps->performance_levels[i].pcie_lane;
3311
3312 if (data->pcie_lane_power_saving.min >
3313 ps->performance_levels[i].pcie_lane)
3314 data->pcie_lane_power_saving.min =
3315 ps->performance_levels[i].pcie_lane;
3316 }
3317 break;
3318 default:
3319 break;
3320 }
3321 }
3322 return 0;
3323 }
3324
3325 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3326 struct pp_hw_power_state *power_state,
3327 unsigned int index, const void *clock_info)
3328 {
3329 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3330 struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state);
3331 const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3332 struct smu7_performance_level *performance_level;
3333 uint32_t engine_clock, memory_clock;
3334 uint16_t pcie_gen_from_bios;
3335
3336 engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3337 memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3338
3339 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3340 data->highest_mclk = memory_clock;
3341
3342 PP_ASSERT_WITH_CODE(
3343 (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3344 		"Performance levels exceed SMC limit!",
3345 return -EINVAL);
3346
3347 PP_ASSERT_WITH_CODE(
3348 (ps->performance_level_count <
3349 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3350 		"Performance levels exceed Driver limit, skip!",
3351 return 0);
3352
3353 performance_level = &(ps->performance_levels
3354 [ps->performance_level_count++]);
3355
3356 /* Performance levels are arranged from low to high. */
3357 performance_level->memory_clock = memory_clock;
3358 performance_level->engine_clock = engine_clock;
3359
3360 pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3361
3362 performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3363 performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3364
3365 return 0;
3366 }
3367
3368 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3369 unsigned long entry_index, struct pp_power_state *state)
3370 {
3371 int result;
3372 struct smu7_power_state *ps;
3373 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3374 struct phm_clock_voltage_dependency_table *dep_mclk_table =
3375 hwmgr->dyn_state.vddci_dependency_on_mclk;
3376
3377 memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3378
3379 state->hardware.magic = PHM_VIslands_Magic;
3380
3381 ps = (struct smu7_power_state *)(&state->hardware);
3382
3383 result = pp_tables_get_entry(hwmgr, entry_index, state,
3384 smu7_get_pp_table_entry_callback_func_v0);
3385
3386 	/*
3387 	 * This is the earliest time we have all the dependency table
3388 	 * and the VBIOS boot state, since
3389 	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
3390 	 * state.  If there is only one VDDCI/MCLK level, check whether
3391 	 * it matches the VBIOS boot state.
3392 	 */
3393 if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3394 if (dep_mclk_table->entries[0].clk !=
3395 data->vbios_boot_state.mclk_bootup_value)
3396 pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3397 "does not match VBIOS boot MCLK level");
3398 if (dep_mclk_table->entries[0].v !=
3399 data->vbios_boot_state.vddci_bootup_value)
3400 pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3401 "does not match VBIOS boot VDDCI level");
3402 }
3403
3404 /* set DC compatible flag if this state supports DC */
3405 if (!state->validation.disallowOnDC)
3406 ps->dc_compatible = true;
3407
3408 if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3409 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3410
3411 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3412 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3413
3414 if (!result) {
3415 uint32_t i;
3416
3417 switch (state->classification.ui_label) {
3418 case PP_StateUILabel_Performance:
3419 data->use_pcie_performance_levels = true;
3420
3421 for (i = 0; i < ps->performance_level_count; i++) {
3422 if (data->pcie_gen_performance.max <
3423 ps->performance_levels[i].pcie_gen)
3424 data->pcie_gen_performance.max =
3425 ps->performance_levels[i].pcie_gen;
3426
3427 if (data->pcie_gen_performance.min >
3428 ps->performance_levels[i].pcie_gen)
3429 data->pcie_gen_performance.min =
3430 ps->performance_levels[i].pcie_gen;
3431
3432 if (data->pcie_lane_performance.max <
3433 ps->performance_levels[i].pcie_lane)
3434 data->pcie_lane_performance.max =
3435 ps->performance_levels[i].pcie_lane;
3436
3437 if (data->pcie_lane_performance.min >
3438 ps->performance_levels[i].pcie_lane)
3439 data->pcie_lane_performance.min =
3440 ps->performance_levels[i].pcie_lane;
3441 }
3442 break;
3443 case PP_StateUILabel_Battery:
3444 data->use_pcie_power_saving_levels = true;
3445
3446 for (i = 0; i < ps->performance_level_count; i++) {
3447 if (data->pcie_gen_power_saving.max <
3448 ps->performance_levels[i].pcie_gen)
3449 data->pcie_gen_power_saving.max =
3450 ps->performance_levels[i].pcie_gen;
3451
3452 if (data->pcie_gen_power_saving.min >
3453 ps->performance_levels[i].pcie_gen)
3454 data->pcie_gen_power_saving.min =
3455 ps->performance_levels[i].pcie_gen;
3456
3457 if (data->pcie_lane_power_saving.max <
3458 ps->performance_levels[i].pcie_lane)
3459 data->pcie_lane_power_saving.max =
3460 ps->performance_levels[i].pcie_lane;
3461
3462 if (data->pcie_lane_power_saving.min >
3463 ps->performance_levels[i].pcie_lane)
3464 data->pcie_lane_power_saving.min =
3465 ps->performance_levels[i].pcie_lane;
3466 }
3467 break;
3468 default:
3469 break;
3470 }
3471 }
3472 return 0;
3473 }
3474
3475 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3476 unsigned long entry_index, struct pp_power_state *state)
3477 {
3478 if (hwmgr->pp_table_version == PP_TABLE_V0)
3479 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3480 else if (hwmgr->pp_table_version == PP_TABLE_V1)
3481 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3482
3483 return 0;
3484 }
3485
3486 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3487 {
3488 struct amdgpu_device *adev = hwmgr->adev;
3489 int i;
3490 u32 tmp = 0;
3491
3492 if (!query)
3493 return -EINVAL;
3494
3495 /*
3496 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
3497 * - Hawaii
3498 * - Bonaire
3499 * - Fiji
3500 * - Tonga
3501 */
3502 if ((adev->asic_type != CHIP_HAWAII) &&
3503 (adev->asic_type != CHIP_BONAIRE) &&
3504 (adev->asic_type != CHIP_FIJI) &&
3505 (adev->asic_type != CHIP_TONGA)) {
3506 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
3507 tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3508 *query = tmp;
3509
3510 if (tmp != 0)
3511 return 0;
3512 }
3513
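	/*
	 * Fallback for ASICs without PPSMC_MSG_GetCurrPkgPwr (or when it returns
	 * zero): start PM status logging, then poll the SMU_PM_STATUS_95 scratch
	 * register for up to ~5 seconds (10 x 500 ms) until a nonzero sample
	 * shows up.
	 */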
3514 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
3515 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3516 ixSMU_PM_STATUS_95, 0);
3517
3518 for (i = 0; i < 10; i++) {
3519 msleep(500);
3520 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
3521 tmp = cgs_read_ind_register(hwmgr->device,
3522 CGS_IND_REG__SMC,
3523 ixSMU_PM_STATUS_95);
3524 if (tmp != 0)
3525 break;
3526 }
3527 *query = tmp;
3528
3529 return 0;
3530 }
3531
3532 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3533 void *value, int *size)
3534 {
3535 uint32_t sclk, mclk, activity_percent;
3536 uint32_t offset, val_vid;
3537 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3538
3539 /* size must be at least 4 bytes for all sensors */
3540 if (*size < 4)
3541 return -EINVAL;
3542
3543 switch (idx) {
3544 case AMDGPU_PP_SENSOR_GFX_SCLK:
3545 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
3546 sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3547 *((uint32_t *)value) = sclk;
3548 *size = 4;
3549 return 0;
3550 case AMDGPU_PP_SENSOR_GFX_MCLK:
3551 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
3552 mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
3553 *((uint32_t *)value) = mclk;
3554 *size = 4;
3555 return 0;
3556 case AMDGPU_PP_SENSOR_GPU_LOAD:
3557 case AMDGPU_PP_SENSOR_MEM_LOAD:
3558 offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
3559 SMU_SoftRegisters,
3560 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
3561 AverageGraphicsActivity:
3562 AverageMemoryActivity);
3563
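		/*
		 * The SMC reports average activity as what appears to be an 8.8
		 * fixed-point percentage; add 0x80 to round to the nearest integer
		 * before shifting, then clamp to 100%.
		 */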
3564 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
3565 activity_percent += 0x80;
3566 activity_percent >>= 8;
3567 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3568 *size = 4;
3569 return 0;
3570 case AMDGPU_PP_SENSOR_GPU_TEMP:
3571 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
3572 *size = 4;
3573 return 0;
3574 case AMDGPU_PP_SENSOR_UVD_POWER:
3575 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3576 *size = 4;
3577 return 0;
3578 case AMDGPU_PP_SENSOR_VCE_POWER:
3579 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3580 *size = 4;
3581 return 0;
3582 case AMDGPU_PP_SENSOR_GPU_POWER:
3583 return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
3584 case AMDGPU_PP_SENSOR_VDDGFX:
3585 if ((data->vr_config & 0xff) == 0x2)
3586 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3587 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
3588 else
3589 val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3590 CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
3591
3592 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
3593 return 0;
3594 default:
3595 return -EINVAL;
3596 }
3597 }
3598
3599 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3600 {
3601 const struct phm_set_power_state_input *states =
3602 (const struct phm_set_power_state_input *)input;
3603 const struct smu7_power_state *smu7_ps =
3604 cast_const_phw_smu7_power_state(states->pnew_state);
3605 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3606 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
3607 uint32_t sclk = smu7_ps->performance_levels
3608 [smu7_ps->performance_level_count - 1].engine_clock;
3609 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
3610 uint32_t mclk = smu7_ps->performance_levels
3611 [smu7_ps->performance_level_count - 1].memory_clock;
3612 struct PP_Clocks min_clocks = {0};
3613 uint32_t i;
3614
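	/*
	 * Look up the requested top-level SCLK in the DPM table.  If it is not an
	 * existing level it must be an overdrive value: overwrite the highest
	 * level and flag an OD SCLK update so the SMC tables are repopulated.
	 * The same is done for MCLK below.
	 */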
3615 for (i = 0; i < sclk_table->count; i++) {
3616 if (sclk == sclk_table->dpm_levels[i].value)
3617 break;
3618 }
3619
3620 if (i >= sclk_table->count) {
3621 if (sclk > sclk_table->dpm_levels[i-1].value) {
3622 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3623 sclk_table->dpm_levels[i-1].value = sclk;
3624 }
3625 } else {
3626 /* TODO: Check SCLK in DAL's minimum clocks
3627 * in case DeepSleep divider update is required.
3628 */
3629 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
3630 (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
3631 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
3632 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3633 }
3634
3635 for (i = 0; i < mclk_table->count; i++) {
3636 if (mclk == mclk_table->dpm_levels[i].value)
3637 break;
3638 }
3639
3640 if (i >= mclk_table->count) {
3641 if (mclk > mclk_table->dpm_levels[i-1].value) {
3642 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3643 mclk_table->dpm_levels[i-1].value = mclk;
3644 }
3645 }
3646
3647 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3648 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3649
3650 return 0;
3651 }
3652
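/**
 * Find the highest PCIe link speed level a power state can require.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param smu7_ps the power state to inspect.
 * @return the PCIe speed level paired with the state's fastest SCLK in the
 *         DPM table, or 0 if that SCLK is not found.
 */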
3653 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3654 const struct smu7_power_state *smu7_ps)
3655 {
3656 uint32_t i;
3657 uint32_t sclk, max_sclk = 0;
3658 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3659 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3660
3661 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3662 sclk = smu7_ps->performance_levels[i].engine_clock;
3663 if (max_sclk < sclk)
3664 max_sclk = sclk;
3665 }
3666
3667 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3668 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3669 return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3670 dpm_table->pcie_speed_table.dpm_levels
3671 [dpm_table->pcie_speed_table.count - 1].value :
3672 dpm_table->pcie_speed_table.dpm_levels[i].value);
3673 }
3674
3675 return 0;
3676 }
3677
3678 static int smu7_request_link_speed_change_before_state_change(
3679 struct pp_hwmgr *hwmgr, const void *input)
3680 {
3681 const struct phm_set_power_state_input *states =
3682 (const struct phm_set_power_state_input *)input;
3683 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3684 const struct smu7_power_state *smu7_nps =
3685 cast_const_phw_smu7_power_state(states->pnew_state);
3686 const struct smu7_power_state *polaris10_cps =
3687 cast_const_phw_smu7_power_state(states->pcurrent_state);
3688
3689 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
3690 uint16_t current_link_speed;
3691
3692 if (data->force_pcie_gen == PP_PCIEGenInvalid)
3693 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
3694 else
3695 current_link_speed = data->force_pcie_gen;
3696
3697 data->force_pcie_gen = PP_PCIEGenInvalid;
3698 data->pspp_notify_required = false;
3699
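	/*
	 * Upspeed before the state change: ask the platform for the higher PCIe
	 * gen via ACPI, falling back from Gen3 to Gen2 and finally to the current
	 * link speed if the requests are rejected.  A downspeed is only noted
	 * here (pspp_notify_required) and requested after the state change.
	 */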
3700 if (target_link_speed > current_link_speed) {
3701 switch (target_link_speed) {
3702 #ifdef CONFIG_ACPI
3703 case PP_PCIEGen3:
3704 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
3705 break;
3706 data->force_pcie_gen = PP_PCIEGen2;
3707 if (current_link_speed == PP_PCIEGen2)
3708 break;
3709 /* fall through */
3710 case PP_PCIEGen2:
3711 if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
3712 break;
3713 #endif
3714 /* fall through */
3715 default:
3716 data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
3717 break;
3718 }
3719 } else {
3720 if (target_link_speed < current_link_speed)
3721 data->pspp_notify_required = true;
3722 }
3723
3724 return 0;
3725 }
3726
3727 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3728 {
3729 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3730
3731 if (0 == data->need_update_smu7_dpm_table)
3732 return 0;
3733
3734 if ((0 == data->sclk_dpm_key_disabled) &&
3735 (data->need_update_smu7_dpm_table &
3736 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3737 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3738 "Trying to freeze SCLK DPM when DPM is disabled",
3739 );
3740 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3741 PPSMC_MSG_SCLKDPM_FreezeLevel),
3742 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3743 return -EINVAL);
3744 }
3745
3746 if ((0 == data->mclk_dpm_key_disabled) &&
3747 (data->need_update_smu7_dpm_table &
3748 DPMTABLE_OD_UPDATE_MCLK)) {
3749 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3750 "Trying to freeze MCLK DPM when DPM is disabled",
3751 );
3752 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3753 PPSMC_MSG_MCLKDPM_FreezeLevel),
3754 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3755 return -EINVAL);
3756 }
3757
3758 return 0;
3759 }
3760
3761 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3762 struct pp_hwmgr *hwmgr, const void *input)
3763 {
3764 int result = 0;
3765 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3766 struct smu7_dpm_table *dpm_table = &data->dpm_table;
3767 uint32_t count;
3768 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
3769 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
3770 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
3771
3772 if (0 == data->need_update_smu7_dpm_table)
3773 return 0;
3774
3775 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3776 for (count = 0; count < dpm_table->sclk_table.count; count++) {
3777 dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
3778 dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
3779 }
3780 }
3781
3782 if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3783 for (count = 0; count < dpm_table->mclk_table.count; count++) {
3784 dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
3785 dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
3786 }
3787 }
3788
3789 if (data->need_update_smu7_dpm_table &
3790 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
3791 result = smum_populate_all_graphic_levels(hwmgr);
3792 PP_ASSERT_WITH_CODE((0 == result),
3793 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3794 return result);
3795 }
3796
3797 if (data->need_update_smu7_dpm_table &
3798 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3799 		/* populate MCLK DPM table to SMU7 */
3800 result = smum_populate_all_memory_levels(hwmgr);
3801 PP_ASSERT_WITH_CODE((0 == result),
3802 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3803 return result);
3804 }
3805
3806 return result;
3807 }
3808
3809 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3810 struct smu7_single_dpm_table *dpm_table,
3811 uint32_t low_limit, uint32_t high_limit)
3812 {
3813 uint32_t i;
3814
3815 for (i = 0; i < dpm_table->count; i++) {
3816 		/* skip the trim if OD is enabled */
3817 if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
3818 || dpm_table->dpm_levels[i].value > high_limit))
3819 dpm_table->dpm_levels[i].enabled = false;
3820 else
3821 dpm_table->dpm_levels[i].enabled = true;
3822 }
3823
3824 return 0;
3825 }
3826
3827 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
3828 const struct smu7_power_state *smu7_ps)
3829 {
3830 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3831 uint32_t high_limit_count;
3832
3833 PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
3834 "power state did not have any performance level",
3835 return -EINVAL);
3836
3837 high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
3838
3839 smu7_trim_single_dpm_states(hwmgr,
3840 &(data->dpm_table.sclk_table),
3841 smu7_ps->performance_levels[0].engine_clock,
3842 smu7_ps->performance_levels[high_limit_count].engine_clock);
3843
3844 smu7_trim_single_dpm_states(hwmgr,
3845 &(data->dpm_table.mclk_table),
3846 smu7_ps->performance_levels[0].memory_clock,
3847 smu7_ps->performance_levels[high_limit_count].memory_clock);
3848
3849 return 0;
3850 }
3851
3852 static int smu7_generate_dpm_level_enable_mask(
3853 struct pp_hwmgr *hwmgr, const void *input)
3854 {
3855 int result = 0;
3856 const struct phm_set_power_state_input *states =
3857 (const struct phm_set_power_state_input *)input;
3858 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3859 const struct smu7_power_state *smu7_ps =
3860 cast_const_phw_smu7_power_state(states->pnew_state);
3861
3862
3863 result = smu7_trim_dpm_states(hwmgr, smu7_ps);
3864 if (result)
3865 return result;
3866
3867 data->dpm_level_enable_mask.sclk_dpm_enable_mask =
3868 phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
3869 data->dpm_level_enable_mask.mclk_dpm_enable_mask =
3870 phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
3871 data->dpm_level_enable_mask.pcie_dpm_enable_mask =
3872 phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
3873
3874 return 0;
3875 }
3876
3877 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3878 {
3879 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3880
3881 if (0 == data->need_update_smu7_dpm_table)
3882 return 0;
3883
3884 if ((0 == data->sclk_dpm_key_disabled) &&
3885 (data->need_update_smu7_dpm_table &
3886 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3887
3888 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3889 "Trying to Unfreeze SCLK DPM when DPM is disabled",
3890 );
3891 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3892 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
3893 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
3894 return -EINVAL);
3895 }
3896
3897 if ((0 == data->mclk_dpm_key_disabled) &&
3898 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
3899
3900 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3901 "Trying to Unfreeze MCLK DPM when DPM is disabled",
3902 );
3903 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3904 PPSMC_MSG_MCLKDPM_UnfreezeLevel),
3905 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
3906 return -EINVAL);
3907 }
3908
3909 data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3910
3911 return 0;
3912 }
3913
3914 static int smu7_notify_link_speed_change_after_state_change(
3915 struct pp_hwmgr *hwmgr, const void *input)
3916 {
3917 const struct phm_set_power_state_input *states =
3918 (const struct phm_set_power_state_input *)input;
3919 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3920 const struct smu7_power_state *smu7_ps =
3921 cast_const_phw_smu7_power_state(states->pnew_state);
3922 uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
3923 uint8_t request;
3924
3925 if (data->pspp_notify_required) {
3926 if (target_link_speed == PP_PCIEGen3)
3927 request = PCIE_PERF_REQ_GEN3;
3928 else if (target_link_speed == PP_PCIEGen2)
3929 request = PCIE_PERF_REQ_GEN2;
3930 else
3931 request = PCIE_PERF_REQ_GEN1;
3932
3933 if (request == PCIE_PERF_REQ_GEN1 &&
3934 smu7_get_current_pcie_speed(hwmgr) > 0)
3935 return 0;
3936
3937 #ifdef CONFIG_ACPI
3938 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
3939 if (PP_PCIEGen2 == target_link_speed)
3940 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
3941 else
3942 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
3943 }
3944 #endif
3945 }
3946
3947 return 0;
3948 }
3949
3950 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
3951 {
3952 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3953
3954 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
3955 if (hwmgr->chip_id == CHIP_VEGAM)
3956 smum_send_msg_to_smc_with_parameter(hwmgr,
3957 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
3958 else
3959 smum_send_msg_to_smc_with_parameter(hwmgr,
3960 (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
3961 }
3962 return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
3963 }
3964
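/**
 * Apply a new power state to the hardware.
 *
 * Walks the state-change sequence: match the requested clocks against the
 * DPM tables, optionally raise the PCIe link speed, freeze SCLK/MCLK DPM,
 * repopulate and upload the DPM levels, regenerate the enable masks, notify
 * the SMC about the display configuration, then unfreeze DPM and upload the
 * new masks.
 */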
3965 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
3966 {
3967 int tmp_result, result = 0;
3968 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3969
3970 tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3971 PP_ASSERT_WITH_CODE((0 == tmp_result),
3972 "Failed to find DPM states clocks in DPM table!",
3973 result = tmp_result);
3974
3975 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3976 PHM_PlatformCaps_PCIEPerformanceRequest)) {
3977 tmp_result =
3978 smu7_request_link_speed_change_before_state_change(hwmgr, input);
3979 PP_ASSERT_WITH_CODE((0 == tmp_result),
3980 "Failed to request link speed change before state change!",
3981 result = tmp_result);
3982 }
3983
3984 tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
3985 PP_ASSERT_WITH_CODE((0 == tmp_result),
3986 "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
3987
3988 tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3989 PP_ASSERT_WITH_CODE((0 == tmp_result),
3990 "Failed to populate and upload SCLK MCLK DPM levels!",
3991 result = tmp_result);
3992
3993 /*
3994 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
3995 * That effectively disables AVFS feature.
3996 */
3997 if (hwmgr->hardcode_pp_table != NULL)
3998 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
3999
4000 tmp_result = smu7_update_avfs(hwmgr);
4001 PP_ASSERT_WITH_CODE((0 == tmp_result),
4002 "Failed to update avfs voltages!",
4003 result = tmp_result);
4004
4005 tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
4006 PP_ASSERT_WITH_CODE((0 == tmp_result),
4007 "Failed to generate DPM level enabled mask!",
4008 result = tmp_result);
4009
4010 tmp_result = smum_update_sclk_threshold(hwmgr);
4011 PP_ASSERT_WITH_CODE((0 == tmp_result),
4012 "Failed to update SCLK threshold!",
4013 result = tmp_result);
4014
4015 tmp_result = smu7_notify_smc_display(hwmgr);
4016 PP_ASSERT_WITH_CODE((0 == tmp_result),
4017 "Failed to notify smc display settings!",
4018 result = tmp_result);
4019
4020 tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4021 PP_ASSERT_WITH_CODE((0 == tmp_result),
4022 "Failed to unfreeze SCLK MCLK DPM!",
4023 result = tmp_result);
4024
4025 tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
4026 PP_ASSERT_WITH_CODE((0 == tmp_result),
4027 "Failed to upload DPM level enabled mask!",
4028 result = tmp_result);
4029
4030 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4031 PHM_PlatformCaps_PCIEPerformanceRequest)) {
4032 tmp_result =
4033 smu7_notify_link_speed_change_after_state_change(hwmgr, input);
4034 PP_ASSERT_WITH_CODE((0 == tmp_result),
4035 "Failed to notify link speed change after state change!",
4036 result = tmp_result);
4037 }
4038 data->apply_optimized_settings = false;
4039 return result;
4040 }
4041
4042 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4043 {
4044 hwmgr->thermal_controller.
4045 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4046
4047 return smum_send_msg_to_smc_with_parameter(hwmgr,
4048 PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
4049 }
4050
4051 static int
4052 smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4053 {
4054 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4055
4056 return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
4057 }
4058
4059 static int
4060 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4061 {
4062 if (hwmgr->display_config->num_display > 1 &&
4063 !hwmgr->display_config->multi_monitor_in_sync)
4064 smu7_notify_smc_display_change(hwmgr, false);
4065
4066 return 0;
4067 }
4068
4069 /**
4070 * Programs the display gap
4071 *
4072 * @param hwmgr the address of the powerplay hardware manager.
4073 * @return always OK
4074 */
4075 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4076 {
4077 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4078 uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4079 uint32_t display_gap2;
4080 uint32_t pre_vbi_time_in_us;
4081 uint32_t frame_time_in_us;
4082 uint32_t ref_clock, refresh_rate;
4083
4084 display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4085 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4086
4087 ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4088 refresh_rate = hwmgr->display_config->vrefresh;
4089
4090 if (0 == refresh_rate)
4091 refresh_rate = 60;
4092
4093 frame_time_in_us = 1000000 / refresh_rate;
4094
4095 pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4096
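	/*
	 * frame_time_x2 is twice the frame time expressed in 100 us units (one
	 * count per 50 us).  At the 60 Hz default, 1000000 / 60 = 16666 us gives
	 * frame_time_x2 = 333; the floor of 280 below corresponds to a 14000 us
	 * frame, i.e. roughly a 71 Hz refresh rate.
	 */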
4097 data->frame_time_x2 = frame_time_in_us * 2 / 100;
4098
4099 if (data->frame_time_x2 < 280) {
4100 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4101 data->frame_time_x2 = 280;
4102 }
4103
4104 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4105
4106 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4107
4108 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4109 data->soft_regs_start + smum_get_offsetof(hwmgr,
4110 SMU_SoftRegisters,
4111 PreVBlankGap), 0x64);
4112
4113 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4114 data->soft_regs_start + smum_get_offsetof(hwmgr,
4115 SMU_SoftRegisters,
4116 VBlankTimeout),
4117 (frame_time_in_us - pre_vbi_time_in_us));
4118
4119 return 0;
4120 }
4121
4122 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4123 {
4124 return smu7_program_display_gap(hwmgr);
4125 }
4126
4127 /**
4128 * Set maximum target operating fan output RPM
4129 *
4130 * @param hwmgr: the address of the powerplay hardware manager.
4131  * @param us_max_fan_rpm: max operating fan RPM value.
4132 * @return The response that came from the SMC.
4133 */
4134 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4135 {
4136 hwmgr->thermal_controller.
4137 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4138
4139 return smum_send_msg_to_smc_with_parameter(hwmgr,
4140 PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
4141 }
4142
4143 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
4144 .process = phm_irq_process,
4145 };
4146
4147 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4148 {
4149 struct amdgpu_irq_src *source =
4150 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4151
4152 if (!source)
4153 return -ENOMEM;
4154
4155 source->funcs = &smu7_irq_funcs;
4156
4157 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4158 AMDGPU_IRQ_CLIENTID_LEGACY,
4159 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4160 source);
4161 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4162 AMDGPU_IRQ_CLIENTID_LEGACY,
4163 VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4164 source);
4165
4166 /* Register CTF(GPIO_19) interrupt */
4167 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4168 AMDGPU_IRQ_CLIENTID_LEGACY,
4169 VISLANDS30_IV_SRCID_GPIO_19,
4170 source);
4171
4172 return 0;
4173 }
4174
4175 static bool
4176 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4177 {
4178 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4179 bool is_update_required = false;
4180
4181 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4182 is_update_required = true;
4183
4184 if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4185 is_update_required = true;
4186
4187 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4188 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4189 (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4190 hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4191 is_update_required = true;
4192 }
4193 return is_update_required;
4194 }
4195
4196 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4197 const struct smu7_performance_level *pl2)
4198 {
4199 return ((pl1->memory_clock == pl2->memory_clock) &&
4200 (pl1->engine_clock == pl2->engine_clock) &&
4201 (pl1->pcie_gen == pl2->pcie_gen) &&
4202 (pl1->pcie_lane == pl2->pcie_lane));
4203 }
4204
4205 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4206 const struct pp_hw_power_state *pstate1,
4207 const struct pp_hw_power_state *pstate2, bool *equal)
4208 {
4209 const struct smu7_power_state *psa;
4210 const struct smu7_power_state *psb;
4211 int i;
4212 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4213
4214 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4215 return -EINVAL;
4216
4217 psa = cast_const_phw_smu7_power_state(pstate1);
4218 psb = cast_const_phw_smu7_power_state(pstate2);
4219 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4220 if (psa->performance_level_count != psb->performance_level_count) {
4221 *equal = false;
4222 return 0;
4223 }
4224
4225 for (i = 0; i < psa->performance_level_count; i++) {
4226 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4227 /* If we have found even one performance level pair that is different the states are different. */
4228 *equal = false;
4229 return 0;
4230 }
4231 }
4232
4233 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4234 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4235 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4236 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4237 /* For OD call, set value based on flag */
4238 *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4239 DPMTABLE_OD_UPDATE_MCLK |
4240 DPMTABLE_OD_UPDATE_VDDC));
4241
4242 return 0;
4243 }
4244
4245 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4246 {
4247 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4248
4249 uint32_t tmp;
4250
4251 /* Read MC indirect register offset 0x9F bits [3:0] to see
4252 * if VBIOS has already loaded a full version of MC ucode
4253 * or not.
4254 */
4255
4256 smu7_get_mc_microcode_version(hwmgr);
4257
4258 data->need_long_memory_training = false;
4259
4260 cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4261 ixMC_IO_DEBUG_UP_13);
4262 tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4263
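	/*
	 * Bit 23 of MC_IO_DEBUG_UP_13 appears to indicate that the VBIOS loaded a
	 * full MC ucode image: if set, use the lower memory latencies and enable
	 * FFC on Polaris; otherwise fall back to the conservative value of 330
	 * and disable FFC.
	 */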
4264 if (tmp & (1 << 23)) {
4265 data->mem_latency_high = MEM_LATENCY_HIGH;
4266 data->mem_latency_low = MEM_LATENCY_LOW;
4267 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4268 (hwmgr->chip_id == CHIP_POLARIS11) ||
4269 (hwmgr->chip_id == CHIP_POLARIS12))
4270 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
4271 } else {
4272 data->mem_latency_high = 330;
4273 data->mem_latency_low = 330;
4274 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4275 (hwmgr->chip_id == CHIP_POLARIS11) ||
4276 (hwmgr->chip_id == CHIP_POLARIS12))
4277 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
4278 }
4279
4280 return 0;
4281 }
4282
4283 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4284 {
4285 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4286
4287 data->clock_registers.vCG_SPLL_FUNC_CNTL =
4288 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4289 data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
4290 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4291 data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
4292 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4293 data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
4294 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4295 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
4296 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4297 data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4298 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4299 data->clock_registers.vDLL_CNTL =
4300 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4301 data->clock_registers.vMCLK_PWRMGT_CNTL =
4302 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4303 data->clock_registers.vMPLL_AD_FUNC_CNTL =
4304 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4305 data->clock_registers.vMPLL_DQ_FUNC_CNTL =
4306 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4307 data->clock_registers.vMPLL_FUNC_CNTL =
4308 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4309 data->clock_registers.vMPLL_FUNC_CNTL_1 =
4310 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4311 data->clock_registers.vMPLL_FUNC_CNTL_2 =
4312 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4313 data->clock_registers.vMPLL_SS1 =
4314 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4315 data->clock_registers.vMPLL_SS2 =
4316 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4317 return 0;
4318
4319 }
4320
4321 /**
4322 * Find out if memory is GDDR5.
4323 *
4324 * @param hwmgr the address of the powerplay hardware manager.
4325 * @return always 0
4326 */
4327 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4328 {
4329 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4330 struct amdgpu_device *adev = hwmgr->adev;
4331
4332 data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4333
4334 return 0;
4335 }
4336
4337 /**
4338 * Enables Dynamic Power Management by SMC
4339 *
4340 * @param hwmgr the address of the powerplay hardware manager.
4341 * @return always 0
4342 */
4343 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4344 {
4345 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4346 GENERAL_PWRMGT, STATIC_PM_EN, 1);
4347
4348 return 0;
4349 }
4350
4351 /**
4352 * Initialize PowerGating States for different engines
4353 *
4354 * @param hwmgr the address of the powerplay hardware manager.
4355 * @return always 0
4356 */
4357 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4358 {
4359 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4360
4361 data->uvd_power_gated = false;
4362 data->vce_power_gated = false;
4363
4364 return 0;
4365 }
4366
4367 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4368 {
4369 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4370
4371 data->low_sclk_interrupt_threshold = 0;
4372 return 0;
4373 }
4374
4375 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4376 {
4377 int tmp_result, result = 0;
4378
4379 smu7_check_mc_firmware(hwmgr);
4380
4381 tmp_result = smu7_read_clock_registers(hwmgr);
4382 PP_ASSERT_WITH_CODE((0 == tmp_result),
4383 "Failed to read clock registers!", result = tmp_result);
4384
4385 tmp_result = smu7_get_memory_type(hwmgr);
4386 PP_ASSERT_WITH_CODE((0 == tmp_result),
4387 "Failed to get memory type!", result = tmp_result);
4388
4389 tmp_result = smu7_enable_acpi_power_management(hwmgr);
4390 PP_ASSERT_WITH_CODE((0 == tmp_result),
4391 "Failed to enable ACPI power management!", result = tmp_result);
4392
4393 tmp_result = smu7_init_power_gate_state(hwmgr);
4394 PP_ASSERT_WITH_CODE((0 == tmp_result),
4395 "Failed to init power gate state!", result = tmp_result);
4396
4397 tmp_result = smu7_get_mc_microcode_version(hwmgr);
4398 PP_ASSERT_WITH_CODE((0 == tmp_result),
4399 "Failed to get MC microcode version!", result = tmp_result);
4400
4401 tmp_result = smu7_init_sclk_threshold(hwmgr);
4402 PP_ASSERT_WITH_CODE((0 == tmp_result),
4403 "Failed to init sclk threshold!", result = tmp_result);
4404
4405 return result;
4406 }
4407
4408 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4409 enum pp_clock_type type, uint32_t mask)
4410 {
4411 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4412
4413 if (mask == 0)
4414 return -EINVAL;
4415
4416 switch (type) {
4417 case PP_SCLK:
4418 if (!data->sclk_dpm_key_disabled)
4419 smum_send_msg_to_smc_with_parameter(hwmgr,
4420 PPSMC_MSG_SCLKDPM_SetEnabledMask,
4421 data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
4422 break;
4423 case PP_MCLK:
4424 if (!data->mclk_dpm_key_disabled)
4425 smum_send_msg_to_smc_with_parameter(hwmgr,
4426 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4427 data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
4428 break;
4429 case PP_PCIE:
4430 {
4431 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4432
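		/*
		 * If the mask leaves more than one PCIe level enabled
		 * (fls(tmp) != ffs(tmp)), let DPM move between them; otherwise
		 * force the single selected level.
		 */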
4433 if (!data->pcie_dpm_key_disabled) {
4434 if (fls(tmp) != ffs(tmp))
4435 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
4436 else
4437 smum_send_msg_to_smc_with_parameter(hwmgr,
4438 PPSMC_MSG_PCIeDPM_ForceLevel,
4439 fls(tmp) - 1);
4440 }
4441 break;
4442 }
4443 default:
4444 break;
4445 }
4446
4447 return 0;
4448 }
4449
4450 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4451 enum pp_clock_type type, char *buf)
4452 {
4453 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4454 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4455 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4456 struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4457 struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4458 struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4459 struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4460 int i, now, size = 0;
4461 uint32_t clock, pcie_speed;
4462
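	/*
	 * For each clock type, read the current value from the SMC and mark the
	 * level the hardware is running at (the first level at or above the
	 * reported SCLK/MCLK, or the matching PCIe speed) with '*' in the
	 * printed list.
	 */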
4463 switch (type) {
4464 case PP_SCLK:
4465 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
4466 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4467
4468 for (i = 0; i < sclk_table->count; i++) {
4469 if (clock > sclk_table->dpm_levels[i].value)
4470 continue;
4471 break;
4472 }
4473 now = i;
4474
4475 for (i = 0; i < sclk_table->count; i++)
4476 size += sprintf(buf + size, "%d: %uMhz %s\n",
4477 i, sclk_table->dpm_levels[i].value / 100,
4478 (i == now) ? "*" : "");
4479 break;
4480 case PP_MCLK:
4481 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
4482 clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
4483
4484 for (i = 0; i < mclk_table->count; i++) {
4485 if (clock > mclk_table->dpm_levels[i].value)
4486 continue;
4487 break;
4488 }
4489 now = i;
4490
4491 for (i = 0; i < mclk_table->count; i++)
4492 size += sprintf(buf + size, "%d: %uMhz %s\n",
4493 i, mclk_table->dpm_levels[i].value / 100,
4494 (i == now) ? "*" : "");
4495 break;
4496 case PP_PCIE:
4497 pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4498 for (i = 0; i < pcie_table->count; i++) {
4499 if (pcie_speed != pcie_table->dpm_levels[i].value)
4500 continue;
4501 break;
4502 }
4503 now = i;
4504
4505 for (i = 0; i < pcie_table->count; i++)
4506 size += sprintf(buf + size, "%d: %s %s\n", i,
4507 (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4508 (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4509 (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4510 (i == now) ? "*" : "");
4511 break;
4512 case OD_SCLK:
4513 if (hwmgr->od_enabled) {
4514 size = sprintf(buf, "%s:\n", "OD_SCLK");
4515 for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4516 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4517 i, odn_sclk_table->entries[i].clock/100,
4518 odn_sclk_table->entries[i].vddc);
4519 }
4520 break;
4521 case OD_MCLK:
4522 if (hwmgr->od_enabled) {
4523 size = sprintf(buf, "%s:\n", "OD_MCLK");
4524 for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4525 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4526 i, odn_mclk_table->entries[i].clock/100,
4527 odn_mclk_table->entries[i].vddc);
4528 }
4529 break;
4530 case OD_RANGE:
4531 if (hwmgr->od_enabled) {
4532 size = sprintf(buf, "%s:\n", "OD_RANGE");
4533 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4534 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4535 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4536 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4537 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4538 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4539 size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4540 data->odn_dpm_table.min_vddc,
4541 data->odn_dpm_table.max_vddc);
4542 }
4543 break;
4544 default:
4545 break;
4546 }
4547 return size;
4548 }
4549
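/*
 * Map the generic AMD fan-control modes onto the SMU7 fan controller:
 * "none" pins the fan at 100%, "manual" stops the SMC fan-control
 * microcode (when available) so explicit speed requests take effect,
 * and "auto" hands control back to the SMC.
 */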
4550 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4551 {
4552 switch (mode) {
4553 case AMD_FAN_CTRL_NONE:
4554 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4555 break;
4556 case AMD_FAN_CTRL_MANUAL:
4557 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4558 PHM_PlatformCaps_MicrocodeFanControl))
4559 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4560 break;
4561 case AMD_FAN_CTRL_AUTO:
4562 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4563 smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4564 break;
4565 default:
4566 break;
4567 }
4568 }
4569
4570 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4571 {
4572 return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4573 }
4574
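/*
 * The sclk overdrive value reported here (and its mclk counterpart
 * below) is the percentage by which the highest DPM level exceeds the
 * golden (default) table:
 *
 *	od% = ceil((current_top - golden_top) * 100 / golden_top)
 *
 * For example (illustrative numbers), a golden top level of 1000 MHz
 * raised to 1100 MHz reads back as 10.  The setters apply the same
 * percentage on top of the golden value, clamping requests to 20%.
 */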
4575 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4576 {
4577 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4578 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4579 struct smu7_single_dpm_table *golden_sclk_table =
4580 &(data->golden_dpm_table.sclk_table);
4581 int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4582 int golden_value = golden_sclk_table->dpm_levels
4583 [golden_sclk_table->count - 1].value;
4584
4585 value -= golden_value;
4586 value = DIV_ROUND_UP(value * 100, golden_value);
4587
4588 return value;
4589 }
4590
4591 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4592 {
4593 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4594 struct smu7_single_dpm_table *golden_sclk_table =
4595 &(data->golden_dpm_table.sclk_table);
4596 struct pp_power_state *ps;
4597 struct smu7_power_state *smu7_ps;
4598
4599 if (value > 20)
4600 value = 20;
4601
4602 ps = hwmgr->request_ps;
4603
4604 if (ps == NULL)
4605 return -EINVAL;
4606
4607 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4608
4609 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4610 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4611 value / 100 +
4612 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4613
4614 return 0;
4615 }
4616
4617 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4618 {
4619 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4620 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4621 struct smu7_single_dpm_table *golden_mclk_table =
4622 &(data->golden_dpm_table.mclk_table);
4623 int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4624 int golden_value = golden_mclk_table->dpm_levels
4625 [golden_mclk_table->count - 1].value;
4626
4627 value -= golden_value;
4628 value = DIV_ROUND_UP(value * 100, golden_value);
4629
4630 return value;
4631 }
4632
4633 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4634 {
4635 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4636 struct smu7_single_dpm_table *golden_mclk_table =
4637 &(data->golden_dpm_table.mclk_table);
4638 struct pp_power_state *ps;
4639 struct smu7_power_state *smu7_ps;
4640
4641 if (value > 20)
4642 value = 20;
4643
4644 ps = hwmgr->request_ps;
4645
4646 if (ps == NULL)
4647 return -EINVAL;
4648
4649 smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4650
4651 smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4652 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4653 value / 100 +
4654 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4655
4656 return 0;
4657 }
4658
4659
4660 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4661 {
4662 struct phm_ppt_v1_information *table_info =
4663 (struct phm_ppt_v1_information *)hwmgr->pptable;
4664 struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4665 struct phm_clock_voltage_dependency_table *sclk_table;
4666 int i;
4667
4668 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4669 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4670 return -EINVAL;
4671 dep_sclk_table = table_info->vdd_dep_on_sclk;
4672 for (i = 0; i < dep_sclk_table->count; i++)
4673 clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
4674 clocks->count = dep_sclk_table->count;
4675 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4676 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4677 for (i = 0; i < sclk_table->count; i++)
4678 clocks->clock[i] = sclk_table->entries[i].clk * 10;
4679 clocks->count = sclk_table->count;
4680 }
4681
4682 return 0;
4683 }
4684
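/*
 * Translate a memory clock into the latency figure reported with it:
 * clocks from MEM_FREQ_LOW_LATENCY up to MEM_FREQ_HIGH_LATENCY use the
 * "high" latency value, faster clocks the "low" value, and anything
 * slower is reported as MEM_LATENCY_ERR.
 */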
4685 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4686 {
4687 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4688
4689 if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4690 return data->mem_latency_high;
4691 else if (clk >= MEM_FREQ_HIGH_LATENCY)
4692 return data->mem_latency_low;
4693 else
4694 return MEM_LATENCY_ERR;
4695 }
4696
4697 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4698 {
4699 struct phm_ppt_v1_information *table_info =
4700 (struct phm_ppt_v1_information *)hwmgr->pptable;
4701 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4702 int i;
4703 struct phm_clock_voltage_dependency_table *mclk_table;
4704
4705 if (hwmgr->pp_table_version == PP_TABLE_V1) {
4706 if (table_info == NULL)
4707 return -EINVAL;
4708 dep_mclk_table = table_info->vdd_dep_on_mclk;
4709 for (i = 0; i < dep_mclk_table->count; i++) {
4710 clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
4711 clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4712 dep_mclk_table->entries[i].clk);
4713 }
4714 clocks->count = dep_mclk_table->count;
4715 } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4716 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4717 for (i = 0; i < mclk_table->count; i++)
4718 clocks->clock[i] = mclk_table->entries[i].clk * 10;
4719 clocks->count = mclk_table->count;
4720 }
4721 return 0;
4722 }
4723
4724 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4725 struct amd_pp_clocks *clocks)
4726 {
4727 switch (type) {
4728 case amd_pp_sys_clock:
4729 smu7_get_sclks(hwmgr, clocks);
4730 break;
4731 case amd_pp_mem_clock:
4732 smu7_get_mclks(hwmgr, clocks);
4733 break;
4734 default:
4735 return -EINVAL;
4736 }
4737
4738 return 0;
4739 }
4740
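/*
 * Publish the CAC/DRAM logging buffer to the SMC by writing its MC and
 * virtual addresses (split into high/low halves) and its size into the
 * SMU soft-register area.
 */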
4741 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4742 uint32_t virtual_addr_low,
4743 uint32_t virtual_addr_hi,
4744 uint32_t mc_addr_low,
4745 uint32_t mc_addr_hi,
4746 uint32_t size)
4747 {
4748 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4749
4750 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4751 data->soft_regs_start +
4752 smum_get_offsetof(hwmgr,
4753 SMU_SoftRegisters, DRAM_LOG_ADDR_H),
4754 mc_addr_hi);
4755
4756 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4757 data->soft_regs_start +
4758 smum_get_offsetof(hwmgr,
4759 SMU_SoftRegisters, DRAM_LOG_ADDR_L),
4760 mc_addr_low);
4761
4762 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4763 data->soft_regs_start +
4764 smum_get_offsetof(hwmgr,
4765 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
4766 virtual_addr_hi);
4767
4768 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4769 data->soft_regs_start +
4770 smum_get_offsetof(hwmgr,
4771 SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
4772 virtual_addr_low);
4773
4774 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4775 data->soft_regs_start +
4776 smum_get_offsetof(hwmgr,
4777 SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
4778 size);
4779 return 0;
4780 }
4781
4782 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
4783 struct amd_pp_simple_clock_info *clocks)
4784 {
4785 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4786 struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4787 struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4788
4789 if (clocks == NULL)
4790 return -EINVAL;
4791
4792 clocks->memory_max_clock = mclk_table->count > 1 ?
4793 mclk_table->dpm_levels[mclk_table->count-1].value :
4794 mclk_table->dpm_levels[0].value;
4795 clocks->engine_max_clock = sclk_table->count > 1 ?
4796 sclk_table->dpm_levels[sclk_table->count-1].value :
4797 sclk_table->dpm_levels[0].value;
4798 return 0;
4799 }
4800
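/*
 * Report the supported temperature range: start from the static
 * SMU7ThermalPolicy defaults, then override the maximum with the
 * software shutdown temperature taken from the v1 powerplay table or,
 * for v0 tables, from the thermal settings cached in the backend,
 * converted to PP temperature units.
 */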
4801 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4802 struct PP_TemperatureRange *thermal_data)
4803 {
4804 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4805 struct phm_ppt_v1_information *table_info =
4806 (struct phm_ppt_v1_information *)hwmgr->pptable;
4807
4808 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
4809
4810 if (hwmgr->pp_table_version == PP_TABLE_V1)
4811 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
4812 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4813 else if (hwmgr->pp_table_version == PP_TABLE_V0)
4814 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
4815 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4816
4817 return 0;
4818 }
4819
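/*
 * Validate one user-supplied OverDrive entry: the voltage must lie
 * within [min_vddc, max_vddc] from the ODN table, and the clock must
 * fall between the lowest golden DPM level and the platform's
 * overdrive limit for the table being edited.
 */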
4820 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4821 enum PP_OD_DPM_TABLE_COMMAND type,
4822 uint32_t clk,
4823 uint32_t voltage)
4824 {
4825 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4826
4827 if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
4828 pr_info("OD voltage is out of range [%d - %d] mV\n",
4829 data->odn_dpm_table.min_vddc,
4830 data->odn_dpm_table.max_vddc);
4831 return false;
4832 }
4833
4834 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4835 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
4836 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4837 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4838 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4839 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4840 return false;
4841 }
4842 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4843 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
4844 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4845 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4846 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4847 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4848 return false;
4849 }
4850 } else {
4851 return false;
4852 }
4853
4854 return true;
4855 }
4856
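/*
 * Edit the OverDrive (ODN) DPM tables from user input.  The input is a
 * sequence of (level, clock in MHz, voltage in mV) triplets; clocks are
 * converted to the internal 10 kHz units and stored in both the display
 * table and the voltage-dependency table.  RESTORE_DEFAULT and COMMIT
 * commands re-initialize or apply the tables instead of editing them.
 */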
4857 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4858 enum PP_OD_DPM_TABLE_COMMAND type,
4859 long *input, uint32_t size)
4860 {
4861 uint32_t i;
4862 struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
4863 struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
4864 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4865
4866 uint32_t input_clk;
4867 uint32_t input_vol;
4868 uint32_t input_level;
4869
4870 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4871 return -EINVAL);
4872
4873 if (!hwmgr->od_enabled) {
4874 pr_info("OverDrive feature not enabled\n");
4875 return -EINVAL;
4876 }
4877
4878 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4879 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
4880 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
4881 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4882 "Failed to get ODN SCLK and Voltage tables",
4883 return -EINVAL);
4884 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4885 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
4886 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
4887
4888 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
4889 "Failed to get ODN MCLK and Voltage tables",
4890 return -EINVAL);
4891 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4892 smu7_odn_initial_default_setting(hwmgr);
4893 return 0;
4894 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4895 smu7_check_dpm_table_updated(hwmgr);
4896 return 0;
4897 } else {
4898 return -EINVAL;
4899 }
4900
4901 for (i = 0; i < size; i += 3) {
4902 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
4903 			pr_info("invalid clock voltage input\n");
4904 return 0;
4905 }
4906 input_level = input[i];
4907 input_clk = input[i+1] * 100;
4908 input_vol = input[i+2];
4909
4910 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4911 podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
4912 podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
4913 podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
4914 podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
4915 podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
4916 } else {
4917 return -EINVAL;
4918 }
4919 }
4920
4921 return 0;
4922 }
4923
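/*
 * Print the power-profile table: one row per entry in smu7_profiling,
 * with the currently selected mode taken from the live
 * current_profile_setting and marked with '*'.  Rows that do not touch
 * the sclk or mclk parameters print '-' placeholders for those columns.
 */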
4924 static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4925 {
4926 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4927 uint32_t i, size = 0;
4928 uint32_t len;
4929
4930 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4931 "3D_FULL_SCREEN",
4932 "POWER_SAVING",
4933 "VIDEO",
4934 "VR",
4935 "COMPUTE",
4936 "CUSTOM"};
4937
4938 static const char *title[8] = {"NUM",
4939 "MODE_NAME",
4940 "SCLK_UP_HYST",
4941 "SCLK_DOWN_HYST",
4942 "SCLK_ACTIVE_LEVEL",
4943 "MCLK_UP_HYST",
4944 "MCLK_DOWN_HYST",
4945 "MCLK_ACTIVE_LEVEL"};
4946
4947 if (!buf)
4948 return -EINVAL;
4949
4950 size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
4951 title[0], title[1], title[2], title[3],
4952 title[4], title[5], title[6], title[7]);
4953
4954 len = ARRAY_SIZE(smu7_profiling);
4955
4956 for (i = 0; i < len; i++) {
4957 if (i == hwmgr->power_profile_mode) {
4958 size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
4959 i, profile_name[i], "*",
4960 data->current_profile_setting.sclk_up_hyst,
4961 data->current_profile_setting.sclk_down_hyst,
4962 data->current_profile_setting.sclk_activity,
4963 data->current_profile_setting.mclk_up_hyst,
4964 data->current_profile_setting.mclk_down_hyst,
4965 data->current_profile_setting.mclk_activity);
4966 continue;
4967 }
4968 if (smu7_profiling[i].bupdate_sclk)
4969 size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
4970 i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
4971 smu7_profiling[i].sclk_down_hyst,
4972 smu7_profiling[i].sclk_activity);
4973 else
4974 size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
4975 i, profile_name[i], "-", "-", "-");
4976
4977 if (smu7_profiling[i].bupdate_mclk)
4978 size += sprintf(buf + size, "%16d %16d %16d\n",
4979 smu7_profiling[i].mclk_up_hyst,
4980 smu7_profiling[i].mclk_down_hyst,
4981 smu7_profiling[i].mclk_activity);
4982 else
4983 size += sprintf(buf + size, "%16s %16s %16s\n",
4984 "-", "-", "-");
4985 }
4986
4987 return size;
4988 }
4989
4990 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
4991 				enum PP_SMC_POWER_PROFILE request)
4992 {
4993 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4994 uint32_t tmp, level;
4995
4996 	if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
4997 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4998 level = 0;
4999 tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5000 while (tmp >>= 1)
5001 level++;
5002 if (level > 0)
5003 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5004 }
5005 } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5006 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5007 }
5008 }
5009
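/*
 * Select a power profile.  For CUSTOM, either reuse the previously
 * saved custom setting (size == 0) or take eight values from the user
 * (sclk update flag, up/down hysteresis and activity level, then the
 * mclk equivalents) and save them; the fixed profiles come from the
 * static smu7_profiling table.  The setting is pushed to the SMC with
 * smum_update_dpm_settings() before being recorded as current.
 */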
5010 static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5011 {
5012 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5013 struct profile_mode_setting tmp;
5014 enum PP_SMC_POWER_PROFILE mode;
5015
5016 if (input == NULL)
5017 return -EINVAL;
5018
5019 mode = input[size];
5020 switch (mode) {
5021 case PP_SMC_POWER_PROFILE_CUSTOM:
5022 if (size < 8 && size != 0)
5023 return -EINVAL;
5024 /* If only CUSTOM is passed in, use the saved values. Check
5025 * that we actually have a CUSTOM profile by ensuring that
5026 * the "use sclk" or the "use mclk" bits are set
5027 */
5028 tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5029 if (size == 0) {
5030 if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5031 return -EINVAL;
5032 } else {
5033 tmp.bupdate_sclk = input[0];
5034 tmp.sclk_up_hyst = input[1];
5035 tmp.sclk_down_hyst = input[2];
5036 tmp.sclk_activity = input[3];
5037 tmp.bupdate_mclk = input[4];
5038 tmp.mclk_up_hyst = input[5];
5039 tmp.mclk_down_hyst = input[6];
5040 tmp.mclk_activity = input[7];
5041 smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5042 }
5043 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5044 memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5045 hwmgr->power_profile_mode = mode;
5046 }
5047 break;
5048 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5049 case PP_SMC_POWER_PROFILE_POWERSAVING:
5050 case PP_SMC_POWER_PROFILE_VIDEO:
5051 case PP_SMC_POWER_PROFILE_VR:
5052 case PP_SMC_POWER_PROFILE_COMPUTE:
5053 if (mode == hwmgr->power_profile_mode)
5054 return 0;
5055
5056 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5057 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5058 if (tmp.bupdate_sclk) {
5059 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5060 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5061 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5062 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5063 }
5064 if (tmp.bupdate_mclk) {
5065 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5066 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5067 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5068 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5069 }
5070 smu7_patch_compute_profile_mode(hwmgr, mode);
5071 hwmgr->power_profile_mode = mode;
5072 }
5073 break;
5074 default:
5075 return -EINVAL;
5076 }
5077
5078 return 0;
5079 }
5080
5081 static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5082 PHM_PerformanceLevelDesignation designation, uint32_t index,
5083 PHM_PerformanceLevel *level)
5084 {
5085 const struct smu7_power_state *ps;
5086 uint32_t i;
5087
5088 if (level == NULL || hwmgr == NULL || state == NULL)
5089 return -EINVAL;
5090
5091 ps = cast_const_phw_smu7_power_state(state);
5092
5093 i = index > ps->performance_level_count - 1 ?
5094 ps->performance_level_count - 1 : index;
5095
5096 level->coreClock = ps->performance_levels[i].engine_clock;
5097 level->memory_clock = ps->performance_levels[i].memory_clock;
5098
5099 return 0;
5100 }
5101
5102 static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5103 {
5104 int result;
5105
5106 result = smu7_disable_dpm_tasks(hwmgr);
5107 PP_ASSERT_WITH_CODE((0 == result),
5108 "[disable_dpm_tasks] Failed to disable DPM!",
5109 );
5110
5111 return result;
5112 }
5113
5114 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5115 .backend_init = &smu7_hwmgr_backend_init,
5116 .backend_fini = &smu7_hwmgr_backend_fini,
5117 .asic_setup = &smu7_setup_asic_task,
5118 .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5119 .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5120 .force_dpm_level = &smu7_force_dpm_level,
5121 .power_state_set = smu7_set_power_state_tasks,
5122 .get_power_state_size = smu7_get_power_state_size,
5123 .get_mclk = smu7_dpm_get_mclk,
5124 .get_sclk = smu7_dpm_get_sclk,
5125 .patch_boot_state = smu7_dpm_patch_boot_state,
5126 .get_pp_table_entry = smu7_get_pp_table_entry,
5127 .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5128 .powerdown_uvd = smu7_powerdown_uvd,
5129 .powergate_uvd = smu7_powergate_uvd,
5130 .powergate_vce = smu7_powergate_vce,
5131 .disable_clock_power_gating = smu7_disable_clock_power_gating,
5132 .update_clock_gatings = smu7_update_clock_gatings,
5133 .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5134 .display_config_changed = smu7_display_configuration_changed_task,
5135 .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5136 .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5137 .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5138 .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5139 .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5140 .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5141 .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5142 .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5143 .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5144 .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5145 .register_irq_handlers = smu7_register_irq_handlers,
5146 .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5147 .check_states_equal = smu7_check_states_equal,
5148 .set_fan_control_mode = smu7_set_fan_control_mode,
5149 .get_fan_control_mode = smu7_get_fan_control_mode,
5150 .force_clock_level = smu7_force_clock_level,
5151 .print_clock_levels = smu7_print_clock_levels,
5152 .powergate_gfx = smu7_powergate_gfx,
5153 .get_sclk_od = smu7_get_sclk_od,
5154 .set_sclk_od = smu7_set_sclk_od,
5155 .get_mclk_od = smu7_get_mclk_od,
5156 .set_mclk_od = smu7_set_mclk_od,
5157 .get_clock_by_type = smu7_get_clock_by_type,
5158 .read_sensor = smu7_read_sensor,
5159 .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5160 .avfs_control = smu7_avfs_control,
5161 .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5162 .start_thermal_controller = smu7_start_thermal_controller,
5163 .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5164 .get_max_high_clocks = smu7_get_max_high_clocks,
5165 .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5166 .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5167 .set_power_limit = smu7_set_power_limit,
5168 .get_power_profile_mode = smu7_get_power_profile_mode,
5169 .set_power_profile_mode = smu7_set_power_profile_mode,
5170 .get_performance_level = smu7_get_performance_level,
5171 .get_asic_baco_capability = smu7_baco_get_capability,
5172 .get_asic_baco_state = smu7_baco_get_state,
5173 .set_asic_baco_state = smu7_baco_set_state,
5174 .power_off_asic = smu7_power_off_asic,
5175 };
5176
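/*
 * Pick the deep-sleep divider ID for an engine clock: the largest
 * divider i, counting down from SMU7_MAX_DEEPSLEEP_DIVIDER_ID, such
 * that clock >> i still meets the larger of clock_insr and
 * SMU7_MINIMUM_ENGINE_CLOCK.  For example (illustrative numbers), with
 * a minimum of 2500 and a clock of 30000, 30000 >> 3 == 3750 still
 * meets the minimum while 30000 >> 4 does not, so 3 is returned
 * (assuming the maximum divider ID is at least 3).
 */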
5177 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5178 uint32_t clock_insr)
5179 {
5180 uint8_t i;
5181 uint32_t temp;
5182 uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5183
5184 PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5185 for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
5186 temp = clock >> i;
5187
5188 if (temp >= min || i == 0)
5189 break;
5190 }
5191 return i;
5192 }
5193
5194 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5195 {
5196 hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5197 if (hwmgr->pp_table_version == PP_TABLE_V0)
5198 hwmgr->pptable_func = &pptable_funcs;
5199 else if (hwmgr->pp_table_version == PP_TABLE_V1)
5200 hwmgr->pptable_func = &pptable_v1_0_funcs;
5201
5202 return 0;
5203 }
5204