xref: /openbsd-src/sys/dev/pci/drm/amd/pm/swsmu/smu11/vangogh_ppt.c (revision 5cd94c4490ab3aa13a1c5469114527a183cf85ac)
1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #define SWSMU_CODE_LAYER_L2
25 
26 #include "amdgpu.h"
27 #include "amdgpu_smu.h"
28 #include "smu_v11_0.h"
29 #include "smu11_driver_if_vangogh.h"
30 #include "vangogh_ppt.h"
31 #include "smu_v11_5_ppsmc.h"
32 #include "smu_v11_5_pmfw.h"
33 #include "smu_cmn.h"
34 #include "soc15_common.h"
35 #include "asic_reg/gc/gc_10_3_0_offset.h"
36 #include "asic_reg/gc/gc_10_3_0_sh_mask.h"
37 #include <asm/processor.h>
38 
39 /*
40  * DO NOT use these for err/warn/info/debug messages.
41  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
42  * They are more MGPU friendly.
43  */
44 #undef pr_err
45 #undef pr_warn
46 #undef pr_info
47 #undef pr_debug
48 
49 // Registers related to GFXOFF
50 // addressBlock: smuio_smuio_SmuSmuioDec
51 // base address: 0x5a000
52 #define mmSMUIO_GFX_MISC_CNTL			0x00c5
53 #define mmSMUIO_GFX_MISC_CNTL_BASE_IDX		0
54 
55 //SMUIO_GFX_MISC_CNTL
56 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT	0x0
57 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT		0x1
58 #define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK	0x00000001L
59 #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK		0x00000006L
60 
61 #define FEATURE_MASK(feature) (1ULL << feature)
62 #define SMC_DPM_FEATURE ( \
63 	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
64 	FEATURE_MASK(FEATURE_VCN_DPM_BIT)	 | \
65 	FEATURE_MASK(FEATURE_FCLK_DPM_BIT)	 | \
66 	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)	 | \
67 	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)	 | \
68 	FEATURE_MASK(FEATURE_LCLK_DPM_BIT)	 | \
69 	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)	 | \
70 	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
71 	FEATURE_MASK(FEATURE_GFX_DPM_BIT))
72 
/*
 * Map generic SMU_MSG_* identifiers onto the Vangogh PPSMC message IDs
 * accepted by the firmware.  The trailing field is the per-message flags
 * argument of MSG_MAP (0 for every entry here -- see smu_cmn.h for its
 * meaning).  Messages absent from this table are rejected by smu_cmn.
 */
static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			0),
	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,		0),
	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,	0),
	MSG_MAP(EnableGfxOff,                   PPSMC_MSG_EnableGfxOff,			0),
	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,          0),
	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,	0),
	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		0),
	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(RlcPowerNotify,                 PPSMC_MSG_RlcPowerNotify,		0),
	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		0),
	MSG_MAP(SetSoftMinGfxclk,               PPSMC_MSG_SetSoftMinGfxclk,		0),
	MSG_MAP(ActiveProcessNotify,            PPSMC_MSG_ActiveProcessNotify,		0),
	MSG_MAP(SetHardMinIspiclkByFreq,        PPSMC_MSG_SetHardMinIspiclkByFreq,	0),
	MSG_MAP(SetHardMinIspxclkByFreq,        PPSMC_MSG_SetHardMinIspxclkByFreq,	0),
	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	0),
	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	0),
	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		0),
	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	0),
	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	0),
	MSG_MAP(SetSoftMinFclk,                 PPSMC_MSG_SetSoftMinFclk,		0),
	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		0),
	MSG_MAP(EnablePostCode,                 PPSMC_MSG_EnablePostCode,		0),
	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,	0),
	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		0),
	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		0),
	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		0),
	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	0),
	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		0),
	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,			0),
	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	0),
	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,			0),
	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,				0),
	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		0),
	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	0),
	MSG_MAP(PowerUpCvip,                    PPSMC_MSG_PowerUpCvip,				0),
	MSG_MAP(PowerDownCvip,                  PPSMC_MSG_PowerDownCvip,			0),
	MSG_MAP(GetPptLimit,                        PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(GetThermalLimit,                    PPSMC_MSG_GetThermalLimit,		0),
	MSG_MAP(GetCurrentTemperature,              PPSMC_MSG_GetCurrentTemperature, 0),
	MSG_MAP(GetCurrentPower,                    PPSMC_MSG_GetCurrentPower,		 0),
	MSG_MAP(GetCurrentVoltage,                  PPSMC_MSG_GetCurrentVoltage,	 0),
	MSG_MAP(GetCurrentCurrent,                  PPSMC_MSG_GetCurrentCurrent,	 0),
	MSG_MAP(GetAverageCpuActivity,              PPSMC_MSG_GetAverageCpuActivity, 0),
	MSG_MAP(GetAverageGfxActivity,              PPSMC_MSG_GetAverageGfxActivity, 0),
	MSG_MAP(GetAveragePower,                    PPSMC_MSG_GetAveragePower,		 0),
	MSG_MAP(GetAverageTemperature,              PPSMC_MSG_GetAverageTemperature, 0),
	MSG_MAP(SetAveragePowerTimeConstant,        PPSMC_MSG_SetAveragePowerTimeConstant,			0),
	MSG_MAP(SetAverageActivityTimeConstant,     PPSMC_MSG_SetAverageActivityTimeConstant,		0),
	MSG_MAP(SetAverageTemperatureTimeConstant,  PPSMC_MSG_SetAverageTemperatureTimeConstant,	0),
	MSG_MAP(SetMitigationEndHysteresis,         PPSMC_MSG_SetMitigationEndHysteresis,			0),
	MSG_MAP(GetCurrentFreq,                     PPSMC_MSG_GetCurrentFreq,						0),
	MSG_MAP(SetReducedPptLimit,                 PPSMC_MSG_SetReducedPptLimit,					0),
	MSG_MAP(SetReducedThermalLimit,             PPSMC_MSG_SetReducedThermalLimit,				0),
	MSG_MAP(DramLogSetDramAddr,                 PPSMC_MSG_DramLogSetDramAddr,					0),
	MSG_MAP(StartDramLogging,                   PPSMC_MSG_StartDramLogging,						0),
	MSG_MAP(StopDramLogging,                    PPSMC_MSG_StopDramLogging,						0),
	MSG_MAP(SetSoftMinCclk,                     PPSMC_MSG_SetSoftMinCclk,						0),
	MSG_MAP(SetSoftMaxCclk,                     PPSMC_MSG_SetSoftMaxCclk,						0),
	MSG_MAP(RequestActiveWgp,                   PPSMC_MSG_RequestActiveWgp,                     0),
	MSG_MAP(SetFastPPTLimit,                    PPSMC_MSG_SetFastPPTLimit,						0),
	MSG_MAP(SetSlowPPTLimit,                    PPSMC_MSG_SetSlowPPTLimit,						0),
	MSG_MAP(GetFastPPTLimit,                    PPSMC_MSG_GetFastPPTLimit,						0),
	MSG_MAP(GetSlowPPTLimit,                    PPSMC_MSG_GetSlowPPTLimit,						0),
	MSG_MAP(GetGfxOffStatus,		    PPSMC_MSG_GetGfxOffStatus,						0),
	MSG_MAP(GetGfxOffEntryCount,		    PPSMC_MSG_GetGfxOffEntryCount,					0),
	MSG_MAP(LogGfxOffResidency,		    PPSMC_MSG_LogGfxOffResidency,					0),
};
145 
/*
 * Map generic SMU_FEATURE_* identifiers onto the Vangogh firmware feature
 * bits.  The REVERSE/HALF_REVERSE variants handle features whose generic
 * and ASIC-specific names differ in direction -- see the FEA_MAP_* macro
 * definitions in smu_cmn.h for the exact name composition.
 */
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(S0I2),
	FEA_MAP(SMU_LOW_POWER),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(OS_CSTATES),
	FEA_MAP(ISP_DPM),
	FEA_MAP(A55_DPM),
	FEA_MAP(CVIP_DSP_DPM),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_HALF_REVERSE(GFX),
};
192 
/* SMU tables that are valid/transferable on Vangogh. */
static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};
199 
/* Map generic power-profile modes onto the PPLIB workload bits the firmware understands. */
static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED,		WORKLOAD_PPLIB_CAPPED_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED,		WORKLOAD_PPLIB_UNCAPPED_BIT),
};
209 
/*
 * Translate firmware throttler status bit positions (THROTTLER_STATUS_BIT_*)
 * into the ASIC-independent SMU_THROTTLER_*_BIT positions used for
 * reporting throttle reasons to userspace.
 */
static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL]	= (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT]	= (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT]	= (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU]	= (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE]	= (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC]	= (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD]	= (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC]	= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX]	= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_STATUS_BIT_TDC_CVIP]	= (SMU_THROTTLER_TDC_CVIP_BIT),
};
223 
224 static int vangogh_tables_init(struct smu_context *smu)
225 {
226 	struct smu_table_context *smu_table = &smu->smu_table;
227 	struct smu_table *tables = smu_table->tables;
228 	uint32_t if_version;
229 	uint32_t smu_version;
230 	uint32_t ret = 0;
231 
232 	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
233 	if (ret) {
234 		return ret;
235 	}
236 
237 	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
238 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
239 	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
240 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
241 	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
242 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
243 	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
244 		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
245 
246 	if (if_version < 0x3) {
247 		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
248 				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
249 		smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
250 	} else {
251 		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
252 				PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
253 		smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
254 	}
255 	if (!smu_table->metrics_table)
256 		goto err0_out;
257 	smu_table->metrics_time = 0;
258 
259 	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
260 	smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
261 	smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
262 	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
263 	if (!smu_table->gpu_metrics_table)
264 		goto err1_out;
265 
266 	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
267 	if (!smu_table->watermarks_table)
268 		goto err2_out;
269 
270 	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
271 	if (!smu_table->clocks_table)
272 		goto err3_out;
273 
274 	return 0;
275 
276 err3_out:
277 	kfree(smu_table->watermarks_table);
278 err2_out:
279 	kfree(smu_table->gpu_metrics_table);
280 err1_out:
281 	kfree(smu_table->metrics_table);
282 err0_out:
283 	return -ENOMEM;
284 }
285 
/*
 * vangogh_get_legacy_smu_metrics_data - read one metric (legacy layout).
 *
 * Refreshes the cached metrics table (SmuMetrics_legacy_t, used when the
 * SMU interface version is < 3) and copies the requested member into
 * @value.  For METRICS_AVERAGE_CPUCLK, @value must point at a buffer big
 * enough for smu->cpu_core_num uint16_t per-core frequencies.  Unknown
 * members yield UINT_MAX.  Returns 0 on success or a negative errno if
 * the table refresh fails.
 */
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	/* NULL destination: refresh smu_table->metrics_table in place. */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		/* scaled down by 100 -- legacy fw presumably reports centi-percent; confirm */
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* convert to 8.8 fixed point (input presumably in mW -- confirm against fw docs) */
		*value = (metrics->CurrentSocketPower << 8) /
		1000 ;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* centi-degrees C -> driver temperature units */
		*value = metrics->GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* Voltage[] index meanings come from the fw interface header -- TODO confirm */
		*value = metrics->Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* bulk-copy per-core CCLK frequencies (uint16_t each) into the caller's buffer */
		memcpy(value, &metrics->CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
354 
/*
 * vangogh_get_smu_metrics_data - read one metric (current layout).
 *
 * Same contract as vangogh_get_legacy_smu_metrics_data() but for SMU
 * interface versions >= 3, whose SmuMetrics_t splits samples into
 * Current/Average sub-structures.  Refreshes the cached table, then
 * copies the requested member into @value; unknown members yield
 * UINT_MAX.  Returns 0 on success or a negative errno.
 */
static int vangogh_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	/* NULL destination: refresh smu_table->metrics_table in place. */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->Current.GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->Current.SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->Current.VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->Current.DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->Current.MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->Current.GfxActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->Current.UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* convert to 8.8 fixed point (input presumably in mW -- confirm against fw docs) */
		*value = (metrics->Average.CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_CURR_SOCKETPOWER:
		*value = (metrics->Current.CurrentSocketPower << 8) /
		1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* centi-degrees C -> driver temperature units */
		*value = metrics->Current.GfxTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->Current.SocTemperature / 100 *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->Current.ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		/* Voltage[] index meanings come from the fw interface header -- TODO confirm */
		*value = metrics->Current.Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Current.Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		/* bulk-copy per-core CCLK frequencies (uint16_t each) into the caller's buffer */
		memcpy(value, &metrics->Current.CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}
427 
428 static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
429 				       MetricsMember_t member,
430 				       uint32_t *value)
431 {
432 	struct amdgpu_device *adev = smu->adev;
433 	uint32_t if_version;
434 	int ret = 0;
435 
436 	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
437 	if (ret) {
438 		dev_err(adev->dev, "Failed to get smu if version!\n");
439 		return ret;
440 	}
441 
442 	if (if_version < 0x3)
443 		ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
444 	else
445 		ret = vangogh_get_smu_metrics_data(smu, member, value);
446 
447 	return ret;
448 }
449 
450 static int vangogh_allocate_dpm_context(struct smu_context *smu)
451 {
452 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
453 
454 	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
455 				       GFP_KERNEL);
456 	if (!smu_dpm->dpm_context)
457 		return -ENOMEM;
458 
459 	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);
460 
461 	return 0;
462 }
463 
/*
 * vangogh_init_smc_tables - set up SMU tables and record the CPU core count.
 *
 * Initializes the driver-side table caches, allocates the dpm context,
 * and determines smu->cpu_core_num, which later sizes the per-core CCLK
 * copies out of the metrics table.  Returns 0 on success or a negative
 * errno from any of the init steps.
 */
static int vangogh_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = vangogh_tables_init(smu);
	if (ret)
		return ret;

	ret = vangogh_allocate_dpm_context(smu);
	if (ret)
		return ret;

#ifdef CONFIG_X86
	/* AMD x86 APU only */
#ifdef __linux__
	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
#else
	{
		/* OpenBSD: derive the core count from CPUID leaf 4 (EAX[31:26] + 1) */
		uint32_t eax, ebx, ecx, edx;
		CPUID_LEAF(4, 0, eax, ebx, ecx, edx);
		smu->cpu_core_num = ((eax >> 26) & 0x3f) + 1;
	}
#endif
#else
	/* non-x86 build: fixed fallback -- presumably matches shipping Vangogh parts */
	smu->cpu_core_num = 4;
#endif

	return smu_v11_0_init_smc_tables(smu);
}
493 
494 static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
495 {
496 	int ret = 0;
497 
498 	if (enable) {
499 		/* vcn dpm on is a prerequisite for vcn power gate messages */
500 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
501 		if (ret)
502 			return ret;
503 	} else {
504 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
505 		if (ret)
506 			return ret;
507 	}
508 
509 	return ret;
510 }
511 
512 static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
513 {
514 	int ret = 0;
515 
516 	if (enable) {
517 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
518 		if (ret)
519 			return ret;
520 	} else {
521 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
522 		if (ret)
523 			return ret;
524 	}
525 
526 	return ret;
527 }
528 
529 static bool vangogh_is_dpm_running(struct smu_context *smu)
530 {
531 	struct amdgpu_device *adev = smu->adev;
532 	int ret = 0;
533 	uint64_t feature_enabled;
534 
535 	/* we need to re-init after suspend so return false */
536 	if (adev->in_suspend)
537 		return false;
538 
539 	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
540 
541 	if (ret)
542 		return false;
543 
544 	return !!(feature_enabled & SMC_DPM_FEATURE);
545 }
546 
547 static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
548 						uint32_t dpm_level, uint32_t *freq)
549 {
550 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
551 
552 	if (!clk_table || clk_type >= SMU_CLK_COUNT)
553 		return -EINVAL;
554 
555 	switch (clk_type) {
556 	case SMU_SOCCLK:
557 		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
558 			return -EINVAL;
559 		*freq = clk_table->SocClocks[dpm_level];
560 		break;
561 	case SMU_VCLK:
562 		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
563 			return -EINVAL;
564 		*freq = clk_table->VcnClocks[dpm_level].vclk;
565 		break;
566 	case SMU_DCLK:
567 		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
568 			return -EINVAL;
569 		*freq = clk_table->VcnClocks[dpm_level].dclk;
570 		break;
571 	case SMU_UCLK:
572 	case SMU_MCLK:
573 		if (dpm_level >= clk_table->NumDfPstatesEnabled)
574 			return -EINVAL;
575 		*freq = clk_table->DfPstateTable[dpm_level].memclk;
576 
577 		break;
578 	case SMU_FCLK:
579 		if (dpm_level >= clk_table->NumDfPstatesEnabled)
580 			return -EINVAL;
581 		*freq = clk_table->DfPstateTable[dpm_level].fclk;
582 		break;
583 	default:
584 		return -EINVAL;
585 	}
586 
587 	return 0;
588 }
589 
/*
 * vangogh_print_legacy_clk_levels - format clock levels into a sysfs buffer.
 *
 * Legacy (interface version < 3) variant of the pp_dpm_* sysfs read.
 * First resolves the current value of @clk_type (from the metrics table,
 * or via an SMU message for FCLK), then prints each enabled DPM level,
 * marking the one matching the current value with '*'.  OD_* types only
 * print in manual performance-level mode.  Returns the number of bytes
 * written, or a negative errno.
 */
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, idx, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* first pass: determine the current value (and level count) for @clk_type */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		/* FCLK is not in the legacy metrics struct; query it from the SMU */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	/* second pass: print each enabled level, starring the current one */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			/* FCLK/MCLK tables are traversed in reverse so output is ascending */
			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current value not in the table: print it on its own line */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}
691 
/*
 * vangogh_print_clk_levels - format clock levels into a sysfs buffer.
 *
 * Current (interface version >= 3) variant of the pp_dpm_* sysfs read.
 * Resolves the current value of @clk_type (from the metrics table, or
 * via an SMU message for FCLK/GFXCLK), then prints each enabled DPM
 * level, marking the one matching the current value with '*'.  GFXCLK
 * has no level table: it is shown as three synthetic levels (hard-min,
 * standard, soft-max).  OD_* types only print in manual performance-level
 * mode.  Returns the number of bytes written, or a negative errno.
 */
static int vangogh_print_clk_levels(struct smu_context *smu,
			enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, idx, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* first pass: determine the current value (and level count) for @clk_type */
	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n",  smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
			(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
			(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* the level 3 ~ 6 of socclk use the same frequency for vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret) {
			return ret;
		}
		break;
	default:
		break;
	}

	/* second pass: print each enabled level, starring the current one */
	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			/* FCLK/MCLK tables are traversed in reverse so output is ascending */
			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		/* current value not in the table: print it on its own line */
		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* synthesize 3 levels: 0 = effective min, 1 = standard pstate, 2 = effective max */
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value  == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
819 
820 static int vangogh_common_print_clk_levels(struct smu_context *smu,
821 			enum smu_clk_type clk_type, char *buf)
822 {
823 	struct amdgpu_device *adev = smu->adev;
824 	uint32_t if_version;
825 	int ret = 0;
826 
827 	ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
828 	if (ret) {
829 		dev_err(adev->dev, "Failed to get smu if version!\n");
830 		return ret;
831 	}
832 
833 	if (if_version < 0x3)
834 		ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
835 	else
836 		ret = vangogh_print_clk_levels(smu, clk_type, buf);
837 
838 	return ret;
839 }
840 
841 static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
842 					 enum amd_dpm_forced_level level,
843 					 uint32_t *vclk_mask,
844 					 uint32_t *dclk_mask,
845 					 uint32_t *mclk_mask,
846 					 uint32_t *fclk_mask,
847 					 uint32_t *soc_mask)
848 {
849 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
850 
851 	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
852 		if (mclk_mask)
853 			*mclk_mask = clk_table->NumDfPstatesEnabled - 1;
854 
855 		if (fclk_mask)
856 			*fclk_mask = clk_table->NumDfPstatesEnabled - 1;
857 
858 		if (soc_mask)
859 			*soc_mask = 0;
860 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
861 		if (mclk_mask)
862 			*mclk_mask = 0;
863 
864 		if (fclk_mask)
865 			*fclk_mask = 0;
866 
867 		if (soc_mask)
868 			*soc_mask = 1;
869 
870 		if (vclk_mask)
871 			*vclk_mask = 1;
872 
873 		if (dclk_mask)
874 			*dclk_mask = 1;
875 	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
876 		if (mclk_mask)
877 			*mclk_mask = 0;
878 
879 		if (fclk_mask)
880 			*fclk_mask = 0;
881 
882 		if (soc_mask)
883 			*soc_mask = 1;
884 
885 		if (vclk_mask)
886 			*vclk_mask = 1;
887 
888 		if (dclk_mask)
889 			*dclk_mask = 1;
890 	}
891 
892 	return 0;
893 }
894 
895 static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
896 				enum smu_clk_type clk_type)
897 {
898 	enum smu_feature_mask feature_id = 0;
899 
900 	switch (clk_type) {
901 	case SMU_MCLK:
902 	case SMU_UCLK:
903 	case SMU_FCLK:
904 		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
905 		break;
906 	case SMU_GFXCLK:
907 	case SMU_SCLK:
908 		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
909 		break;
910 	case SMU_SOCCLK:
911 		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
912 		break;
913 	case SMU_VCLK:
914 	case SMU_DCLK:
915 		feature_id = SMU_FEATURE_VCN_DPM_BIT;
916 		break;
917 	default:
918 		return true;
919 	}
920 
921 	if (!smu_cmn_feature_is_enabled(smu, feature_id))
922 		return false;
923 
924 	return true;
925 }
926 
/*
 * Retrieve the ultimate (absolute) min and/or max frequency of @clk_type.
 *
 * If DPM is disabled for the clock, both bounds collapse to the boot-time
 * clock value.  Otherwise the max is looked up via the PROFILE_PEAK level
 * masks and the min via the PROFILE_MIN_MCLK level masks.
 *
 * @min/@max: output pointers in MHz; either may be NULL to skip it.
 * Returns 0 on success or a negative errno.
 */
static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t *min,
					uint32_t *max)
{
	int ret = 0;
	uint32_t soc_mask;
	uint32_t vclk_mask;
	uint32_t dclk_mask;
	uint32_t mclk_mask;
	uint32_t fclk_mask;
	uint32_t clock_limit;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
		/* DPM off: report the boot value for both bounds. */
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
	if (max) {
		/* Peak-profile masks give the highest level per clock. */
		ret = vangogh_get_profiling_clk_mask(smu,
							AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
							&vclk_mask,
							&dclk_mask,
							&mclk_mask,
							&fclk_mask,
							&soc_mask);
		if (ret)
			goto failed;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
	if (min) {
		/*
		 * MIN_MCLK masks only cover mclk/fclk/soc; vclk/dclk are
		 * zeroed explicitly below to select level 0.
		 */
		ret = vangogh_get_profiling_clk_mask(smu,
						     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
						     NULL,
						     NULL,
						     &mclk_mask,
						     &fclk_mask,
						     &soc_mask);
		if (ret)
			goto failed;

		vclk_mask = dclk_mask = 0;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	return ret;
}
1066 
1067 static int vangogh_get_power_profile_mode(struct smu_context *smu,
1068 					   char *buf)
1069 {
1070 	uint32_t i, size = 0;
1071 	int16_t workload_type = 0;
1072 
1073 	if (!buf)
1074 		return -EINVAL;
1075 
1076 	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
1077 		/*
1078 		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
1079 		 * Not all profile modes are supported on vangogh.
1080 		 */
1081 		workload_type = smu_cmn_to_asic_specific_index(smu,
1082 							       CMN2ASIC_MAPPING_WORKLOAD,
1083 							       i);
1084 
1085 		if (workload_type < 0)
1086 			continue;
1087 
1088 		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
1089 			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1090 	}
1091 
1092 	return size;
1093 }
1094 
1095 static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1096 {
1097 	int workload_type, ret;
1098 	uint32_t profile_mode = input[size];
1099 
1100 	if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
1101 		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
1102 		return -EINVAL;
1103 	}
1104 
1105 	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
1106 			profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
1107 		return 0;
1108 
1109 	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1110 	workload_type = smu_cmn_to_asic_specific_index(smu,
1111 						       CMN2ASIC_MAPPING_WORKLOAD,
1112 						       profile_mode);
1113 	if (workload_type < 0) {
1114 		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
1115 					profile_mode);
1116 		return -EINVAL;
1117 	}
1118 
1119 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1120 				    1 << workload_type,
1121 				    NULL);
1122 	if (ret) {
1123 		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
1124 					workload_type);
1125 		return ret;
1126 	}
1127 
1128 	smu->power_profile_mode = profile_mode;
1129 
1130 	return 0;
1131 }
1132 
/*
 * Constrain @clk_type to [@min, @max] (MHz) via paired hard-min /
 * soft-max SMU messages.  Silently succeeds when DPM for the clock is
 * disabled.  Returns 0 or a negative errno from the SMU message path.
 *
 * NOTE(review): for VCLK the frequency is shifted into the upper 16 bits
 * of the SetHardMinVcn/SetSoftMaxVcn parameter, while DCLK uses the low
 * 16 bits of the same messages — presumably the PMFW packs both VCN
 * clocks into one parameter word; confirm against smu_v11_5_ppsmc.h.
 */
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinGfxClk,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxGfxClk,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinFclkByFreq,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxFclkByFreq,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinSocclkByFreq,
							min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxSocclkByFreq,
							max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		/* vclk occupies the upper 16 bits of the VCN message param */
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min << 16, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max << 16, NULL);
		if (ret)
			return ret;
		break;
	case SMU_DCLK:
		/* dclk occupies the lower 16 bits of the VCN message param */
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
1214 
/*
 * Force @clk_type to the DPM level range encoded in @mask: the lowest
 * set bit selects the hard-min level and the highest set bit the
 * soft-max level.  Level indices are translated to frequencies via the
 * DPM clock table before being sent to the SMU.  Clock types not
 * handled here are silently ignored (returns 0).
 */
static int vangogh_force_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0 ;

	/* lowest/highest set bit -> min/max DPM level index */
	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxSocclkByFreq,
								max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinSocclkByFreq,
								min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxFclkByFreq,
								max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinFclkByFreq,
								min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;


		/* vclk occupies the upper 16 bits of the VCN message param */
		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetHardMinVcn,
								min_freq << 16, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
								SMU_MSG_SetSoftMaxVcn,
								max_freq << 16, NULL);
		if (ret)
			return ret;

		break;
	case SMU_DCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
							clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;

		/* dclk occupies the lower 16 bits of the VCN message param */
		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetHardMinVcn,
							min_freq, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
							SMU_MSG_SetSoftMaxVcn,
							max_freq, NULL);
		if (ret)
			return ret;

		break;
	default:
		break;
	}

	return ret;
}
1321 
1322 static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
1323 {
1324 	int ret = 0, i = 0;
1325 	uint32_t min_freq, max_freq, force_freq;
1326 	enum smu_clk_type clk_type;
1327 
1328 	enum smu_clk_type clks[] = {
1329 		SMU_SOCCLK,
1330 		SMU_VCLK,
1331 		SMU_DCLK,
1332 		SMU_FCLK,
1333 	};
1334 
1335 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
1336 		clk_type = clks[i];
1337 		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1338 		if (ret)
1339 			return ret;
1340 
1341 		force_freq = highest ? max_freq : min_freq;
1342 		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
1343 		if (ret)
1344 			return ret;
1345 	}
1346 
1347 	return ret;
1348 }
1349 
1350 static int vangogh_unforce_dpm_levels(struct smu_context *smu)
1351 {
1352 	int ret = 0, i = 0;
1353 	uint32_t min_freq, max_freq;
1354 	enum smu_clk_type clk_type;
1355 
1356 	struct clk_feature_map {
1357 		enum smu_clk_type clk_type;
1358 		uint32_t	feature;
1359 	} clk_feature_map[] = {
1360 		{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
1361 		{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
1362 		{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
1363 		{SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
1364 	};
1365 
1366 	for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
1367 
1368 		if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
1369 		    continue;
1370 
1371 		clk_type = clk_feature_map[i].clk_type;
1372 
1373 		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
1374 
1375 		if (ret)
1376 			return ret;
1377 
1378 		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1379 
1380 		if (ret)
1381 			return ret;
1382 	}
1383 
1384 	return ret;
1385 }
1386 
1387 static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
1388 {
1389 	int ret = 0;
1390 	uint32_t socclk_freq = 0, fclk_freq = 0;
1391 	uint32_t vclk_freq = 0, dclk_freq = 0;
1392 
1393 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
1394 	if (ret)
1395 		return ret;
1396 
1397 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
1398 	if (ret)
1399 		return ret;
1400 
1401 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
1402 	if (ret)
1403 		return ret;
1404 
1405 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
1406 	if (ret)
1407 		return ret;
1408 
1409 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
1410 	if (ret)
1411 		return ret;
1412 
1413 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
1414 	if (ret)
1415 		return ret;
1416 
1417 	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
1418 	if (ret)
1419 		return ret;
1420 
1421 	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
1422 	if (ret)
1423 		return ret;
1424 
1425 	return ret;
1426 }
1427 
/*
 * Apply a forced DPM performance level.
 *
 * Each branch records the target gfx hard-min / soft-max into the smu
 * context and applies the matching per-clock policy; the gfx limits and
 * (on new enough firmware) the per-core CPU soft limits are then pushed
 * to the SMU at the end.  MANUAL and PROFILE_EXIT return without
 * touching anything.
 */
static int vangogh_set_performance_level(struct smu_context *smu,
					enum amd_dpm_forced_level level)
{
	int ret = 0, i;
	uint32_t soc_mask, mclk_mask, fclk_mask;
	uint32_t vclk_mask = 0, dclk_mask = 0;

	/* CPU soft limits always revert to their defaults first */
	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		/* pin gfx: both min and max set to the default soft max */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;


		ret = vangogh_force_dpm_limit_value(smu, true);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		/* pin gfx: both min and max set to the default hard min */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;

		ret = vangogh_force_dpm_limit_value(smu, false);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		/* restore full default gfx range and unforce other clocks */
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_unforce_dpm_levels(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;

		ret = vangogh_get_profiling_clk_mask(smu, level,
							&vclk_mask,
							&dclk_mask,
							&mclk_mask,
							&fclk_mask,
							&soc_mask);
		if (ret)
			return ret;

		/* NOTE(review): return values ignored here (best-effort) */
		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_get_profiling_clk_mask(smu, level,
							NULL,
							NULL,
							&mclk_mask,
							&fclk_mask,
							NULL);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;

		ret = vangogh_set_peak_clock_by_device(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		return 0;
	}

	/* push the chosen gfx limits to the SMU */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
					      smu->gfx_actual_hard_min_freq, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      smu->gfx_actual_soft_max_freq, NULL);
	if (ret)
		return ret;

	/* per-core cclk limit messages require PMFW >= 0x43f1b00 */
	if (smu->adev->pm.fw_version >= 0x43f1b00) {
		for (i = 0; i < smu->cpu_core_num; i++) {
			/* core index goes in bits [23:20] of the param */
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret)
				return ret;

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}
1545 
/*
 * Read one hwmon/pp sensor value from the cached SMU metrics table.
 *
 * @data/@size: output buffer and its size in bytes; *size is set to the
 * number of bytes actually written (4 for scalar sensors, one uint16
 * per core for CPU_CLK).  Clock sensors are converted from the metrics
 * table's 10 kHz units to Hz-domain values by the *100 scaling.
 * Returns 0, -EINVAL on NULL args, or -EOPNOTSUPP for unknown sensors.
 */
static int vangogh_read_sensor(struct smu_context *smu,
				 enum amd_pp_sensors sensor,
				 void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_AVERAGE_GFXACTIVITY,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_AVERAGE_SOCKETPOWER,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_CURR_SOCKETPOWER,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_TEMPERATURE_EDGE,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_TEMPERATURE_HOTSPOT,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_CURR_UCLK,
						   (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_CURR_GFXCLK,
						   (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_VOLTAGE_VDDGFX,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_VOLTAGE_VDDSOC,
						   (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_CPU_CLK:
		/* returns one 16-bit frequency per CPU core */
		ret = vangogh_common_get_smu_metrics_data(smu,
						   METRICS_AVERAGE_CPUCLK,
						   (uint32_t *)data);
		*size = smu->cpu_core_num * sizeof(uint16_t);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
1625 
1626 static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit)
1627 {
1628 	return smu_cmn_send_smc_msg_with_param(smu,
1629 					      SMU_MSG_GetThermalLimit,
1630 					      0, limit);
1631 }
1632 
1633 static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit)
1634 {
1635 	return smu_cmn_send_smc_msg_with_param(smu,
1636 					      SMU_MSG_SetReducedThermalLimit,
1637 					      limit, NULL);
1638 }
1639 
1640 
1641 static int vangogh_set_watermarks_table(struct smu_context *smu,
1642 				       struct pp_smu_wm_range_sets *clock_ranges)
1643 {
1644 	int i;
1645 	int ret = 0;
1646 	Watermarks_t *table = smu->smu_table.watermarks_table;
1647 
1648 	if (!table || !clock_ranges)
1649 		return -EINVAL;
1650 
1651 	if (clock_ranges) {
1652 		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1653 			clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1654 			return -EINVAL;
1655 
1656 		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
1657 			table->WatermarkRow[WM_DCFCLK][i].MinClock =
1658 				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
1659 			table->WatermarkRow[WM_DCFCLK][i].MaxClock =
1660 				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
1661 			table->WatermarkRow[WM_DCFCLK][i].MinMclk =
1662 				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
1663 			table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
1664 				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
1665 
1666 			table->WatermarkRow[WM_DCFCLK][i].WmSetting =
1667 				clock_ranges->reader_wm_sets[i].wm_inst;
1668 		}
1669 
1670 		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
1671 			table->WatermarkRow[WM_SOCCLK][i].MinClock =
1672 				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
1673 			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1674 				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
1675 			table->WatermarkRow[WM_SOCCLK][i].MinMclk =
1676 				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
1677 			table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
1678 				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
1679 
1680 			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
1681 				clock_ranges->writer_wm_sets[i].wm_inst;
1682 		}
1683 
1684 		smu->watermarks_bitmap |= WATERMARKS_EXIST;
1685 	}
1686 
1687 	/* pass data to smu controller */
1688 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1689 	     !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1690 		ret = smu_cmn_write_watermarks_table(smu);
1691 		if (ret) {
1692 			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
1693 			return ret;
1694 		}
1695 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1696 	}
1697 
1698 	return 0;
1699 }
1700 
/*
 * Fill a v2.3 gpu_metrics structure from the legacy SMU metrics layout
 * (interface version < 0x3).  *table is pointed at the context-owned
 * metrics buffer; returns its size or a negative errno.
 */
static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	/* bypass_cache=true: always fetch fresh metrics from the SMU */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	/* legacy layout exposes exactly 4 core entries */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	/* legacy table has no separate fclk field; uclk is reused */
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}
1757 
/*
 * Fill a v2.2 gpu_metrics structure from the legacy SMU metrics layout
 * (interface version < 0x3).  *table is pointed at the context-owned
 * metrics buffer; returns its size or a negative errno.
 */
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	/* bypass_cache=true: always fetch fresh metrics from the SMU */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	/* legacy layout exposes exactly 4 core entries */
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	/* legacy table has no separate fclk field; uclk is reused */
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
1814 
/*
 * Fill a v2.3 gpu_metrics structure from the non-legacy SMU metrics
 * layout, which distinguishes Current (instantaneous) and Average
 * sample sets.  *table is pointed at the context-owned metrics buffer;
 * returns its size or a negative errno.
 */
static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	/* bypass_cache=true: always fetch fresh metrics from the SMU */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	/* instantaneous temperatures */
	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	/* windowed-average temperatures */
	gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
	gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
	memcpy(&gpu_metrics->average_temperature_core[0],
		&metrics.Average.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	/* no separate fclk field in the metrics table; uclk is reused */
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}
1885 
/*
 * vangogh_get_gpu_metrics_v2_4 - fill a v2.4 gpu_metrics table from the SMU.
 *
 * @smu:   SMU context
 * @table: out-parameter set to the driver's cached gpu_metrics buffer
 *
 * Takes a fresh metrics snapshot from firmware and translates it into the
 * gpu_metrics_v2_4 layout (adds average temperatures, voltages and currents
 * over v2.3).  Returns sizeof(struct gpu_metrics_v2_4) on success so the
 * caller knows how many bytes *table points to, or a negative errno.
 */
static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu,
					    void **table)
{
	SmuMetrics_t metrics;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_4 *gpu_metrics =
				(struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table;
	int ret = 0;

	/* bypass_cache=true: always fetch a fresh sample from the SMU */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4);

	/* Instantaneous temperatures; 4 core slots and 1 L3 slot are filled */
	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.Current.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	/* Averaged temperatures come from the firmware's Average block */
	gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
	gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
	memcpy(&gpu_metrics->average_temperature_core[0],
	       &metrics.Average.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	/* Power/Voltage/Current index mapping: [0]=CPU, [1]=SoC, [2]=GFX */
	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];

	gpu_metrics->average_cpu_voltage = metrics.Current.Voltage[0];
	gpu_metrics->average_soc_voltage = metrics.Current.Voltage[1];
	gpu_metrics->average_gfx_voltage = metrics.Current.Voltage[2];

	gpu_metrics->average_cpu_current = metrics.Current.Current[0];
	gpu_metrics->average_soc_current = metrics.Current.Current[1];
	gpu_metrics->average_gfx_current = metrics.Current.Current[2];

	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.Average.CorePower[0],
	       sizeof(uint16_t) * 4);

	/* UCLK and FCLK are both sourced from the single MemclkFrequency
	 * counter the firmware reports. */
	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.Current.CoreFrequency[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	/* Raw ASIC throttler bits plus the ASIC-independent translation */
	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_4);
}
1965 
/*
 * vangogh_get_gpu_metrics - fill a v2.2 gpu_metrics table from the SMU.
 *
 * @smu:   SMU context
 * @table: out-parameter set to the driver's cached gpu_metrics buffer
 *
 * Older metrics format used when firmware does not qualify for v2.3/v2.4
 * (see vangogh_common_get_gpu_metrics()).  Returns
 * sizeof(struct gpu_metrics_v2_2) on success, negative errno on failure.
 */
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	/* bypass_cache=true: always fetch a fresh sample from the SMU */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	/* Instantaneous temperatures; 4 core slots and 1 L3 slot are filled */
	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	/* Power index mapping: [0]=CPU, [1]=SoC, [2]=GFX */
	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	/* UCLK and FCLK are both sourced from the single MemclkFrequency
	 * counter the firmware reports. */
	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	/* Raw ASIC throttler bits plus the ASIC-independent translation */
	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
							   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}
2029 
2030 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
2031 				      void **table)
2032 {
2033 	uint32_t if_version;
2034 	uint32_t smu_version;
2035 	uint32_t smu_program;
2036 	uint32_t fw_version;
2037 	int ret = 0;
2038 
2039 	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
2040 	if (ret)
2041 		return ret;
2042 
2043 	smu_program = (smu_version >> 24) & 0xff;
2044 	fw_version = smu_version & 0xffffff;
2045 	if (smu_program == 6) {
2046 		if (fw_version >= 0x3F0800)
2047 			ret = vangogh_get_gpu_metrics_v2_4(smu, table);
2048 		else
2049 			ret = vangogh_get_gpu_metrics_v2_3(smu, table);
2050 
2051 	} else {
2052 		if (smu_version >= 0x043F3E00) {
2053 			if (if_version < 0x3)
2054 				ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
2055 			else
2056 				ret = vangogh_get_gpu_metrics_v2_3(smu, table);
2057 		} else {
2058 			if (if_version < 0x3)
2059 				ret = vangogh_get_legacy_gpu_metrics(smu, table);
2060 			else
2061 				ret = vangogh_get_gpu_metrics(smu, table);
2062 		}
2063 	}
2064 
2065 	return ret;
2066 }
2067 
2068 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
2069 					long input[], uint32_t size)
2070 {
2071 	int ret = 0;
2072 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2073 
2074 	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
2075 		dev_warn(smu->adev->dev,
2076 			"pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
2077 		return -EINVAL;
2078 	}
2079 
2080 	switch (type) {
2081 	case PP_OD_EDIT_CCLK_VDDC_TABLE:
2082 		if (size != 3) {
2083 			dev_err(smu->adev->dev, "Input parameter number not correct (should be 4 for processor)\n");
2084 			return -EINVAL;
2085 		}
2086 		if (input[0] >= smu->cpu_core_num) {
2087 			dev_err(smu->adev->dev, "core index is overflow, should be less than %d\n",
2088 				smu->cpu_core_num);
2089 		}
2090 		smu->cpu_core_id_select = input[0];
2091 		if (input[1] == 0) {
2092 			if (input[2] < smu->cpu_default_soft_min_freq) {
2093 				dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
2094 					input[2], smu->cpu_default_soft_min_freq);
2095 				return -EINVAL;
2096 			}
2097 			smu->cpu_actual_soft_min_freq = input[2];
2098 		} else if (input[1] == 1) {
2099 			if (input[2] > smu->cpu_default_soft_max_freq) {
2100 				dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
2101 					input[2], smu->cpu_default_soft_max_freq);
2102 				return -EINVAL;
2103 			}
2104 			smu->cpu_actual_soft_max_freq = input[2];
2105 		} else {
2106 			return -EINVAL;
2107 		}
2108 		break;
2109 	case PP_OD_EDIT_SCLK_VDDC_TABLE:
2110 		if (size != 2) {
2111 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
2112 			return -EINVAL;
2113 		}
2114 
2115 		if (input[0] == 0) {
2116 			if (input[1] < smu->gfx_default_hard_min_freq) {
2117 				dev_warn(smu->adev->dev,
2118 					"Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
2119 					input[1], smu->gfx_default_hard_min_freq);
2120 				return -EINVAL;
2121 			}
2122 			smu->gfx_actual_hard_min_freq = input[1];
2123 		} else if (input[0] == 1) {
2124 			if (input[1] > smu->gfx_default_soft_max_freq) {
2125 				dev_warn(smu->adev->dev,
2126 					"Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
2127 					input[1], smu->gfx_default_soft_max_freq);
2128 				return -EINVAL;
2129 			}
2130 			smu->gfx_actual_soft_max_freq = input[1];
2131 		} else {
2132 			return -EINVAL;
2133 		}
2134 		break;
2135 	case PP_OD_RESTORE_DEFAULT_TABLE:
2136 		if (size != 0) {
2137 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
2138 			return -EINVAL;
2139 		} else {
2140 			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
2141 			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
2142 			smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
2143 			smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
2144 		}
2145 		break;
2146 	case PP_OD_COMMIT_DPM_TABLE:
2147 		if (size != 0) {
2148 			dev_err(smu->adev->dev, "Input parameter number not correct\n");
2149 			return -EINVAL;
2150 		} else {
2151 			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
2152 				dev_err(smu->adev->dev,
2153 					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
2154 					smu->gfx_actual_hard_min_freq,
2155 					smu->gfx_actual_soft_max_freq);
2156 				return -EINVAL;
2157 			}
2158 
2159 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
2160 									smu->gfx_actual_hard_min_freq, NULL);
2161 			if (ret) {
2162 				dev_err(smu->adev->dev, "Set hard min sclk failed!");
2163 				return ret;
2164 			}
2165 
2166 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
2167 									smu->gfx_actual_soft_max_freq, NULL);
2168 			if (ret) {
2169 				dev_err(smu->adev->dev, "Set soft max sclk failed!");
2170 				return ret;
2171 			}
2172 
2173 			if (smu->adev->pm.fw_version < 0x43f1b00) {
2174 				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
2175 				break;
2176 			}
2177 
2178 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
2179 							      ((smu->cpu_core_id_select << 20)
2180 							       | smu->cpu_actual_soft_min_freq),
2181 							      NULL);
2182 			if (ret) {
2183 				dev_err(smu->adev->dev, "Set hard min cclk failed!");
2184 				return ret;
2185 			}
2186 
2187 			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
2188 							      ((smu->cpu_core_id_select << 20)
2189 							       | smu->cpu_actual_soft_max_freq),
2190 							      NULL);
2191 			if (ret) {
2192 				dev_err(smu->adev->dev, "Set soft max cclk failed!");
2193 				return ret;
2194 			}
2195 		}
2196 		break;
2197 	default:
2198 		return -ENOSYS;
2199 	}
2200 
2201 	return ret;
2202 }
2203 
2204 static int vangogh_set_default_dpm_tables(struct smu_context *smu)
2205 {
2206 	struct smu_table_context *smu_table = &smu->smu_table;
2207 
2208 	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
2209 }
2210 
2211 static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
2212 {
2213 	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
2214 
2215 	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
2216 	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
2217 	smu->gfx_actual_hard_min_freq = 0;
2218 	smu->gfx_actual_soft_max_freq = 0;
2219 
2220 	smu->cpu_default_soft_min_freq = 1400;
2221 	smu->cpu_default_soft_max_freq = 3500;
2222 	smu->cpu_actual_soft_min_freq = 0;
2223 	smu->cpu_actual_soft_max_freq = 0;
2224 
2225 	return 0;
2226 }
2227 
2228 static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
2229 {
2230 	DpmClocks_t *table = smu->smu_table.clocks_table;
2231 	int i;
2232 
2233 	if (!clock_table || !table)
2234 		return -EINVAL;
2235 
2236 	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
2237 		clock_table->SocClocks[i].Freq = table->SocClocks[i];
2238 		clock_table->SocClocks[i].Vol = table->SocVoltage[i];
2239 	}
2240 
2241 	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
2242 		clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
2243 		clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
2244 	}
2245 
2246 	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
2247 		clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
2248 		clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
2249 	}
2250 
2251 	return 0;
2252 }
2253 
2254 
2255 static int vangogh_system_features_control(struct smu_context *smu, bool en)
2256 {
2257 	struct amdgpu_device *adev = smu->adev;
2258 	int ret = 0;
2259 
2260 	if (adev->pm.fw_version >= 0x43f1700 && !en)
2261 		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
2262 						      RLC_STATUS_OFF, NULL);
2263 
2264 	return ret;
2265 }
2266 
/*
 * vangogh_post_smu_init - post-init GFXOFF enablement and WGP power-save.
 *
 * Enables GFXOFF when GFX DPM and GFX power gating are both available,
 * then (if some CUs are fused off) asks the SMU to keep only the required
 * number of WGPs active, respecting the always-on WGP mask programmed in
 * RLC_PG_ALWAYS_ON_WGP_MASK.
 */
static int vangogh_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tmp;
	int ret = 0;
	uint8_t aon_bits = 0;
	/* Two CUs in one WGP */
	uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
	/* Theoretical CU count if nothing were fused off */
	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* allow message will be sent after enable message on Vangogh*/
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
			(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
		if (ret) {
			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
			return ret;
		}
	} else {
		/* Without GFX DPM + PG, GFXOFF cannot work - mask it out. */
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		dev_info(adev->dev, "If GFX DPM or power gate disabled, disable GFXOFF\n");
	}

	/* if all CUs are active, no need to power off any WGPs */
	if (total_cu == adev->gfx.cu_info.number)
		return 0;

	/*
	 * Calculate the total bits number of always on WGPs for all SA/SEs in
	 * RLC_PG_ALWAYS_ON_WGP_MASK.
	 */
	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;

	/* Mask is per-SA; scale the popcount by SA/SE counts. */
	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* Do not request any WGPs less than set in the AON_WGP_MASK */
	if (aon_bits > req_active_wgps) {
		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
		return 0;
	} else {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
	}
}
2312 
2313 static int vangogh_mode_reset(struct smu_context *smu, int type)
2314 {
2315 	int ret = 0, index = 0;
2316 
2317 	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2318 					       SMU_MSG_GfxDeviceDriverReset);
2319 	if (index < 0)
2320 		return index == -EACCES ? 0 : index;
2321 
2322 	mutex_lock(&smu->message_lock);
2323 
2324 	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);
2325 
2326 	mutex_unlock(&smu->message_lock);
2327 
2328 	mdelay(10);
2329 
2330 	return ret;
2331 }
2332 
/* Convenience wrapper: request a SMU_RESET_MODE_2 GPU reset. */
static int vangogh_mode2_reset(struct smu_context *smu)
{
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
2337 
2338 /**
2339  * vangogh_get_gfxoff_status - Get gfxoff status
2340  *
2341  * @smu: amdgpu_device pointer
2342  *
2343  * Get current gfxoff status
2344  *
2345  * Return:
2346  * * 0	- GFXOFF (default if enabled).
2347  * * 1	- Transition out of GFX State.
2348  * * 2	- Not in GFXOFF.
2349  * * 3	- Transition into GFXOFF.
2350  */
2351 static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
2352 {
2353 	struct amdgpu_device *adev = smu->adev;
2354 	u32 reg, gfxoff_status;
2355 
2356 	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
2357 	gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
2358 		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
2359 
2360 	return gfxoff_status;
2361 }
2362 
/*
 * vangogh_get_power_limit - query the slow and fast PPT limits.
 *
 * @current_power_limit: optional out, slow PPT limit in watts
 * @default_power_limit: optional out, slow PPT limit in watts
 * @max_power_limit:     optional out, capped at 29 W
 *
 * The fast PPT values are stored in the smu_11_5 power context instead of
 * being returned.  Silently succeeds (leaving outputs untouched) on
 * firmware older than 0x43f1e00, which lacks the PPT query messages.
 */
static int vangogh_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit)
{
	struct smu_11_5_power_context *power_context =
								smu->smu_power.power_context;
	uint32_t ppt_limit;
	int ret = 0;

	if (smu->adev->pm.fw_version < 0x43f1e00)
		return ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	if (current_power_limit)
		*current_power_limit = ppt_limit / 1000;
	if (default_power_limit)
		*default_power_limit = ppt_limit / 1000;
	if (max_power_limit)
		*max_power_limit = 29;	/* NOTE(review): hard-coded W cap - confirm vs platform spec */

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	power_context->current_fast_ppt_limit =
			power_context->default_fast_ppt_limit = ppt_limit / 1000;
	power_context->max_fast_ppt_limit = 30;	/* NOTE(review): hard-coded W cap - confirm vs platform spec */

	return ret;
}
2401 
2402 static int vangogh_get_ppt_limit(struct smu_context *smu,
2403 								uint32_t *ppt_limit,
2404 								enum smu_ppt_limit_type type,
2405 								enum smu_ppt_limit_level level)
2406 {
2407 	struct smu_11_5_power_context *power_context =
2408 							smu->smu_power.power_context;
2409 
2410 	if (!power_context)
2411 		return -EOPNOTSUPP;
2412 
2413 	if (type == SMU_FAST_PPT_LIMIT) {
2414 		switch (level) {
2415 		case SMU_PPT_LIMIT_MAX:
2416 			*ppt_limit = power_context->max_fast_ppt_limit;
2417 			break;
2418 		case SMU_PPT_LIMIT_CURRENT:
2419 			*ppt_limit = power_context->current_fast_ppt_limit;
2420 			break;
2421 		case SMU_PPT_LIMIT_DEFAULT:
2422 			*ppt_limit = power_context->default_fast_ppt_limit;
2423 			break;
2424 		default:
2425 			break;
2426 		}
2427 	}
2428 
2429 	return 0;
2430 }
2431 
2432 static int vangogh_set_power_limit(struct smu_context *smu,
2433 				   enum smu_ppt_limit_type limit_type,
2434 				   uint32_t ppt_limit)
2435 {
2436 	struct smu_11_5_power_context *power_context =
2437 			smu->smu_power.power_context;
2438 	int ret = 0;
2439 
2440 	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
2441 		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
2442 		return -EOPNOTSUPP;
2443 	}
2444 
2445 	switch (limit_type) {
2446 	case SMU_DEFAULT_PPT_LIMIT:
2447 		ret = smu_cmn_send_smc_msg_with_param(smu,
2448 				SMU_MSG_SetSlowPPTLimit,
2449 				ppt_limit * 1000, /* convert from watt to milliwatt */
2450 				NULL);
2451 		if (ret)
2452 			return ret;
2453 
2454 		smu->current_power_limit = ppt_limit;
2455 		break;
2456 	case SMU_FAST_PPT_LIMIT:
2457 		ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
2458 		if (ppt_limit > power_context->max_fast_ppt_limit) {
2459 			dev_err(smu->adev->dev,
2460 				"New power limit (%d) is over the max allowed %d\n",
2461 				ppt_limit, power_context->max_fast_ppt_limit);
2462 			return ret;
2463 		}
2464 
2465 		ret = smu_cmn_send_smc_msg_with_param(smu,
2466 				SMU_MSG_SetFastPPTLimit,
2467 				ppt_limit * 1000, /* convert from watt to milliwatt */
2468 				NULL);
2469 		if (ret)
2470 			return ret;
2471 
2472 		power_context->current_fast_ppt_limit = ppt_limit;
2473 		break;
2474 	default:
2475 		return -EINVAL;
2476 	}
2477 
2478 	return ret;
2479 }
2480 
2481 /**
2482  * vangogh_set_gfxoff_residency
2483  *
2484  * @smu: amdgpu_device pointer
2485  * @start: start/stop residency log
2486  *
2487  * This function will be used to log gfxoff residency
2488  *
2489  *
2490  * Returns standard response codes.
2491  */
2492 static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
2493 {
2494 	int ret = 0;
2495 	u32 residency;
2496 	struct amdgpu_device *adev = smu->adev;
2497 
2498 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
2499 		return 0;
2500 
2501 	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
2502 					      start, &residency);
2503 	if (ret)
2504 		return ret;
2505 
2506 	if (!start)
2507 		adev->gfx.gfx_off_residency = residency;
2508 
2509 	return ret;
2510 }
2511 
2512 /**
2513  * vangogh_get_gfxoff_residency
2514  *
2515  * @smu: amdgpu_device pointer
2516  * @residency: placeholder for return value
2517  *
2518  * This function will be used to get gfxoff residency.
2519  *
2520  * Returns standard response codes.
2521  */
2522 static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
2523 {
2524 	struct amdgpu_device *adev = smu->adev;
2525 
2526 	*residency = adev->gfx.gfx_off_residency;
2527 
2528 	return 0;
2529 }
2530 
2531 /**
2532  * vangogh_get_gfxoff_entrycount - get gfxoff entry count
2533  *
2534  * @smu: amdgpu_device pointer
2535  * @entrycount: placeholder for return value
2536  *
2537  * This function will be used to get gfxoff entry count
2538  *
2539  * Returns standard response codes.
2540  */
2541 static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
2542 {
2543 	int ret = 0, value = 0;
2544 	struct amdgpu_device *adev = smu->adev;
2545 
2546 	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
2547 		return 0;
2548 
2549 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
2550 	*entrycount = value + adev->gfx.gfx_off_entrycount;
2551 
2552 	return ret;
2553 }
2554 
/* Van Gogh (APU) swsmu callback table, installed by vangogh_set_ppt_funcs(). */
static const struct pptable_funcs vangogh_ppt_funcs = {

	/* Firmware/table bring-up, shared smu_v11_0 plumbing */
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.init_smc_tables = vangogh_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	/* Messaging */
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	/* Multimedia power gating */
	.dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
	/* Sensors, features, metrics */
	.is_dpm_running = vangogh_is_dpm_running,
	.read_sensor = vangogh_read_sensor,
	.get_apu_thermal_limit = vangogh_get_apu_thermal_limit,
	.set_apu_thermal_limit = vangogh_set_apu_thermal_limit,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_watermarks_table = vangogh_set_watermarks_table,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.interrupt_work = smu_v11_0_interrupt_work,
	.get_gpu_metrics = vangogh_common_get_gpu_metrics,
	/* Clock/DPM control and overdrive */
	.od_edit_dpm_table = vangogh_od_edit_dpm_table,
	.print_clk_levels = vangogh_common_print_clk_levels,
	.set_default_dpm_table = vangogh_set_default_dpm_tables,
	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
	.system_features_control = vangogh_system_features_control,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_profile_mode = vangogh_set_power_profile_mode,
	.get_power_profile_mode = vangogh_get_power_profile_mode,
	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
	.force_clk_levels = vangogh_force_clk_levels,
	.set_performance_level = vangogh_set_performance_level,
	.post_init = vangogh_post_smu_init,
	.mode2_reset = vangogh_mode2_reset,
	/* GFXOFF control and accounting */
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.get_gfx_off_status = vangogh_get_gfxoff_status,
	.get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
	.get_gfx_off_residency = vangogh_get_gfxoff_residency,
	.set_gfx_off_residency = vangogh_set_gfxoff_residency,
	/* Power limits */
	.get_ppt_limit = vangogh_get_ppt_limit,
	.get_power_limit = vangogh_get_power_limit,
	.set_power_limit = vangogh_set_power_limit,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};
2602 
/*
 * vangogh_set_ppt_funcs - hook the Van Gogh tables and callbacks into the
 * common SMU context and configure the mailbox registers.
 */
void vangogh_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &vangogh_ppt_funcs;
	smu->message_map = vangogh_message_map;
	smu->feature_map = vangogh_feature_mask_map;
	smu->table_map = vangogh_table_map;
	smu->workload_map = vangogh_workload_map;
	/* Van Gogh is an APU; several common paths branch on this flag. */
	smu->is_apu = true;
	smu_v11_0_set_smu_mailbox_registers(smu);
}
2613