/*	$NetBSD: amdgpu_dpm.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dpm.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"

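/*
 * amdgpu_dpm_print_class_info - dump the UI classification and the
 * internal classification flags of an ATOM PowerPlay state to the
 * kernel log.
 */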
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

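/*
 * amdgpu_dpm_print_cap_info - dump the capability flags of a power
 * state (single display only, video playback, disallowed on DC power).
 */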
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

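/*
 * amdgpu_dpm_print_ps_status - mark whether a power state is the
 * current (c), requested (r) and/or boot (b) state of the device.
 */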
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

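/*
 * amdgpu_dpm_get_active_displays - recount the enabled CRTCs and
 * record them as a bitmask and a count in adev->pm.dpm
 * (new_active_crtcs / new_active_crtc_count), presumably consumed
 * when the DPM code re-evaluates the power state.
 */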
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}


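/*
 * amdgpu_dpm_get_vblank_time - return the vertical blanking interval
 * of the first active display, in microseconds.  Returns 0xffffffff
 * (effectively "no limit") when no display is enabled.
 */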
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

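/*
 * amdgpu_dpm_get_vrefresh - return the vertical refresh rate of the
 * first active display, or 0 if no display is enabled.
 */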
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

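/*
 * amdgpu_is_internal_thermal_sensor - true if the given thermal sensor
 * type is the GPU's own on-die sensor; external controllers (and the
 * combined ADT7473/EMC2103 "with internal" variants) return false.
 */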
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

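/*
 * Overlay views of the versioned ATOM PowerPlay and fan tables in the
 * video BIOS; which member is valid depends on the table revision
 * actually present at the parsed data offset.
 */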
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

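/*
 * amdgpu_parse_clk_voltage_dep_table - copy a clock/voltage dependency
 * table out of the ATOM BIOS into a driver-allocated array, combining
 * the split low/high clock fields into host-endian values.
 */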
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

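/*
 * amdgpu_get_platform_caps - read the platform capability flags and the
 * back-bias/voltage response times from the PowerPlayInfo data table.
 */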
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

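/*
 * amdgpu_parse_extended_power_table - walk the extended PowerPlay
 * tables in the video BIOS (fan table, clock/voltage dependency
 * tables, phase shedding limits, CAC/TDP data, VCE/UVD/SAMU/ACP
 * limits, PPM and PowerTune tables) and cache them in
 * adev->pm.dpm.dyn_state.  On allocation failure the tables parsed so
 * far are freed and an error is returned.
 */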
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

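/*
 * amdgpu_free_extended_power_table - release all of the dynamic-state
 * tables cached by amdgpu_parse_extended_power_table(); also used by
 * that function to unwind after an allocation failure.
 */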
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

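/* Human-readable names indexed by the ATOM_PP_THERMALCONTROLLER_* type codes. */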
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

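/*
 * amdgpu_add_thermal_controller - identify the thermal/fan controller
 * described in the PowerPlay table, record its type and fan parameters
 * in adev->pm, and, for recognized external controllers, register the
 * corresponding I2C device on the bus the BIOS points at.
 */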
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

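/*
 * amdgpu_get_pcie_gen_support - pick the PCIe generation to program.
 * An explicit asic_gen wins; otherwise default_gen is used when the
 * system link-speed mask (CAIL_PCIE_LINK_SPEED_SUPPORT_*) allows it,
 * falling back to gen1.
 */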
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

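/*
 * amdgpu_get_vce_clock_state - return the cached VCE clock state at
 * index idx, or NULL if the index is out of range.  The opaque handle
 * is the struct amdgpu_device pointer.
 */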
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

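/*
 * amdgpu_dpm_get_sclk - return the minimum (low = true) or maximum
 * supported graphics clock, from the software SMU where available,
 * otherwise via the powerplay get_sclk callback.
 */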
amdgpu_dpm_get_sclk(struct amdgpu_device * adev,bool low)912*41ec0267Sriastradh int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
913efa246c0Sriastradh {
914*41ec0267Sriastradh 	uint32_t clk_freq;
915*41ec0267Sriastradh 	int ret = 0;
916*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
917*41ec0267Sriastradh 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
918*41ec0267Sriastradh 					     low ? &clk_freq : NULL,
919*41ec0267Sriastradh 					     !low ? &clk_freq : NULL,
920*41ec0267Sriastradh 					     true);
921*41ec0267Sriastradh 		if (ret)
922efa246c0Sriastradh 			return 0;
923*41ec0267Sriastradh 		return clk_freq * 100;
924efa246c0Sriastradh 
925*41ec0267Sriastradh 	} else {
926*41ec0267Sriastradh 		return adev->powerplay.pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
927*41ec0267Sriastradh 	}
928*41ec0267Sriastradh }
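
/*
 * Both paths above are expected to report sclk in 10 kHz units: the swSMU
 * range query returns MHz (hence the * 100), matching the convention of
 * the legacy powerplay get_sclk callback.  The swSMU path folds errors
 * into a 0 return rather than a negative errno.
 */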
929*41ec0267Sriastradh 
930*41ec0267Sriastradh int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
931*41ec0267Sriastradh {
932*41ec0267Sriastradh 	uint32_t clk_freq;
933*41ec0267Sriastradh 	int ret = 0;
934*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
935*41ec0267Sriastradh 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
936*41ec0267Sriastradh 					     low ? &clk_freq : NULL,
937*41ec0267Sriastradh 					     !low ? &clk_freq : NULL,
938*41ec0267Sriastradh 					     true);
939*41ec0267Sriastradh 		if (ret)
940*41ec0267Sriastradh 			return 0;
941*41ec0267Sriastradh 		return clk_freq * 100;
942*41ec0267Sriastradh 
943*41ec0267Sriastradh 	} else {
944*41ec0267Sriastradh 		return adev->powerplay.pp_funcs->get_mclk(adev->powerplay.pp_handle, low);
945*41ec0267Sriastradh 	}
946*41ec0267Sriastradh }
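
/* Same unit convention as amdgpu_dpm_get_sclk() above, for the memory clock. */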
947*41ec0267Sriastradh 
948*41ec0267Sriastradh int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
949*41ec0267Sriastradh {
950*41ec0267Sriastradh 	int ret = 0;
951*41ec0267Sriastradh 	bool swsmu = is_support_sw_smu(adev);
952*41ec0267Sriastradh 
953*41ec0267Sriastradh 	switch (block_type) {
954*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_UVD:
955*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_VCE:
956*41ec0267Sriastradh 		if (swsmu) {
957*41ec0267Sriastradh 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
958*41ec0267Sriastradh 		} else if (adev->powerplay.pp_funcs &&
959*41ec0267Sriastradh 			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
960*41ec0267Sriastradh 			/*
961*41ec0267Sriastradh 			 * TODO: need a better lock mechanism
962*41ec0267Sriastradh 			 *
963*41ec0267Sriastradh 			 * Here adev->pm.mutex lock protection is enforced only
964*41ec0267Sriastradh 			 * for the UVD and VCE cases, since the other cases may
965*41ec0267Sriastradh 			 * already be protected by locks in amdgpu_pm.c.
966*41ec0267Sriastradh 			 * This is a quick fix for the deadlock issue below.
967*41ec0267Sriastradh 			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
968*41ec0267Sriastradh 			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
969*41ec0267Sriastradh 			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
970*41ec0267Sriastradh 			 *     ocltst         D    0  2028   2026 0x00000000
971*41ec0267Sriastradh 			 *     Call Trace:
972*41ec0267Sriastradh 			 *     __schedule+0x2c0/0x870
973*41ec0267Sriastradh 			 *     schedule+0x2c/0x70
974*41ec0267Sriastradh 			 *     schedule_preempt_disabled+0xe/0x10
975*41ec0267Sriastradh 			 *     __mutex_lock.isra.9+0x26d/0x4e0
976*41ec0267Sriastradh 			 *     __mutex_lock_slowpath+0x13/0x20
977*41ec0267Sriastradh 			 *     ? __mutex_lock_slowpath+0x13/0x20
978*41ec0267Sriastradh 			 *     mutex_lock+0x2f/0x40
979*41ec0267Sriastradh 			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
980*41ec0267Sriastradh 			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
981*41ec0267Sriastradh 			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
982*41ec0267Sriastradh 			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
983*41ec0267Sriastradh 			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
984*41ec0267Sriastradh 			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
985*41ec0267Sriastradh 			 */
986*41ec0267Sriastradh 			mutex_lock(&adev->pm.mutex);
987*41ec0267Sriastradh 			ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
988*41ec0267Sriastradh 				adev->powerplay.pp_handle, block_type, gate);
989*41ec0267Sriastradh 			mutex_unlock(&adev->pm.mutex);
990*41ec0267Sriastradh 		}
991*41ec0267Sriastradh 		break;
992*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_GFX:
993*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_VCN:
994*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_SDMA:
995*41ec0267Sriastradh 		if (swsmu)
996*41ec0267Sriastradh 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
997*41ec0267Sriastradh 		else if (adev->powerplay.pp_funcs &&
998*41ec0267Sriastradh 			 adev->powerplay.pp_funcs->set_powergating_by_smu)
999*41ec0267Sriastradh 			ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
1000*41ec0267Sriastradh 				adev->powerplay.pp_handle, block_type, gate);
1001*41ec0267Sriastradh 		break;
1002*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_JPEG:
1003*41ec0267Sriastradh 		if (swsmu)
1004*41ec0267Sriastradh 			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
1005*41ec0267Sriastradh 		break;
1006*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_GMC:
1007*41ec0267Sriastradh 	case AMD_IP_BLOCK_TYPE_ACP:
1008*41ec0267Sriastradh 		if (adev->powerplay.pp_funcs &&
1009*41ec0267Sriastradh 		    adev->powerplay.pp_funcs->set_powergating_by_smu)
1010*41ec0267Sriastradh 			ret = adev->powerplay.pp_funcs->set_powergating_by_smu(
1011*41ec0267Sriastradh 				adev->powerplay.pp_handle, block_type, gate);
1012*41ec0267Sriastradh 		break;
1013*41ec0267Sriastradh 	default:
1014*41ec0267Sriastradh 		break;
1015*41ec0267Sriastradh 	}
1016*41ec0267Sriastradh 
1017*41ec0267Sriastradh 	return ret;
1018*41ec0267Sriastradh }
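
/*
 * Illustrative use (a sketch; the real callers are the IP blocks'
 * set_powergating_state handlers).  gate == true powers the block down,
 * gate == false brings it back up:
 *
 *	if (amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD,
 *					      true))
 *		DRM_ERROR("UVD power gating failed\n");
 */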
1019*41ec0267Sriastradh 
1020*41ec0267Sriastradh int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
1021*41ec0267Sriastradh {
1022*41ec0267Sriastradh 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1023*41ec0267Sriastradh 	void *pp_handle = adev->powerplay.pp_handle;
1024*41ec0267Sriastradh 	struct smu_context *smu = &adev->smu;
1025*41ec0267Sriastradh 	int ret = 0;
1026*41ec0267Sriastradh 
1027*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
1028*41ec0267Sriastradh 		ret = smu_baco_enter(smu);
1029*41ec0267Sriastradh 	} else {
1030*41ec0267Sriastradh 		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1031*41ec0267Sriastradh 			return -ENOENT;
1032*41ec0267Sriastradh 
1033*41ec0267Sriastradh 		/* enter BACO state */
1034*41ec0267Sriastradh 		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1035*41ec0267Sriastradh 	}
1036*41ec0267Sriastradh 
1037*41ec0267Sriastradh 	return ret;
1038*41ec0267Sriastradh }
1039*41ec0267Sriastradh 
1040*41ec0267Sriastradh int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1041*41ec0267Sriastradh {
1042*41ec0267Sriastradh 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1043*41ec0267Sriastradh 	void *pp_handle = adev->powerplay.pp_handle;
1044*41ec0267Sriastradh 	struct smu_context *smu = &adev->smu;
1045*41ec0267Sriastradh 	int ret = 0;
1046*41ec0267Sriastradh 
1047*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
1048*41ec0267Sriastradh 		ret = smu_baco_exit(smu);
1049*41ec0267Sriastradh 	} else {
1050*41ec0267Sriastradh 		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1051*41ec0267Sriastradh 			return -ENOENT;
1052*41ec0267Sriastradh 
1053*41ec0267Sriastradh 		/* exit BACO state */
1054*41ec0267Sriastradh 		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1055*41ec0267Sriastradh 	}
1056*41ec0267Sriastradh 
1057*41ec0267Sriastradh 	return ret;
1058*41ec0267Sriastradh }
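
/*
 * amdgpu_dpm_baco_enter() and amdgpu_dpm_baco_exit() are normally used as
 * a pair; amdgpu_dpm_baco_reset() below runs the full cycle in one call.
 */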
1059*41ec0267Sriastradh 
1060*41ec0267Sriastradh int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1061*41ec0267Sriastradh 			     enum pp_mp1_state mp1_state)
1062*41ec0267Sriastradh {
1063*41ec0267Sriastradh 	int ret = 0;
1064*41ec0267Sriastradh 
1065*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
1066*41ec0267Sriastradh 		ret = smu_set_mp1_state(&adev->smu, mp1_state);
1067*41ec0267Sriastradh 	} else if (adev->powerplay.pp_funcs &&
1068*41ec0267Sriastradh 		   adev->powerplay.pp_funcs->set_mp1_state) {
1069*41ec0267Sriastradh 		ret = adev->powerplay.pp_funcs->set_mp1_state(
1070*41ec0267Sriastradh 				adev->powerplay.pp_handle,
1071*41ec0267Sriastradh 				mp1_state);
1072*41ec0267Sriastradh 	}
1073*41ec0267Sriastradh 
1074*41ec0267Sriastradh 	return ret;
1075*41ec0267Sriastradh }
1076*41ec0267Sriastradh 
1077*41ec0267Sriastradh bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1078*41ec0267Sriastradh {
1079*41ec0267Sriastradh 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1080*41ec0267Sriastradh 	void *pp_handle = adev->powerplay.pp_handle;
1081*41ec0267Sriastradh 	struct smu_context *smu = &adev->smu;
1082*41ec0267Sriastradh 	bool baco_cap;
1083*41ec0267Sriastradh 
1084*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
1085*41ec0267Sriastradh 		return smu_baco_is_support(smu);
1086*41ec0267Sriastradh 	} else {
1087*41ec0267Sriastradh 		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1088*41ec0267Sriastradh 			return false;
1089*41ec0267Sriastradh 
1090*41ec0267Sriastradh 		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1091*41ec0267Sriastradh 			return false;
1092*41ec0267Sriastradh 
1093*41ec0267Sriastradh 		return baco_cap ? true : false;
1094*41ec0267Sriastradh 	}
1095*41ec0267Sriastradh }
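
/*
 * Capability probe only: any failure to query the backend is reported as
 * "not supported" (false) rather than as an error, so the result can be
 * used directly in conditionals.
 */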
1096*41ec0267Sriastradh 
1097*41ec0267Sriastradh int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1098*41ec0267Sriastradh {
1099*41ec0267Sriastradh 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1100*41ec0267Sriastradh 	void *pp_handle = adev->powerplay.pp_handle;
1101*41ec0267Sriastradh 	struct smu_context *smu = &adev->smu;
1102*41ec0267Sriastradh 
1103*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
1104*41ec0267Sriastradh 		return smu_mode2_reset(smu);
1105*41ec0267Sriastradh 	} else {
1106*41ec0267Sriastradh 		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1107*41ec0267Sriastradh 			return -ENOENT;
1108*41ec0267Sriastradh 
1109*41ec0267Sriastradh 		return pp_funcs->asic_reset_mode_2(pp_handle);
1110*41ec0267Sriastradh 	}
1111*41ec0267Sriastradh }
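
/*
 * Mode-2 is generally a lighter-weight ASIC reset path than BACO; -ENOENT
 * here simply means the powerplay backend does not implement it.
 */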
1112*41ec0267Sriastradh 
1113*41ec0267Sriastradh int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1114*41ec0267Sriastradh {
1115*41ec0267Sriastradh 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1116*41ec0267Sriastradh 	void *pp_handle = adev->powerplay.pp_handle;
1117*41ec0267Sriastradh 	struct smu_context *smu = &adev->smu;
1118*41ec0267Sriastradh 	int ret = 0;
1119*41ec0267Sriastradh 
1120*41ec0267Sriastradh 	dev_info(adev->dev, "GPU BACO reset\n");
1121*41ec0267Sriastradh 
1122*41ec0267Sriastradh 	if (is_support_sw_smu(adev)) {
1123*41ec0267Sriastradh 		ret = smu_baco_enter(smu);
1124*41ec0267Sriastradh 		if (ret)
1125*41ec0267Sriastradh 			return ret;
1126*41ec0267Sriastradh 
1127*41ec0267Sriastradh 		ret = smu_baco_exit(smu);
1128*41ec0267Sriastradh 		if (ret)
1129*41ec0267Sriastradh 			return ret;
1130*41ec0267Sriastradh 	} else {
1131*41ec0267Sriastradh 		if (!pp_funcs
1132*41ec0267Sriastradh 		    || !pp_funcs->set_asic_baco_state)
1133*41ec0267Sriastradh 			return -ENOENT;
1134*41ec0267Sriastradh 
1135*41ec0267Sriastradh 		/* enter BACO state */
1136*41ec0267Sriastradh 		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1137*41ec0267Sriastradh 		if (ret)
1138*41ec0267Sriastradh 			return ret;
1139*41ec0267Sriastradh 
1140*41ec0267Sriastradh 		/* exit BACO state */
1141*41ec0267Sriastradh 		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1142*41ec0267Sriastradh 		if (ret)
1143*41ec0267Sriastradh 			return ret;
1144*41ec0267Sriastradh 	}
1145*41ec0267Sriastradh 
1146*41ec0267Sriastradh 	return 0;
1147*41ec0267Sriastradh }
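
/*
 * Sketch of a recovery-path caller (illustrative only; the actual reset
 * selection logic lives elsewhere in the driver):
 *
 *	if (amdgpu_dpm_is_baco_supported(adev))
 *		r = amdgpu_dpm_baco_reset(adev);
 *	else
 *		r = amdgpu_dpm_mode2_reset(adev);
 */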
1148*41ec0267Sriastradh 
1149*41ec0267Sriastradh int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1150*41ec0267Sriastradh 				    enum PP_SMC_POWER_PROFILE type,
1151*41ec0267Sriastradh 				    bool en)
1152*41ec0267Sriastradh {
1153*41ec0267Sriastradh 	int ret = 0;
1154*41ec0267Sriastradh 
1155*41ec0267Sriastradh 	if (is_support_sw_smu(adev))
1156*41ec0267Sriastradh 		ret = smu_switch_power_profile(&adev->smu, type, en);
1157*41ec0267Sriastradh 	else if (adev->powerplay.pp_funcs &&
1158*41ec0267Sriastradh 		 adev->powerplay.pp_funcs->switch_power_profile)
1159*41ec0267Sriastradh 		ret = adev->powerplay.pp_funcs->switch_power_profile(
1160*41ec0267Sriastradh 			adev->powerplay.pp_handle, type, en);
1161*41ec0267Sriastradh 
1162*41ec0267Sriastradh 	return ret;
1163*41ec0267Sriastradh }
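
/*
 * Illustrative use (a sketch): submission code can bracket a burst of work
 * with a workload-specific SMU profile, e.g.
 *
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	... submit work ...
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */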
1164*41ec0267Sriastradh 
1165*41ec0267Sriastradh int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1166*41ec0267Sriastradh 			       uint32_t pstate)
1167*41ec0267Sriastradh {
1168*41ec0267Sriastradh 	int ret = 0;
1169*41ec0267Sriastradh 
1170*41ec0267Sriastradh 	if (is_support_sw_smu_xgmi(adev))
1171*41ec0267Sriastradh 		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
1172*41ec0267Sriastradh 	else if (adev->powerplay.pp_funcs &&
1173*41ec0267Sriastradh 		 adev->powerplay.pp_funcs->set_xgmi_pstate)
1174*41ec0267Sriastradh 		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1175*41ec0267Sriastradh 								pstate);
1176*41ec0267Sriastradh 
1177*41ec0267Sriastradh 	return ret;
1178efa246c0Sriastradh }
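
/*
 * Only meaningful on XGMI-capable (multi-GPU hive) parts; when neither the
 * swSMU nor the powerplay backend provides an XGMI pstate hook, the call
 * succeeds as a no-op (ret stays 0).
 */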
1179