/*	$NetBSD: amdgpu_dpm.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $	*/

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dpm.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $");

#include <asm/byteorder.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}

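/*
 * Return the vblank interval of the first enabled CRTC, in
 * microseconds, or 0xffffffff if no display is active.  hw_mode.clock
 * is the pixel clock in kHz, so pixels * 1000 / kHz yields us.
 *
 * Worked example (hypothetical CEA 1080p timings, not taken from this
 * file): htotal = 2200, vblank_end = 1125, vdisplay = 1080,
 * v_border = 0 and clock = 148500 give
 * 2200 * 45 * 1000 / 148500 = 666 us.
 */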
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

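/*
 * Derive the (p, u) parameter pair from an interval i, a reference
 * clock r_c and a base shift p_b: i_c = i * r_c / 100, u is half the
 * bit length of (i_c >> p_b) rounded up, and p = i_c >> (2 * u).
 *
 * Worked example (hypothetical inputs): i = 1000, r_c = 40000,
 * p_b = 5 gives i_c = 400000; 400000 >> 5 = 12500 needs 14 bits, so
 * u = (14 + 1) / 2 = 7 and p = 400000 >> 14 = 24.
 */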
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

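/*
 * Derive the two activity thresholds *tl and *th around the target t
 * from the hysteresis h and the high/low frequencies fh and fl.
 * Returns -EINVAL if either frequency is zero or fl exceeds fh.
 *
 * Worked example (hypothetical inputs): t = 1000, h = 50, fh = 200 and
 * fl = 100 give k = 200, t1 = 100000, a = 955, ah = 96, al = 859, so
 * *th = 1000 - 96 = 904 and *tl = 1000 + 859 = 1859.
 */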
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

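/*
 * Copy an ATOM clock/voltage dependency table out of the BIOS image
 * into a natively ordered amdgpu table.  Each packed record stores a
 * 24-bit clock split across usClockLow/ucClockHigh, little-endian, and
 * the records are walked by their in-BIOS size rather than by C array
 * indexing.  The allocation is released by
 * amdgpu_free_extended_power_table().
 */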
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

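/*
 * Cache the PowerPlay platform capability flags and the back-bias and
 * voltage response times from the ATOM PowerPlayInfo data table.
 * Returns -EINVAL if the table header cannot be parsed.
 */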
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)((char *)mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

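/*
 * Parse the optional parts of the PowerPlay table: the fan table, the
 * clock/voltage dependency and phase shedding tables (table revision
 * 4), the CAC/TDP data (revision 5), and the extended-header tables
 * (VCE, UVD, SAMU, PPM, ACP, PowerTune and VDDGFX), each gated on the
 * table size and extended-header size actually present in the BIOS.
 * On any allocation failure everything parsed so far is released and
 * -ENOMEM is returned.  The "+ 1" offset arithmetic below skips what
 * appears to be a one-byte revision/count prefix in front of the
 * packed arrays.
 */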
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)((char *)mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)((char *)mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			((char *)mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)((char *)mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					((char *)mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					((char *)mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				((char *)mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

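/*
 * Read the thermal controller descriptor from the PowerPlay table,
 * record the fan tachometer parameters, and classify the sensor: the
 * known internal controllers set adev->pm.int_thermal_type, while
 * recognized external I2C parts are registered as i2c devices on the
 * GPIO-described bus so a hwmon driver can attach to them.
 */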
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		((char *)mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

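/*
 * Pick the PCIe generation to program: an explicit asic_gen wins;
 * otherwise default_gen is honored only when sys_mask confirms the
 * matching link speed, falling back to gen1.  For example (hypothetical
 * arguments), asic_gen = AMDGPU_PCIE_GEN_INVALID with default_gen =
 * AMDGPU_PCIE_GEN3 returns AMDGPU_PCIE_GEN3 only if DRM_PCIE_SPEED_80
 * is set in sys_mask.
 */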
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

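/*
 * Translate a PCIe lane count into the register encoding via the
 * lookup table below: 1, 2, 4, 8, 12 and 16 lanes map to 1..6, and any
 * other count (including anything over 16) encodes as 0, e.g.
 * amdgpu_encode_pci_lane_width(8) == 4.
 */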
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
964