/*	$NetBSD: amdgpu_dpm.c,v 1.5 2020/02/14 14:34:57 maya Exp $	*/

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dpm.c,v 1.5 2020/02/14 14:34:57 maya Exp $");

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

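/*
 * Decode the ATOM powerplay classification bitfields of a power state
 * and print the UI class plus any internal class flags to the kernel log.
 */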
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("uvd_mvc ");
	}
	printk("\n");
}

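/*
 * Print the capability flags of a power state: single-display-only,
 * video playback support, and whether the state is disallowed on DC
 * (battery) power.
 */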
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}

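/*
 * Print which roles a power state currently fills: "c" for the current
 * state, "r" for the requested state and "b" for the boot state.
 */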
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus: ");
	if (rps == adev->pm.dpm.current_ps)
		printk("c ");
	if (rps == adev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == adev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}


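/*
 * Return the vblank duration of the first enabled CRTC in microseconds:
 * htotal * (vblank_end - vdisplay + 2 * v_border) pixels divided by the
 * pixel clock in kHz.  Returns 0xffffffff (effectively "infinite") when
 * no display is active, so reclocking is treated as always safe.
 */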
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

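/*
 * Return the vertical refresh rate (in Hz) of the first enabled CRTC,
 * or 0 if no display is active.
 */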
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

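/*
 * Split a scaled interval into the "p" mantissa and "u" shift used by
 * the SMC: i_c = i * r_c / 100, b_c is the bit length of i_c >> p_b,
 * u = (b_c + 1) / 2 and p = i_c / 4^u, so that p * 4^u approximates
 * i_c.  (The parameter naming follows the callers; the exact hardware
 * semantics are not documented here.)
 */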
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

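/*
 * Compute the activity thresholds (tl, th) around a target t for a
 * hysteresis band h between a low clock fl and a high clock fh;
 * returns -EINVAL when either clock is zero or fl > fh.  The
 * fixed-point arithmetic appears to scale the band by the clock ratio
 * k = 100 * fh / fl; the original derivation is not documented.
 */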
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

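/*
 * Return true if the classification flags mark this power state as a
 * UVD (video decode) state of any flavor (SD, HD, HD2 or MVC).
 */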
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

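/*
 * The PowerPlay and fan tables in the VBIOS come in several revisions;
 * these unions let one pointer address any revision, with the actual
 * layout selected from the table's own size/format fields.
 */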
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

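/*
 * Copy an ATOM clock/voltage dependency table from the VBIOS into a
 * driver-allocated array.  The ATOM records are packed, so the walk
 * advances by sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record) bytes
 * rather than indexing a C array, and the 24-bit clock is reassembled
 * from its low/high halves.
 */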
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

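/*
 * Cache the global PowerPlay platform capabilities and the backbias
 * and voltage response times from the VBIOS PowerPlayInfo table.
 */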
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

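/*
 * Parse the optional PowerPlay tables: fan control, clock/voltage
 * dependency and phase shedding tables (table revision 4+), CAC/TDP
 * limits (revision 5+), and the extended-header tables (VCE, UVD,
 * SAMU, PPM, ACP, PowerTune, VDDGFX).  Each sub-table is only read if
 * the table advertises it; on allocation failure the partially parsed
 * state is freed before returning.
 */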
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
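		/* VCE clock/voltage limit and state tables (extended header rev 2+) */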
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			for (i = 0; i < states->numEntries; i++) {
				if (i >= AMDGPU_MAX_VCE_LEVELS)
					break;
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
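		/* UVD clock/voltage limit table (rev 3+) */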
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
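		/* SAMU clock/voltage limit table (rev 4+) */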
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
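		/* PPM (platform power management) table (rev 5+) */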
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
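		/* ACP clock/voltage limit table (rev 6+) */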
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
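		/* PowerTune TDP/TDC table (rev 7+); revision 0 of the table
		 * lacks the maximum power delivery limit, so 255 is assumed. */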
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
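		/* SCLK/VDDGFX dependency table (rev 8+) */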
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

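/*
 * Free everything allocated by amdgpu_parse_extended_power_table().
 * kfree(NULL) is a no-op, so this is safe to call on a partially
 * parsed state.
 */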
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

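/*
 * Identify the board's thermal controller from the PowerPlay table and
 * record its type, fan parameters and RPM range.  For external I2C
 * controllers the matching I2C bus is looked up and a board-info
 * device is registered on it.
 */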
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

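/*
 * Clamp the requested PCIe generation: an explicit ASIC setting wins;
 * otherwise fall back to the fastest generation that both the system
 * speed mask and the default allow.
 */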
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

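/*
 * Encode a PCIe lane count (1-16) into the 3-bit ATOM encoding used by
 * the power tables; unsupported widths encode as 0.
 */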
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
963