/*	$NetBSD: amdgpu_hwmgr.c,v 1.3 2024/04/16 14:34:01 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_hwmgr.c,v 1.3 2024/04/16 14:34:01 riastradh Exp $");

#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/amdgpu_drm.h>
#include "power_state.h"
#include "hwmgr.h"
#include "ppsmc.h"
#include "amd_acpi.h"
#include "pp_psm.h"

extern const struct pp_smumgr_func ci_smu_funcs;
extern const struct pp_smumgr_func smu8_smu_funcs;
extern const struct pp_smumgr_func iceland_smu_funcs;
extern const struct pp_smumgr_func tonga_smu_funcs;
extern const struct pp_smumgr_func fiji_smu_funcs;
extern const struct pp_smumgr_func polaris10_smu_funcs;
extern const struct pp_smumgr_func vegam_smu_funcs;
extern const struct pp_smumgr_func vega10_smu_funcs;
extern const struct pp_smumgr_func vega12_smu_funcs;
extern const struct pp_smumgr_func smu10_smu_funcs;
extern const struct pp_smumgr_func vega20_smu_funcs;

extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int vega20_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int smu10_init_function_pointers(struct pp_hwmgr *hwmgr);

static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);

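/*
 * Default workload profile ordering: workload_prority maps each
 * PP_SMC_POWER_PROFILE_* to a priority index, and workload_setting lists
 * the profiles in that same order.
 */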
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;

	hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}

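/*
 * Early init: select the SMU manager functions, hwmgr backend, power-play
 * table version and ASIC-specific capability/feature masks for the detected
 * chip family and chip id.
 */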
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	if (!hwmgr)
		return -EINVAL;

	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
	hwmgr->pp_table_version = PP_TABLE_V1;
	hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	hwmgr_init_default_caps(hwmgr);
	hwmgr_set_user_specify_caps(hwmgr);
	hwmgr->fan_ctrl_is_in_default_mode = true;
	hwmgr_init_workload_prority(hwmgr);
	hwmgr->gfxoff_state_changed_by_workload = false;

	adev = hwmgr->adev;

	switch (hwmgr->chip_family) {
	case AMDGPU_FAMILY_CI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->smumgr_funcs = &ci_smu_funcs;
		ci_set_asic_special_caps(hwmgr);
		hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
					 PP_ENABLE_GFX_CG_THRU_SMU |
					 PP_GFXOFF_MASK);
		hwmgr->pp_table_version = PP_TABLE_V0;
		hwmgr->od_enabled = false;
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_CZ:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->od_enabled = false;
		hwmgr->smumgr_funcs = &smu8_smu_funcs;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		smu8_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_VI:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
		switch (hwmgr->chip_id) {
		case CHIP_TOPAZ:
			hwmgr->smumgr_funcs = &iceland_smu_funcs;
			topaz_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			hwmgr->pp_table_version = PP_TABLE_V0;
			hwmgr->od_enabled = false;
			break;
		case CHIP_TONGA:
			hwmgr->smumgr_funcs = &tonga_smu_funcs;
			tonga_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
			break;
		case CHIP_FIJI:
			hwmgr->smumgr_funcs = &fiji_smu_funcs;
			fiji_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
						PP_ENABLE_GFX_CG_THRU_SMU);
			break;
		case CHIP_POLARIS11:
		case CHIP_POLARIS10:
		case CHIP_POLARIS12:
			hwmgr->smumgr_funcs = &polaris10_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		case CHIP_VEGAM:
			hwmgr->smumgr_funcs = &vegam_smu_funcs;
			polaris_set_asic_special_caps(hwmgr);
			hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
			break;
		default:
			return -EINVAL;
		}
		smu7_init_function_pointers(hwmgr);
		break;
	case AMDGPU_FAMILY_AI:
		switch (hwmgr->chip_id) {
		case CHIP_VEGA10:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega10_smu_funcs;
			vega10_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA12:
			hwmgr->smumgr_funcs = &vega12_smu_funcs;
			vega12_hwmgr_init(hwmgr);
			break;
		case CHIP_VEGA20:
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
			hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
			hwmgr->smumgr_funcs = &vega20_smu_funcs;
			vega20_hwmgr_init(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AMDGPU_FAMILY_RV:
		switch (hwmgr->chip_id) {
		case CHIP_RAVEN:
			hwmgr->od_enabled = false;
			hwmgr->smumgr_funcs = &smu10_smu_funcs;
			smu10_init_function_pointers(hwmgr);
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

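/*
 * Software init: register the hwmgr interrupt handlers and let the selected
 * SMU manager allocate its tables and buffers via smu_init.
 */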
int hwmgr_sw_init(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init)
		return -EINVAL;

	phm_register_irq_handlers(hwmgr);
	pr_info("hwmgr_sw_init smu backend is %s\n", hwmgr->smumgr_funcs->name);

	return hwmgr->smumgr_funcs->smu_init(hwmgr);
}


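/* Software teardown: release the SMU manager's resources, if any. */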
int hwmgr_sw_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini)
		hwmgr->smumgr_funcs->smu_fini(hwmgr);

	return 0;
}

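/*
 * Hardware init: decide whether power management is enabled (amdgpu_dpm set,
 * and either bare metal or the single PP-capable SR-IOV VF), then parse the
 * power-play table, initialize the hwmgr backend, build the power-state
 * table, set up the ASIC, and enable dynamic state management and the
 * thermal controller.  Failures unwind the steps already completed.
 */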
int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev);
	hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf))
			? true : false;
	if (!hwmgr->pm_en)
		return 0;

	if (!hwmgr->pptable_func ||
	    !hwmgr->pptable_func->pptable_init ||
	    !hwmgr->hwmgr_func->backend_init) {
		hwmgr->pm_en = false;
		pr_info("dpm not supported \n");
		return 0;
	}

	ret = hwmgr->pptable_func->pptable_init(hwmgr);
	if (ret)
		goto err;

	((struct amdgpu_device *)hwmgr->adev)->pm.no_fan =
				hwmgr->thermal_controller.fanInfo.bNoFan;

	ret = hwmgr->hwmgr_func->backend_init(hwmgr);
	if (ret)
		goto err1;
	/* make sure dc limits are valid */
	if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
			(hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
			hwmgr->dyn_state.max_clock_voltage_on_dc =
					hwmgr->dyn_state.max_clock_voltage_on_ac;

	ret = psm_init_power_state_table(hwmgr);
	if (ret)
		goto err2;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		goto err2;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		goto err2;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		goto err2;

	((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true;

	return 0;
err2:
	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
err1:
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
err:
	return ret;
}

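/*
 * Hardware teardown: stop the thermal controller, fall back to the boot
 * power state, disable dynamic state management and clock/power gating,
 * then tear down the backend, the power-play table and the power-state
 * table.
 */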
int hwmgr_hw_fini(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_stop_thermal_controller(hwmgr);
	psm_set_boot_states(hwmgr);
	psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	phm_disable_dynamic_state_management(hwmgr);
	phm_disable_clock_power_gatings(hwmgr);

	if (hwmgr->hwmgr_func->backend_fini)
		hwmgr->hwmgr_func->backend_fini(hwmgr);
	if (hwmgr->pptable_func->pptable_fini)
		hwmgr->pptable_func->pptable_fini(hwmgr);
	return psm_fini_power_state_table(hwmgr);
}

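/*
 * Suspend: disable SMC firmware CTF, drop to the boot power state and power
 * down the ASIC.
 */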
int hwmgr_suspend(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf)
		return 0;

	phm_disable_smc_firmware_ctf(hwmgr);
	ret = psm_set_boot_states(hwmgr);
	if (ret)
		return ret;
	ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
	if (ret)
		return ret;
	ret = phm_power_down_asic(hwmgr);

	return ret;
}

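/*
 * Resume: redo ASIC setup, re-enable dynamic state management and the
 * thermal controller, then re-evaluate the current power state.
 */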
int hwmgr_resume(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->not_vf || !hwmgr->pm_en)
		return 0;

	ret = phm_setup_asic(hwmgr);
	if (ret)
		return ret;

	ret = phm_enable_dynamic_state_management(hwmgr);
	if (ret)
		return ret;
	ret = phm_start_thermal_controller(hwmgr);
	ret |= psm_set_performance_states(hwmgr);
	if (ret)
		return ret;

	ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);

	return ret;
}

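/* Map the generic amd_pm_state_type onto the power-play UI state label. */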
static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
{
	switch (state) {
	case POWER_STATE_TYPE_BATTERY:
		return PP_StateUILabel_Battery;
	case POWER_STATE_TYPE_BALANCED:
		return PP_StateUILabel_Balanced;
	case POWER_STATE_TYPE_PERFORMANCE:
		return PP_StateUILabel_Performance;
	default:
		return PP_StateUILabel_None;
	}
}

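/*
 * Dispatch a power-management task: handle display configuration changes,
 * switch to a user-requested power state, or re-adjust the current power
 * state (e.g. after init).
 */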
int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
		enum amd_pm_state_type *user_state)
{
	int ret = 0;

	if (hwmgr == NULL)
		return -EINVAL;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		if (!hwmgr->not_vf)
			return ret;
		ret = phm_pre_display_configuration_changed(hwmgr);
		if (ret)
			return ret;
		ret = phm_set_cpu_power_state(hwmgr);
		if (ret)
			return ret;
		ret = psm_set_performance_states(hwmgr);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
		break;
	case AMD_PP_TASK_ENABLE_USER_STATE:
	{
		enum PP_StateUILabel requested_ui_label;
		struct pp_power_state *requested_ps = NULL;

		if (!hwmgr->not_vf)
			return ret;
		if (user_state == NULL) {
			ret = -EINVAL;
			break;
		}

		requested_ui_label = power_state_convert(*user_state);
		ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
		if (ret)
			return ret;
		ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps);
		break;
	}
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL);
		break;
	default:
		break;
	}
	return ret;
}

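/*
 * Platform capabilities common to all ASICs.  PCIe performance requests are
 * only advertised when ACPI reports support for them.
 */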
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);

#if defined(CONFIG_ACPI)
	if (amdgpu_acpi_is_pcie_performance_request_supported(hwmgr->adev))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
#endif

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_DynamicPatchPowerState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
		PHM_PlatformCaps_EnableSMU7ThermalManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SMC);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_FanSpeedInTableIsRPM);
	return;
}

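/*
 * Translate the user-controllable feature_mask bits (sclk deep sleep, power
 * containment/CAC, overdrive) into platform capabilities.
 */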
int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);
	else
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	} else {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);
	}

	if (hwmgr->feature_mask & PP_OVERDRIVE_MASK)
		hwmgr->od_enabled = true;

	return 0;
}

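/*
 * Polaris10/11/12 and VegaM: SPLL shutdown support is not advertised on
 * Polaris10, and DB/TD/TCP ramping is not set on Polaris11.
 */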
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_SQRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_RegulatorHot);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_AutomaticDCTransition);

	if (hwmgr->chip_id != CHIP_POLARIS10)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SPLLShutdownSupport);

	if (hwmgr->chip_id != CHIP_POLARIS11) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_DBRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_TDRamping);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_TCPRamping);
	}
	return 0;
}

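/* Fiji: EVV is supported; SQ/DB/TD/TCP ramping is disabled. */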
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

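/* Tonga: as Fiji, but UVD and VCE power gating are also disabled. */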
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
		      PHM_PlatformCaps_VCEPowerGating);
	return 0;
}

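/* Topaz: EVV supported, SQ/DB/TD/TCP ramping disabled. */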
int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_EVV);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	return 0;
}

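/* CI family: ramping disabled, memory and engine spread spectrum enabled. */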
int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_MemorySpreadSpectrumSupport);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EngineSpreadSpectrumSupport);
	return 0;
}