Lines Matching defs:smu
60 static int smu_force_smuclk_levels(struct smu_context *smu,
63 static int smu_handle_task(struct smu_context *smu,
66 static int smu_reset(struct smu_context *smu);
71 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
77 struct smu_context *smu = handle;
79 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
82 return smu_get_pp_feature_mask(smu, buf);
88 struct smu_context *smu = handle;
90 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
93 return smu_set_pp_feature_mask(smu, new_mask);
96 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
98 if (!smu->ppt_funcs->set_gfx_off_residency)
101 return smu_set_gfx_off_residency(smu, value);
104 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
106 if (!smu->ppt_funcs->get_gfx_off_residency)
109 return smu_get_gfx_off_residency(smu, value);
112 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
114 if (!smu->ppt_funcs->get_gfx_off_entrycount)
117 return smu_get_gfx_off_entrycount(smu, value);
120 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
122 if (!smu->ppt_funcs->get_gfx_off_status)
125 *value = smu_get_gfx_off_status(smu);
130 int smu_set_soft_freq_range(struct smu_context *smu,
137 if (smu->ppt_funcs->set_soft_freq_limited_range)
138 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
146 int smu_get_dpm_freq_range(struct smu_context *smu,
156 if (smu->ppt_funcs->get_dpm_ultimate_freq)
157 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
165 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
168 struct amdgpu_device *adev = smu->adev;
170 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
171 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
180 struct smu_context *smu = handle;
184 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
194 struct smu_context *smu = handle;
198 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
206 static int smu_set_gfx_imu_enable(struct smu_context *smu)
208 struct amdgpu_device *adev = smu->adev;
213 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
216 return smu_set_gfx_power_up_by_imu(smu);
219 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
222 struct smu_power_context *smu_power = &smu->smu_power;
226 if (!smu->ppt_funcs->dpm_set_vcn_enable)
232 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
239 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
242 struct smu_power_context *smu_power = &smu->smu_power;
246 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
252 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
266 * This API uses no smu->mutex lock protection due to:
270 * Under this case, the smu->mutex lock protection is already enforced on
277 struct smu_context *smu = handle;
280 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
281 dev_WARN(smu->adev->dev,
294 ret = smu_dpm_set_vcn_enable(smu, !gate);
296 dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
300 ret = smu_gfx_off_control(smu, gate);
302 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
306 ret = smu_powergate_sdma(smu, gate);
308 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
312 ret = smu_dpm_set_jpeg_enable(smu, !gate);
314 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
318 dev_err(smu->adev->dev, "Unsupported block type!\n");
328 * @smu: smu_context pointer
333 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
335 if (smu->adev->in_suspend)
339 smu->user_dpm_profile.clk_dependency = 0;
340 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
343 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
346 smu->user_dpm_profile.clk_dependency = 0;
347 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
350 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
353 smu->user_dpm_profile.clk_dependency = 0;
354 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
363 * @smu: smu_context pointer
368 static void smu_restore_dpm_user_profile(struct smu_context *smu)
370 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
373 if (!smu->adev->in_suspend)
376 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
380 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
383 if (smu->user_dpm_profile.power_limit) {
384 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
386 dev_err(smu->adev->dev, "Failed to set power limit value\n");
395 * Iterate over smu clk type and force the saved user clk
398 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
399 smu->user_dpm_profile.clk_mask[clk_type]) {
400 ret = smu_force_smuclk_levels(smu, clk_type,
401 smu->user_dpm_profile.clk_mask[clk_type]);
403 dev_err(smu->adev->dev,
410 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
411 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
412 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
414 smu->user_dpm_profile.fan_speed_pwm = 0;
415 smu->user_dpm_profile.fan_speed_rpm = 0;
416 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
417 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
420 if (smu->user_dpm_profile.fan_speed_pwm) {
421 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
423 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
426 if (smu->user_dpm_profile.fan_speed_rpm) {
427 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
429 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
434 if (smu->user_dpm_profile.user_od) {
435 if (smu->ppt_funcs->restore_user_od_settings) {
436 ret = smu->ppt_funcs->restore_user_od_settings(smu);
438 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
443 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
474 struct smu_context *smu = adev->powerplay.pp_handle;
476 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
486 struct smu_context *smu = handle;
487 struct smu_table_context *smu_table = &smu->smu_table;
489 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
507 struct smu_context *smu = handle;
508 struct smu_table_context *smu_table = &smu->smu_table;
512 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
516 dev_err(smu->adev->dev, "pp table size not matched!\n");
534 smu->uploading_custom_pp_table = true;
536 ret = smu_reset(smu);
538 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
540 smu->uploading_custom_pp_table = false;
545 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
547 struct smu_feature *feature = &smu->smu_feature;
558 if (smu->adev->scpm_enabled) {
565 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
579 struct smu_context *smu = adev->powerplay.pp_handle;
582 smu->od_enabled = true;
588 navi10_set_ppt_funcs(smu);
594 sienna_cichlid_set_ppt_funcs(smu);
598 renoir_set_ppt_funcs(smu);
601 vangogh_set_ppt_funcs(smu);
606 yellow_carp_set_ppt_funcs(smu);
610 smu_v13_0_4_set_ppt_funcs(smu);
613 smu_v13_0_5_set_ppt_funcs(smu);
616 cyan_skillfish_set_ppt_funcs(smu);
620 arcturus_set_ppt_funcs(smu);
622 smu->od_enabled = false;
625 aldebaran_set_ppt_funcs(smu);
627 smu->od_enabled = true;
631 smu_v13_0_0_set_ppt_funcs(smu);
634 smu_v13_0_6_set_ppt_funcs(smu);
636 smu->od_enabled = true;
639 smu_v13_0_7_set_ppt_funcs(smu);
651 struct smu_context *smu;
654 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
655 if (!smu)
658 smu->adev = adev;
659 smu->pm_enabled = !!amdgpu_dpm;
660 smu->is_apu = false;
661 smu->smu_baco.state = SMU_BACO_STATE_EXIT;
662 smu->smu_baco.platform_support = false;
663 smu->user_dpm_profile.fan_mode = -1;
665 rw_init(&smu->message_lock, "smuml");
667 adev->powerplay.pp_handle = smu;
673 return smu_init_microcode(smu);
676 static int smu_set_default_dpm_table(struct smu_context *smu)
678 struct smu_power_context *smu_power = &smu->smu_power;
683 if (!smu->ppt_funcs->set_default_dpm_table)
689 ret = smu_dpm_set_vcn_enable(smu, true);
693 ret = smu_dpm_set_jpeg_enable(smu, true);
697 ret = smu->ppt_funcs->set_default_dpm_table(smu);
699 dev_err(smu->adev->dev,
702 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
704 smu_dpm_set_vcn_enable(smu, !vcn_gate);
708 static int smu_apply_default_config_table_settings(struct smu_context *smu)
710 struct amdgpu_device *adev = smu->adev;
713 ret = smu_get_default_config_table_settings(smu,
718 return smu_set_config_table(smu, &adev->pm.config_table);
724 struct smu_context *smu = adev->powerplay.pp_handle;
727 smu_set_fine_grain_gfx_freq_parameters(smu);
729 if (!smu->pm_enabled)
732 ret = smu_post_init(smu);
734 dev_err(adev->dev, "Failed to post smu init!\n");
746 smu_set_ac_dc(smu);
752 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
753 ret = smu_set_default_od_settings(smu);
760 ret = smu_populate_umd_state_clk(smu);
766 ret = smu_get_asic_power_limits(smu,
767 &smu->current_power_limit,
768 &smu->default_power_limit,
769 &smu->max_power_limit);
776 smu_get_unique_id(smu);
778 smu_get_fan_parameters(smu);
780 smu_handle_task(smu,
781 smu->smu_dpm.dpm_level,
784 ret = smu_apply_default_config_table_settings(smu);
790 smu_restore_dpm_user_profile(smu);
795 static int smu_init_fb_allocations(struct smu_context *smu)
797 struct amdgpu_device *adev = smu->adev;
798 struct smu_table_context *smu_table = &smu->smu_table;
861 static int smu_fini_fb_allocations(struct smu_context *smu)
863 struct smu_table_context *smu_table = &smu->smu_table;
882 * @smu: smu_context pointer
889 static int smu_alloc_memory_pool(struct smu_context *smu)
891 struct amdgpu_device *adev = smu->adev;
892 struct smu_table_context *smu_table = &smu->smu_table;
894 uint64_t pool_size = smu->pool_size;
926 static int smu_free_memory_pool(struct smu_context *smu)
928 struct smu_table_context *smu_table = &smu->smu_table;
943 static int smu_alloc_dummy_read_table(struct smu_context *smu)
945 struct smu_table_context *smu_table = &smu->smu_table;
948 struct amdgpu_device *adev = smu->adev;
967 static void smu_free_dummy_read_table(struct smu_context *smu)
969 struct smu_table_context *smu_table = &smu->smu_table;
981 static int smu_smc_table_sw_init(struct smu_context *smu)
989 ret = smu_init_smc_tables(smu);
991 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
999 ret = smu_init_power(smu);
1001 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
1008 ret = smu_init_fb_allocations(smu);
1012 ret = smu_alloc_memory_pool(smu);
1016 ret = smu_alloc_dummy_read_table(smu);
1020 ret = smu_i2c_init(smu);
1027 static int smu_smc_table_sw_fini(struct smu_context *smu)
1031 smu_i2c_fini(smu);
1033 smu_free_dummy_read_table(smu);
1035 ret = smu_free_memory_pool(smu);
1039 ret = smu_fini_fb_allocations(smu);
1043 ret = smu_fini_power(smu);
1045 dev_err(smu->adev->dev, "Failed to smu_fini_power!\n");
1049 ret = smu_fini_smc_tables(smu);
1051 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
1060 struct smu_context *smu = container_of(work, struct smu_context,
1063 smu_log_thermal_throttling(smu);
1068 struct smu_context *smu = container_of(work, struct smu_context,
1071 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1072 smu->ppt_funcs->interrupt_work(smu);
1077 struct smu_context *smu =
1080 &smu->thermal_range;
1081 struct amdgpu_device *adev = smu->adev;
1090 smu->ppt_funcs->read_sensor &&
1091 !smu->ppt_funcs->read_sensor(smu,
1106 struct smu_context *smu = adev->powerplay.pp_handle;
1109 smu->pool_size = adev->pm.smu_prv_buffer_size;
1110 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1111 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1112 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1114 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1115 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1116 atomic64_set(&smu->throttle_int_counter, 0);
1117 smu->watermarks_bitmap = 0;
1118 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1119 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1121 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
1122 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1124 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
1125 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
1126 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
1127 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
1128 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
1129 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
1130 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
1131 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1133 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1134 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1135 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1136 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1137 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1138 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1139 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1140 smu->display_config = &adev->pm.pm_display_cfg;
1142 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1143 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1145 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1148 ret = smu_smc_table_sw_init(smu);
1155 ret = smu_get_vbios_bootup_values(smu);
1161 ret = smu_init_pptable_microcode(smu);
1167 ret = smu_register_irq_handler(smu);
1174 if (!smu->ppt_funcs->get_fan_control_mode)
1175 smu->adev->pm.no_fan = true;
1183 struct smu_context *smu = adev->powerplay.pp_handle;
1186 ret = smu_smc_table_sw_fini(smu);
1192 smu_fini_microcode(smu);
1197 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1199 struct amdgpu_device *adev = smu->adev;
1201 &smu->thermal_range;
1204 if (!smu->ppt_funcs->get_thermal_temperature_range)
1207 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1224 static int smu_smc_hw_setup(struct smu_context *smu)
1226 struct smu_feature *feature = &smu->smu_feature;
1227 struct amdgpu_device *adev = smu->adev;
1237 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1239 ret = smu_system_features_control(smu, true);
1249 ret = smu_init_display_count(smu, 0);
1255 ret = smu_set_driver_table_location(smu);
1264 ret = smu_set_tool_table_location(smu);
1274 ret = smu_notify_memory_pool_location(smu);
1286 ret = smu_setup_pptable(smu);
1293 /* smu_dump_pptable(smu); */
1304 ret = smu_write_pptable(smu);
1312 ret = smu_run_btc(smu);
1321 ret = smu_feature_set_allowed_mask(smu);
1328 ret = smu_system_features_control(smu, true);
1334 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1343 if (!smu_is_dpm_running(smu))
1351 ret = smu_set_default_dpm_table(smu);
1382 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1388 ret = smu_get_thermal_temperature_range(smu);
1394 ret = smu_enable_thermal_alert(smu);
1400 ret = smu_notify_display_change(smu);
1410 ret = smu_set_min_dcef_deep_sleep(smu,
1411 smu->smu_table.boot_values.dcefclk / 100);
1416 static int smu_start_smc_engine(struct smu_context *smu)
1418 struct amdgpu_device *adev = smu->adev;
1423 if (smu->ppt_funcs->load_microcode) {
1424 ret = smu->ppt_funcs->load_microcode(smu);
1431 if (smu->ppt_funcs->check_fw_status) {
1432 ret = smu->ppt_funcs->check_fw_status(smu);
1443 ret = smu_check_fw_version(smu);
1454 struct smu_context *smu = adev->powerplay.pp_handle;
1457 smu->pm_enabled = false;
1461 ret = smu_start_smc_engine(smu);
1467 if (smu->is_apu) {
1468 ret = smu_set_gfx_imu_enable(smu);
1471 smu_dpm_set_vcn_enable(smu, true);
1472 smu_dpm_set_jpeg_enable(smu, true);
1473 smu_set_gfx_cgpg(smu, true);
1476 if (!smu->pm_enabled)
1479 ret = smu_get_driver_allowed_feature_mask(smu);
1483 ret = smu_smc_hw_setup(smu);
1496 ret = smu_init_max_sustainable_clocks(smu);
1509 static int smu_disable_dpms(struct smu_context *smu)
1511 struct amdgpu_device *adev = smu->adev;
1513 bool use_baco = !smu->is_apu &&
1542 if (smu->uploading_custom_pp_table) {
1593 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1594 ret = smu_disable_all_features_with_exception(smu,
1597 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1601 ret = smu_system_features_control(smu, false);
1603 dev_err(adev->dev, "Failed to disable smu features.\n");
1614 static int smu_smc_hw_cleanup(struct smu_context *smu)
1616 struct amdgpu_device *adev = smu->adev;
1619 cancel_work_sync(&smu->throttling_logging_work);
1620 cancel_work_sync(&smu->interrupt_work);
1622 ret = smu_disable_thermal_alert(smu);
1628 cancel_delayed_work_sync(&smu->swctf_delayed_work);
1630 ret = smu_disable_dpms(smu);
1642 struct smu_context *smu = adev->powerplay.pp_handle;
1647 smu_dpm_set_vcn_enable(smu, false);
1648 smu_dpm_set_jpeg_enable(smu, false);
1653 if (!smu->pm_enabled)
1658 return smu_smc_hw_cleanup(smu);
1664 struct smu_context *smu = adev->powerplay.pp_handle;
1666 kfree(smu);
1669 static int smu_reset(struct smu_context *smu)
1671 struct amdgpu_device *adev = smu->adev;
1692 struct smu_context *smu = adev->powerplay.pp_handle;
1699 if (!smu->pm_enabled)
1704 ret = smu_smc_hw_cleanup(smu);
1708 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1710 smu_set_gfx_cgpg(smu, false);
1716 ret = smu_get_entrycount_gfxoff(smu, &count);
1727 struct smu_context *smu = adev->powerplay.pp_handle;
1732 if (!smu->pm_enabled)
1737 ret = smu_start_smc_engine(smu);
1743 ret = smu_smc_hw_setup(smu);
1749 ret = smu_set_gfx_imu_enable(smu);
1753 smu_set_gfx_cgpg(smu, true);
1755 smu->disable_uclk_switch = 0;
1767 struct smu_context *smu = handle;
1769 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1775 smu_set_min_dcef_deep_sleep(smu,
1801 struct smu_context *smu = (struct smu_context*)(handle);
1802 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1804 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1811 smu_gpo_control(smu, false);
1812 smu_gfx_ulv_control(smu, false);
1813 smu_deep_sleep_control(smu, false);
1814 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
1821 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
1822 smu_deep_sleep_control(smu, true);
1823 smu_gfx_ulv_control(smu, true);
1824 smu_gpo_control(smu, true);
1831 static int smu_bump_power_profile_mode(struct smu_context *smu,
1837 if (smu->ppt_funcs->set_power_profile_mode)
1838 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
1843 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
1851 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1854 ret = smu_display_config_changed(smu);
1856 dev_err(smu->adev->dev, "Failed to change display config!");
1861 ret = smu_apply_clocks_adjust_rules(smu);
1863 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
1868 ret = smu_notify_smc_display_config(smu);
1870 dev_err(smu->adev->dev, "Failed to notify smc display config!");
1876 ret = smu_asic_set_performance_level(smu, level);
1878 dev_err(smu->adev->dev, "Failed to set performance level!");
1888 index = fls(smu->workload_mask);
1890 workload[0] = smu->workload_setting[index];
1892 if (init || smu->power_profile_mode != workload[0])
1893 smu_bump_power_profile_mode(smu, workload, 0);
1899 static int smu_handle_task(struct smu_context *smu,
1905 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1910 ret = smu_pre_display_config_changed(smu);
1913 ret = smu_adjust_power_state_dynamic(smu, level, false, false);
1916 ret = smu_adjust_power_state_dynamic(smu, level, true, true);
1919 ret = smu_adjust_power_state_dynamic(smu, level, true, false);
1932 struct smu_context *smu = handle;
1933 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1935 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
1943 struct smu_context *smu = handle;
1944 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1948 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1955 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
1956 index = fls(smu->workload_mask);
1958 workload[0] = smu->workload_setting[index];
1960 smu->workload_mask |= (1 << smu->workload_prority[type]);
1961 index = fls(smu->workload_mask);
1963 workload[0] = smu->workload_setting[index];
1968 smu_bump_power_profile_mode(smu, workload, 0);
1975 struct smu_context *smu = handle;
1976 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1978 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1981 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
1990 struct smu_context *smu = handle;
1991 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
1994 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1997 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2000 ret = smu_enable_umd_pstate(smu, &level);
2004 ret = smu_handle_task(smu, level,
2009 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2010 smu->user_dpm_profile.clk_dependency = 0;
2018 struct smu_context *smu = handle;
2020 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2023 return smu_init_display_count(smu, count);
2026 static int smu_force_smuclk_levels(struct smu_context *smu,
2030 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2033 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2037 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2041 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2042 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2043 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2044 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2045 smu_set_user_clk_dependencies(smu, clk_type);
2056 struct smu_context *smu = handle;
2092 return smu_force_smuclk_levels(smu, clk_type, mask);
2105 struct smu_context *smu = handle;
2108 if (!smu->pm_enabled)
2111 if (smu->ppt_funcs &&
2112 smu->ppt_funcs->set_mp1_state)
2113 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2121 struct smu_context *smu = handle;
2124 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2127 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2130 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2132 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2137 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
2141 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2144 if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
2147 ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
2149 dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
2154 int smu_write_watermarks_table(struct smu_context *smu)
2156 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2159 return smu_set_watermarks_table(smu, NULL);
2165 struct smu_context *smu = handle;
2167 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2170 if (smu->disable_watermark)
2173 return smu_set_watermarks_table(smu, clock_ranges);
2176 int smu_set_ac_dc(struct smu_context *smu)
2180 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2184 if (smu->dc_controlled_by_gpio)
2187 ret = smu_set_power_source(smu,
2188 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2191 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2192 smu->adev->pm.ac_power ? "AC" : "DC");
2198 .name = "smu",
2242 struct smu_context *smu = handle;
2243 struct amdgpu_device *adev = smu->adev;
2246 if (!smu->pm_enabled)
2253 if (smu->ppt_funcs->load_microcode) {
2254 ret = smu->ppt_funcs->load_microcode(smu);
2261 if (smu->ppt_funcs->check_fw_status) {
2262 ret = smu->ppt_funcs->check_fw_status(smu);
2272 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2276 if (smu->ppt_funcs->set_gfx_cgpg)
2277 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2284 struct smu_context *smu = handle;
2287 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2290 if (!smu->ppt_funcs->set_fan_speed_rpm)
2296 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2297 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2298 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2299 smu->user_dpm_profile.fan_speed_rpm = speed;
2302 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2303 smu->user_dpm_profile.fan_speed_pwm = 0;
2312 * @handle: pointer to smu context
2324 struct smu_context *smu = handle;
2325 struct amdgpu_device *adev = smu->adev;
2330 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2362 if (smu->ppt_funcs->get_ppt_limit)
2363 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2373 ret = smu_get_asic_power_limits(smu,
2374 &smu->current_power_limit,
2381 *limit = smu->current_power_limit;
2384 *limit = smu->default_power_limit;
2387 *limit = smu->max_power_limit;
2399 struct smu_context *smu = handle;
2403 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2408 if (smu->ppt_funcs->set_power_limit)
2409 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2411 if (limit > smu->max_power_limit) {
2412 dev_err(smu->adev->dev,
2414 limit, smu->max_power_limit);
2419 limit = smu->current_power_limit;
2421 if (smu->ppt_funcs->set_power_limit) {
2422 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2423 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2424 smu->user_dpm_profile.power_limit = limit;
2430 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2434 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2437 if (smu->ppt_funcs->print_clk_levels)
2438 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2491 struct smu_context *smu = handle;
2498 return smu_print_smuclk_levels(smu, clk_type, buf);
2503 struct smu_context *smu = handle;
2510 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2513 if (!smu->ppt_funcs->emit_clk_levels)
2516 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2524 struct smu_context *smu = handle;
2527 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2530 if (smu->ppt_funcs->od_edit_dpm_table) {
2531 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2542 struct smu_context *smu = handle;
2544 &smu->pstate_table;
2548 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2557 if (smu->ppt_funcs->read_sensor)
2558 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2579 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2583 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2587 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2591 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2614 struct smu_context *smu = handle;
2616 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
2617 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
2625 struct smu_context *smu = handle;
2627 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
2628 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
2635 struct smu_context *smu = handle;
2637 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2638 !smu->ppt_funcs->get_power_profile_mode)
2643 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
2650 struct smu_context *smu = handle;
2652 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2653 !smu->ppt_funcs->set_power_profile_mode)
2656 return smu_bump_power_profile_mode(smu, param, param_size);
2661 struct smu_context *smu = handle;
2663 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2666 if (!smu->ppt_funcs->get_fan_control_mode)
2672 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
2679 struct smu_context *smu = handle;
2682 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2685 if (!smu->ppt_funcs->set_fan_control_mode)
2691 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
2695 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2696 smu->user_dpm_profile.fan_mode = value;
2700 smu->user_dpm_profile.fan_speed_pwm = 0;
2701 smu->user_dpm_profile.fan_speed_rpm = 0;
2702 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
2712 struct smu_context *smu = handle;
2715 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2718 if (!smu->ppt_funcs->get_fan_speed_pwm)
2724 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
2731 struct smu_context *smu = handle;
2734 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2737 if (!smu->ppt_funcs->set_fan_speed_pwm)
2743 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
2744 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2745 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
2746 smu->user_dpm_profile.fan_speed_pwm = speed;
2749 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
2750 smu->user_dpm_profile.fan_speed_rpm = 0;
2758 struct smu_context *smu = handle;
2761 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2764 if (!smu->ppt_funcs->get_fan_speed_rpm)
2770 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
2777 struct smu_context *smu = handle;
2779 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2782 return smu_set_min_dcef_deep_sleep(smu, clk);
2789 struct smu_context *smu = handle;
2793 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2796 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
2811 dev_err(smu->adev->dev, "Invalid clock type!\n");
2815 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
2824 struct smu_context *smu = handle;
2827 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2830 if (smu->ppt_funcs->display_clock_voltage_request)
2831 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
2840 struct smu_context *smu = handle;
2843 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2846 if (smu->ppt_funcs->display_disable_memory_clock_switch)
2847 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
2855 struct smu_context *smu = handle;
2858 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2861 if (smu->ppt_funcs->set_xgmi_pstate)
2862 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
2865 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
2872 struct smu_context *smu = handle;
2876 if (!smu->pm_enabled)
2879 if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
2880 *cap = smu->ppt_funcs->baco_is_support(smu);
2887 struct smu_context *smu = handle;
2890 if (!smu->pm_enabled)
2894 if (smu->ppt_funcs->baco_exit)
2895 ret = smu->ppt_funcs->baco_exit(smu);
2897 if (smu->ppt_funcs->baco_enter)
2898 ret = smu->ppt_funcs->baco_enter(smu);
2904 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
2910 bool smu_mode1_reset_is_support(struct smu_context *smu)
2914 if (!smu->pm_enabled)
2917 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
2918 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
2923 bool smu_mode2_reset_is_support(struct smu_context *smu)
2927 if (!smu->pm_enabled)
2930 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
2931 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
2936 int smu_mode1_reset(struct smu_context *smu)
2940 if (!smu->pm_enabled)
2943 if (smu->ppt_funcs->mode1_reset)
2944 ret = smu->ppt_funcs->mode1_reset(smu);
2951 struct smu_context *smu = handle;
2954 if (!smu->pm_enabled)
2957 if (smu->ppt_funcs->mode2_reset)
2958 ret = smu->ppt_funcs->mode2_reset(smu);
2961 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
2968 struct smu_context *smu = handle;
2971 if (!smu->pm_enabled)
2974 if (smu->ppt_funcs->enable_gfx_features)
2975 ret = smu->ppt_funcs->enable_gfx_features(smu);
2978 dev_err(smu->adev->dev, "enable gfx features failed!\n");
2986 struct smu_context *smu = handle;
2989 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2992 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
2993 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3002 struct smu_context *smu = handle;
3005 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3008 if (smu->ppt_funcs->get_uclk_dpm_states)
3009 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3016 struct smu_context *smu = handle;
3019 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3022 if (smu->ppt_funcs->get_current_power_state)
3023 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3031 struct smu_context *smu = handle;
3034 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3037 if (smu->ppt_funcs->get_dpm_clock_table)
3038 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3045 struct smu_context *smu = handle;
3047 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3050 if (!smu->ppt_funcs->get_gpu_metrics)
3053 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3058 struct smu_context *smu = handle;
3061 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3064 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3065 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3073 struct smu_context *smu = handle;
3076 if (smu->ppt_funcs->gfx_state_change_set)
3077 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3082 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3086 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3087 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3092 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3096 if (smu->ppt_funcs &&
3097 smu->ppt_funcs->get_ecc_info)
3098 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3106 struct smu_context *smu = handle;
3107 struct smu_table_context *smu_table = &smu->smu_table;
3181 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3186 if (smu->ppt_funcs->wait_for_event)
3187 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3192 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3195 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3199 if (size != smu->stb_context.stb_buf_size)
3203 * No need to lock smu mutex as we access STB directly through MMIO
3207 return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3215 struct smu_context *smu = adev->powerplay.pp_handle;
3219 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3223 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3240 struct smu_context *smu = adev->powerplay.pp_handle;
3249 smu->stb_context.stb_buf_size);
3281 struct smu_context *smu = adev->powerplay.pp_handle;
3283 if (!smu || (!smu->stb_context.stb_buf_size))
3291 smu->stb_context.stb_buf_size);
3295 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3299 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3300 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3305 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3309 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3310 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
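Nearly every wrapper captured in this listing follows the same dispatch shape: bail out unless smu->pm_enabled and smu->adev->pm.dpm_enabled are set, verify that the ASIC-specific ppt_funcs hook is implemented, then call it and report failures through dev_err(). The fragment below is only an illustrative sketch of that pattern; set_some_feature is a hypothetical callback name, not a real member of struct pptable_funcs, and the error codes are representative rather than taken from any one wrapper.

	/*
	 * Illustrative sketch of the common wrapper pattern seen above.
	 * "set_some_feature" is a hypothetical hook used for demonstration;
	 * the guards and the ppt_funcs indirection mirror the real wrappers.
	 */
	static int smu_set_some_feature(void *handle, uint32_t value)
	{
		struct smu_context *smu = handle;
		int ret;

		/* Power management must be enabled and DPM running. */
		if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
			return -EOPNOTSUPP;

		/* Skip ASICs whose ppt backend does not implement the hook. */
		if (!smu->ppt_funcs || !smu->ppt_funcs->set_some_feature)
			return -EOPNOTSUPP;

		ret = smu->ppt_funcs->set_some_feature(smu, value);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set some feature!\n");

		return ret;
	}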