11bb76ff1Sjsg /*
21bb76ff1Sjsg * Copyright 2013 Advanced Micro Devices, Inc.
31bb76ff1Sjsg *
41bb76ff1Sjsg * Permission is hereby granted, free of charge, to any person obtaining a
51bb76ff1Sjsg * copy of this software and associated documentation files (the "Software"),
61bb76ff1Sjsg * to deal in the Software without restriction, including without limitation
71bb76ff1Sjsg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
81bb76ff1Sjsg * and/or sell copies of the Software, and to permit persons to whom the
91bb76ff1Sjsg * Software is furnished to do so, subject to the following conditions:
101bb76ff1Sjsg *
111bb76ff1Sjsg * The above copyright notice and this permission notice shall be included in
121bb76ff1Sjsg * all copies or substantial portions of the Software.
131bb76ff1Sjsg *
141bb76ff1Sjsg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
151bb76ff1Sjsg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
161bb76ff1Sjsg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
171bb76ff1Sjsg * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
181bb76ff1Sjsg * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
191bb76ff1Sjsg * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
201bb76ff1Sjsg * OTHER DEALINGS IN THE SOFTWARE.
211bb76ff1Sjsg *
221bb76ff1Sjsg */
231bb76ff1Sjsg
241bb76ff1Sjsg #include "amdgpu.h"
251bb76ff1Sjsg #include "amdgpu_pm.h"
261bb76ff1Sjsg #include "cikd.h"
271bb76ff1Sjsg #include "atom.h"
281bb76ff1Sjsg #include "amdgpu_atombios.h"
291bb76ff1Sjsg #include "amdgpu_dpm.h"
301bb76ff1Sjsg #include "kv_dpm.h"
311bb76ff1Sjsg #include "gfx_v7_0.h"
321bb76ff1Sjsg #include <linux/seq_file.h>
331bb76ff1Sjsg
341bb76ff1Sjsg #include "smu/smu_7_0_0_d.h"
351bb76ff1Sjsg #include "smu/smu_7_0_0_sh_mask.h"
361bb76ff1Sjsg
371bb76ff1Sjsg #include "gca/gfx_7_2_d.h"
381bb76ff1Sjsg #include "gca/gfx_7_2_sh_mask.h"
391bb76ff1Sjsg #include "legacy_dpm.h"
401bb76ff1Sjsg
411bb76ff1Sjsg #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
421bb76ff1Sjsg #define KV_MINIMUM_ENGINE_CLOCK 800
431bb76ff1Sjsg #define SMC_RAM_END 0x40000
441bb76ff1Sjsg
451bb76ff1Sjsg static const struct amd_pm_funcs kv_dpm_funcs;
461bb76ff1Sjsg
471bb76ff1Sjsg static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
481bb76ff1Sjsg static int kv_enable_nb_dpm(struct amdgpu_device *adev,
491bb76ff1Sjsg bool enable);
501bb76ff1Sjsg static void kv_init_graphics_levels(struct amdgpu_device *adev);
511bb76ff1Sjsg static int kv_calculate_ds_divider(struct amdgpu_device *adev);
521bb76ff1Sjsg static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
531bb76ff1Sjsg static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
541bb76ff1Sjsg static void kv_enable_new_levels(struct amdgpu_device *adev);
551bb76ff1Sjsg static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
561bb76ff1Sjsg struct amdgpu_ps *new_rps);
571bb76ff1Sjsg static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
581bb76ff1Sjsg static int kv_set_enabled_levels(struct amdgpu_device *adev);
591bb76ff1Sjsg static int kv_force_dpm_highest(struct amdgpu_device *adev);
601bb76ff1Sjsg static int kv_force_dpm_lowest(struct amdgpu_device *adev);
611bb76ff1Sjsg static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
621bb76ff1Sjsg struct amdgpu_ps *new_rps,
631bb76ff1Sjsg struct amdgpu_ps *old_rps);
641bb76ff1Sjsg static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
651bb76ff1Sjsg int min_temp, int max_temp);
661bb76ff1Sjsg static int kv_init_fps_limits(struct amdgpu_device *adev);
671bb76ff1Sjsg
681bb76ff1Sjsg static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
691bb76ff1Sjsg static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
701bb76ff1Sjsg
711bb76ff1Sjsg
/*
 * Translate a 2-bit voltage index into the 7-bit VID used by the SMC.
 * Prefers the vddc/sclk dependency table when it is populated;
 * otherwise falls back to the sumo VID mapping table.  Indices with
 * no match clamp to the table's last entry.
 */
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 idx;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		/* Direct lookup, clamped to the final entry. */
		idx = vid_2bit;
		if (idx >= vddc_sclk_table->count)
			idx = vddc_sclk_table->count - 1;
		return vddc_sclk_table->entries[idx].v;
	}

	for (idx = 0; idx < vid_mapping_table->num_entries; idx++) {
		if (vid_mapping_table->entries[idx].vid_2bit == vid_2bit)
			return vid_mapping_table->entries[idx].vid_7bit;
	}

	/* No match: use the last populated mapping entry. */
	return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
931bb76ff1Sjsg
/*
 * Inverse of kv_convert_vid2_to_vid7(): map a 7-bit VID back to its
 * 2-bit index, searching the vddc/sclk dependency table first and the
 * sumo VID mapping table otherwise.  Unmatched VIDs fall back to the
 * last table entry.
 */
static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 idx;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (idx = 0; idx < vddc_sclk_table->count; idx++) {
			if (vddc_sclk_table->entries[idx].v == vid_7bit)
				return idx;
		}
		return vddc_sclk_table->count - 1;
	}

	for (idx = 0; idx < vid_mapping_table->num_entries; idx++) {
		if (vid_mapping_table->entries[idx].vid_7bit == vid_7bit)
			return vid_mapping_table->entries[idx].vid_2bit;
	}

	return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
}
1171bb76ff1Sjsg
/*
 * Select who owns display phy powergating: clearing the bit leaves it
 * to atom, setting it hands it to the driver.  The write is compiled
 * out because we currently always let atom handle it, so this is a
 * deliberate no-op.
 */
static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}
1361bb76ff1Sjsg
/*
 * Build the sclk -> 2-bit voltage index mapping from the atom
 * "available sclk" list.  Only entries whose sclk strictly increases
 * over the previous accepted one are kept, which filters out zero and
 * repeated clocks.
 */
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 num_entries = 0;
	u32 last_sclk = 0;
	u32 i;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK <= last_sclk)
			continue;
		sclk_voltage_mapping_table->entries[num_entries].sclk_frequency =
			table[i].ulSupportedSCLK;
		sclk_voltage_mapping_table->entries[num_entries].vid_2bit =
			table[i].usVoltageIndex;
		last_sclk = table[i].ulSupportedSCLK;
		num_entries++;
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = num_entries;
}
1581bb76ff1Sjsg
/*
 * Build the 2-bit index <-> 7-bit VID mapping table from the atom
 * "available sclk" list.
 *
 * Pass 1: for each list entry with a non-zero sclk, record its
 * usVoltageID at slot usVoltageIndex (indices beyond the table are
 * skipped).
 * Pass 2: compact the table by pulling later populated slots down
 * into empty ones (vid_7bit == 0 marks an empty slot); when nothing
 * is left to pull down, i is the populated-entry count.
 */
static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			/* ignore voltage indices that would overflow the table */
			if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
				continue;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			/* slot i is empty: find the next populated slot and move it here */
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			/* no populated slot remained: i is the entry count */
			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}
1941bb76ff1Sjsg
/*
 * Local CAC (capacitance/leakage) configuration tables, consumed only
 * by kv_program_local_cac_table() below.  Per the usage there, each
 * kv_lcac_config_values entry is { block_id, count-of-signals, t },
 * 0xffffffff-terminated, and each kv_lcac_config_reg describes the
 * control register and field masks/shifts to program.  Compiled out:
 * CAC programming is currently left to the SMC defaults.
 */
#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = {
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] = {
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] = {
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] = {
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] = {
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] = {
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] = {
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif
2831bb76ff1Sjsg
/*
 * DIDT (di/dt power throttling) register programming table, consumed
 * by kv_program_pt_config_registers().  Field order per entry, per the
 * accesses in that function: { offset, mask, shift, value, type }.
 * KV_CONFIGREG_DIDT_IND entries go through the DIDT indirect interface
 * (RREG32_DIDT/WREG32_DIDT).  The table repeats the same pattern for
 * four register banks (0x0x/0x2x/0x4x/0x6x — presumably one per DIDT
 * block; verify against the gfx7 register headers) and is terminated
 * by an entry with offset 0xFFFFFFFF.
 */
static const struct kv_pt_config_reg didt_config_kv[] = {
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
3591bb76ff1Sjsg
kv_get_ps(struct amdgpu_ps * rps)3601bb76ff1Sjsg static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
3611bb76ff1Sjsg {
3621bb76ff1Sjsg struct kv_ps *ps = rps->ps_priv;
3631bb76ff1Sjsg
3641bb76ff1Sjsg return ps;
3651bb76ff1Sjsg }
3661bb76ff1Sjsg
kv_get_pi(struct amdgpu_device * adev)3671bb76ff1Sjsg static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
3681bb76ff1Sjsg {
3691bb76ff1Sjsg struct kv_power_info *pi = adev->pm.dpm.priv;
3701bb76ff1Sjsg
3711bb76ff1Sjsg return pi;
3721bb76ff1Sjsg }
3731bb76ff1Sjsg
/*
 * Program one local CAC block: for every table entry, write one
 * control-register value per signal (entry->signal_id is used as the
 * signal count), composing block id, signal index, t value and the
 * enable bit through the masks/shifts in local_cac_reg.  Compiled out
 * together with the tables above.
 */
#if 0
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
3991bb76ff1Sjsg
/*
 * Walk a 0xFFFFFFFF-terminated list of kv_pt_config_reg entries and
 * program each register field.  KV_CONFIGREG_CACHE entries are not
 * written immediately: their shifted values are OR-accumulated into
 * 'cache' and merged into the next non-cache entry's write, after
 * which the accumulator is reset.  The register access method (SMC
 * indirect, DIDT indirect, or direct MMIO) is selected per entry.
 *
 * Returns 0 on success, -EINVAL if the table pointer is NULL.
 */
static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			/* defer: fold into the next real register write */
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			/* read-modify-write the field, plus any cached bits */
			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
4481bb76ff1Sjsg
kv_do_enable_didt(struct amdgpu_device * adev,bool enable)4491bb76ff1Sjsg static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
4501bb76ff1Sjsg {
4511bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
4521bb76ff1Sjsg u32 data;
4531bb76ff1Sjsg
4541bb76ff1Sjsg if (pi->caps_sq_ramping) {
4551bb76ff1Sjsg data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
4561bb76ff1Sjsg if (enable)
4571bb76ff1Sjsg data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
4581bb76ff1Sjsg else
4591bb76ff1Sjsg data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
4601bb76ff1Sjsg WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
4611bb76ff1Sjsg }
4621bb76ff1Sjsg
4631bb76ff1Sjsg if (pi->caps_db_ramping) {
4641bb76ff1Sjsg data = RREG32_DIDT(ixDIDT_DB_CTRL0);
4651bb76ff1Sjsg if (enable)
4661bb76ff1Sjsg data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
4671bb76ff1Sjsg else
4681bb76ff1Sjsg data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
4691bb76ff1Sjsg WREG32_DIDT(ixDIDT_DB_CTRL0, data);
4701bb76ff1Sjsg }
4711bb76ff1Sjsg
4721bb76ff1Sjsg if (pi->caps_td_ramping) {
4731bb76ff1Sjsg data = RREG32_DIDT(ixDIDT_TD_CTRL0);
4741bb76ff1Sjsg if (enable)
4751bb76ff1Sjsg data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
4761bb76ff1Sjsg else
4771bb76ff1Sjsg data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
4781bb76ff1Sjsg WREG32_DIDT(ixDIDT_TD_CTRL0, data);
4791bb76ff1Sjsg }
4801bb76ff1Sjsg
4811bb76ff1Sjsg if (pi->caps_tcp_ramping) {
4821bb76ff1Sjsg data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
4831bb76ff1Sjsg if (enable)
4841bb76ff1Sjsg data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
4851bb76ff1Sjsg else
4861bb76ff1Sjsg data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
4871bb76ff1Sjsg WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
4881bb76ff1Sjsg }
4891bb76ff1Sjsg }
4901bb76ff1Sjsg
/*
 * Enable or disable DIDT throttling.  When enabling, the DIDT
 * configuration registers are programmed first; any programming
 * failure aborts without toggling the enable bits.  All DIDT register
 * access is bracketed by RLC safe mode.
 */
static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	/* Nothing to do unless at least one DIDT block is supported. */
	if (!(pi->caps_sq_ramping || pi->caps_db_ramping ||
	      pi->caps_td_ramping || pi->caps_tcp_ramping))
		return 0;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	if (enable)
		ret = kv_program_pt_config_registers(adev, didt_config_kv);

	if (ret == 0)
		kv_do_enable_didt(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return ret;
}
5171bb76ff1Sjsg
/*
 * Program all local CAC blocks (SX0, MC0-3, CPL): clear each block's
 * override select/value registers, then load its configuration table.
 * Compiled out together with the CAC tables above.
 */
#if 0
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif
5501bb76ff1Sjsg
/*
 * Ask the SMC to start or stop CAC tracking.  cac_enabled mirrors the
 * SMC-side state; the disable message is only sent if we previously
 * enabled it, and its result is intentionally ignored (best effort).
 */
static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (!pi->caps_cac)
		return 0;

	if (enable) {
		ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
		pi->cac_enabled = (ret == 0);
	} else if (pi->cac_enabled) {
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
		pi->cac_enabled = false;
	}

	return ret;
}
5711bb76ff1Sjsg
kv_process_firmware_header(struct amdgpu_device * adev)5721bb76ff1Sjsg static int kv_process_firmware_header(struct amdgpu_device *adev)
5731bb76ff1Sjsg {
5741bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
5751bb76ff1Sjsg u32 tmp;
5761bb76ff1Sjsg int ret;
5771bb76ff1Sjsg
5781bb76ff1Sjsg ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
5791bb76ff1Sjsg offsetof(SMU7_Firmware_Header, DpmTable),
5801bb76ff1Sjsg &tmp, pi->sram_end);
5811bb76ff1Sjsg
5821bb76ff1Sjsg if (ret == 0)
5831bb76ff1Sjsg pi->dpm_table_start = tmp;
5841bb76ff1Sjsg
5851bb76ff1Sjsg ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
5861bb76ff1Sjsg offsetof(SMU7_Firmware_Header, SoftRegisters),
5871bb76ff1Sjsg &tmp, pi->sram_end);
5881bb76ff1Sjsg
5891bb76ff1Sjsg if (ret == 0)
5901bb76ff1Sjsg pi->soft_regs_start = tmp;
5911bb76ff1Sjsg
5921bb76ff1Sjsg return ret;
5931bb76ff1Sjsg }
5941bb76ff1Sjsg
kv_enable_dpm_voltage_scaling(struct amdgpu_device * adev)5951bb76ff1Sjsg static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
5961bb76ff1Sjsg {
5971bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
5981bb76ff1Sjsg int ret;
5991bb76ff1Sjsg
6001bb76ff1Sjsg pi->graphics_voltage_change_enable = 1;
6011bb76ff1Sjsg
6021bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
6031bb76ff1Sjsg pi->dpm_table_start +
6041bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
6051bb76ff1Sjsg &pi->graphics_voltage_change_enable,
6061bb76ff1Sjsg sizeof(u8), pi->sram_end);
6071bb76ff1Sjsg
6081bb76ff1Sjsg return ret;
6091bb76ff1Sjsg }
6101bb76ff1Sjsg
kv_set_dpm_interval(struct amdgpu_device * adev)6111bb76ff1Sjsg static int kv_set_dpm_interval(struct amdgpu_device *adev)
6121bb76ff1Sjsg {
6131bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
6141bb76ff1Sjsg int ret;
6151bb76ff1Sjsg
6161bb76ff1Sjsg pi->graphics_interval = 1;
6171bb76ff1Sjsg
6181bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
6191bb76ff1Sjsg pi->dpm_table_start +
6201bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
6211bb76ff1Sjsg &pi->graphics_interval,
6221bb76ff1Sjsg sizeof(u8), pi->sram_end);
6231bb76ff1Sjsg
6241bb76ff1Sjsg return ret;
6251bb76ff1Sjsg }
6261bb76ff1Sjsg
kv_set_dpm_boot_state(struct amdgpu_device * adev)6271bb76ff1Sjsg static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
6281bb76ff1Sjsg {
6291bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
6301bb76ff1Sjsg int ret;
6311bb76ff1Sjsg
6321bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
6331bb76ff1Sjsg pi->dpm_table_start +
6341bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
6351bb76ff1Sjsg &pi->graphics_boot_level,
6361bb76ff1Sjsg sizeof(u8), pi->sram_end);
6371bb76ff1Sjsg
6381bb76ff1Sjsg return ret;
6391bb76ff1Sjsg }
6401bb76ff1Sjsg
/* Enable clock frequency-transition voting (magic vote mask — meaning
 * of the individual bits is not documented here). */
static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}
6451bb76ff1Sjsg
/* Clear all frequency-transition votes (undo kv_program_vc()). */
static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
6501bb76ff1Sjsg
/*
 * Look up the engine PLL post divider for the given sclk via the
 * vbios and store it, together with the clock itself, in the graphics
 * level table entry.  SMC table values are big endian.
 */
static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret != 0)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}
6681bb76ff1Sjsg
/* Linear VID decode: base 6200 minus 25 units per step (units are
 * whatever the SMC voltage fields expect — see callers). */
static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - 25 * voltage;
}
6741bb76ff1Sjsg
kv_convert_2bit_index_to_voltage(struct amdgpu_device * adev,u32 vid_2bit)6751bb76ff1Sjsg static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
6761bb76ff1Sjsg u32 vid_2bit)
6771bb76ff1Sjsg {
6781bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
6791bb76ff1Sjsg u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
6801bb76ff1Sjsg &pi->sys_info.vid_mapping_table,
6811bb76ff1Sjsg vid_2bit);
6821bb76ff1Sjsg
6831bb76ff1Sjsg return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
6841bb76ff1Sjsg }
6851bb76ff1Sjsg
6861bb76ff1Sjsg
/*
 * Fill in the voltage fields of one graphics level: minimum NB
 * voltage (big endian for the SMC) and the voltage-drop threshold.
 */
static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;

	return 0;
}
6971bb76ff1Sjsg
/* Store the activity target for graphics DPM level @index, big-endian
 * as the SMU table expects. Always returns 0. */
static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u16 target = (u16)at;

	pi->graphics_level[index].AT = cpu_to_be16(target);

	return 0;
}
7061bb76ff1Sjsg
/* Mark graphics DPM level @index as enabled (1) or disabled (0) for
 * activity-based level selection. */
static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (enable)
		pi->graphics_level[index].EnabledForActivity = 1;
	else
		pi->graphics_level[index].EnabledForActivity = 0;
}
7141bb76ff1Sjsg
kv_start_dpm(struct amdgpu_device * adev)7151bb76ff1Sjsg static void kv_start_dpm(struct amdgpu_device *adev)
7161bb76ff1Sjsg {
7171bb76ff1Sjsg u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
7181bb76ff1Sjsg
7191bb76ff1Sjsg tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
7201bb76ff1Sjsg WREG32_SMC(ixGENERAL_PWRMGT, tmp);
7211bb76ff1Sjsg
7221bb76ff1Sjsg amdgpu_kv_smc_dpm_enable(adev, true);
7231bb76ff1Sjsg }
7241bb76ff1Sjsg
kv_stop_dpm(struct amdgpu_device * adev)7251bb76ff1Sjsg static void kv_stop_dpm(struct amdgpu_device *adev)
7261bb76ff1Sjsg {
7271bb76ff1Sjsg amdgpu_kv_smc_dpm_enable(adev, false);
7281bb76ff1Sjsg }
7291bb76ff1Sjsg
kv_start_am(struct amdgpu_device * adev)7301bb76ff1Sjsg static void kv_start_am(struct amdgpu_device *adev)
7311bb76ff1Sjsg {
7321bb76ff1Sjsg u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
7331bb76ff1Sjsg
7341bb76ff1Sjsg sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
7351bb76ff1Sjsg SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
7361bb76ff1Sjsg sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
7371bb76ff1Sjsg
7381bb76ff1Sjsg WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
7391bb76ff1Sjsg }
7401bb76ff1Sjsg
kv_reset_am(struct amdgpu_device * adev)7411bb76ff1Sjsg static void kv_reset_am(struct amdgpu_device *adev)
7421bb76ff1Sjsg {
7431bb76ff1Sjsg u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
7441bb76ff1Sjsg
7451bb76ff1Sjsg sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
7461bb76ff1Sjsg SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
7471bb76ff1Sjsg
7481bb76ff1Sjsg WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
7491bb76ff1Sjsg }
7501bb76ff1Sjsg
/* Freeze or unfreeze the current SCLK DPM level via an SMU message. */
static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	if (freeze)
		return amdgpu_kv_notify_message_to_smu(adev,
						       PPSMC_MSG_SCLKDPM_FreezeLevel);

	return amdgpu_kv_notify_message_to_smu(adev,
					       PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
7561bb76ff1Sjsg
/* Forcing the lowest valid level is just forcing DPM to its floor. */
static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}
7611bb76ff1Sjsg
kv_unforce_levels(struct amdgpu_device * adev)7621bb76ff1Sjsg static int kv_unforce_levels(struct amdgpu_device *adev)
7631bb76ff1Sjsg {
7641bb76ff1Sjsg if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
7651bb76ff1Sjsg return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
7661bb76ff1Sjsg else
7671bb76ff1Sjsg return kv_set_enabled_levels(adev);
7681bb76ff1Sjsg }
7691bb76ff1Sjsg
kv_update_sclk_t(struct amdgpu_device * adev)7701bb76ff1Sjsg static int kv_update_sclk_t(struct amdgpu_device *adev)
7711bb76ff1Sjsg {
7721bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
7731bb76ff1Sjsg u32 low_sclk_interrupt_t = 0;
7741bb76ff1Sjsg int ret = 0;
7751bb76ff1Sjsg
7761bb76ff1Sjsg if (pi->caps_sclk_throttle_low_notification) {
7771bb76ff1Sjsg low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
7781bb76ff1Sjsg
7791bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
7801bb76ff1Sjsg pi->dpm_table_start +
7811bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
7821bb76ff1Sjsg (u8 *)&low_sclk_interrupt_t,
7831bb76ff1Sjsg sizeof(u32), pi->sram_end);
7841bb76ff1Sjsg }
7851bb76ff1Sjsg return ret;
7861bb76ff1Sjsg }
7871bb76ff1Sjsg
kv_program_bootup_state(struct amdgpu_device * adev)7881bb76ff1Sjsg static int kv_program_bootup_state(struct amdgpu_device *adev)
7891bb76ff1Sjsg {
7901bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
7911bb76ff1Sjsg u32 i;
7921bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *table =
7931bb76ff1Sjsg &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
7941bb76ff1Sjsg
7951bb76ff1Sjsg if (table && table->count) {
7961bb76ff1Sjsg for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
7971bb76ff1Sjsg if (table->entries[i].clk == pi->boot_pl.sclk)
7981bb76ff1Sjsg break;
7991bb76ff1Sjsg }
8001bb76ff1Sjsg
8011bb76ff1Sjsg pi->graphics_boot_level = (u8)i;
8021bb76ff1Sjsg kv_dpm_power_level_enable(adev, i, true);
8031bb76ff1Sjsg } else {
8041bb76ff1Sjsg struct sumo_sclk_voltage_mapping_table *table =
8051bb76ff1Sjsg &pi->sys_info.sclk_voltage_mapping_table;
8061bb76ff1Sjsg
8071bb76ff1Sjsg if (table->num_max_dpm_entries == 0)
8081bb76ff1Sjsg return -EINVAL;
8091bb76ff1Sjsg
8101bb76ff1Sjsg for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
8111bb76ff1Sjsg if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
8121bb76ff1Sjsg break;
8131bb76ff1Sjsg }
8141bb76ff1Sjsg
8151bb76ff1Sjsg pi->graphics_boot_level = (u8)i;
8161bb76ff1Sjsg kv_dpm_power_level_enable(adev, i, true);
8171bb76ff1Sjsg }
8181bb76ff1Sjsg return 0;
8191bb76ff1Sjsg }
8201bb76ff1Sjsg
kv_enable_auto_thermal_throttling(struct amdgpu_device * adev)8211bb76ff1Sjsg static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
8221bb76ff1Sjsg {
8231bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
8241bb76ff1Sjsg int ret;
8251bb76ff1Sjsg
8261bb76ff1Sjsg pi->graphics_therm_throttle_enable = 1;
8271bb76ff1Sjsg
8281bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
8291bb76ff1Sjsg pi->dpm_table_start +
8301bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
8311bb76ff1Sjsg &pi->graphics_therm_throttle_enable,
8321bb76ff1Sjsg sizeof(u8), pi->sram_end);
8331bb76ff1Sjsg
8341bb76ff1Sjsg return ret;
8351bb76ff1Sjsg }
8361bb76ff1Sjsg
kv_upload_dpm_settings(struct amdgpu_device * adev)8371bb76ff1Sjsg static int kv_upload_dpm_settings(struct amdgpu_device *adev)
8381bb76ff1Sjsg {
8391bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
8401bb76ff1Sjsg int ret;
8411bb76ff1Sjsg
8421bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
8431bb76ff1Sjsg pi->dpm_table_start +
8441bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
8451bb76ff1Sjsg (u8 *)&pi->graphics_level,
8461bb76ff1Sjsg sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
8471bb76ff1Sjsg pi->sram_end);
8481bb76ff1Sjsg
8491bb76ff1Sjsg if (ret)
8501bb76ff1Sjsg return ret;
8511bb76ff1Sjsg
8521bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
8531bb76ff1Sjsg pi->dpm_table_start +
8541bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
8551bb76ff1Sjsg &pi->graphics_dpm_level_count,
8561bb76ff1Sjsg sizeof(u8), pi->sram_end);
8571bb76ff1Sjsg
8581bb76ff1Sjsg return ret;
8591bb76ff1Sjsg }
8601bb76ff1Sjsg
/* Absolute difference of two unsigned clocks, avoiding wraparound. */
static u32 kv_get_clock_difference(u32 a, u32 b)
{
	if (a >= b)
		return a - b;
	return b - a;
}
8651bb76ff1Sjsg
/* Map a UVD/VCE/SAMU clock to its DFS bypass control encoding.
 * A clock within 200 units of a known target selects that target's
 * encoding; anything else (or DFS bypass disabled) selects 0. */
static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (!pi->caps_enable_dfs_bypass)
		return 0;

	if (kv_get_clock_difference(clk, 40000) < 200)
		return 3;
	if (kv_get_clock_difference(clk, 30000) < 200)
		return 2;
	if (kv_get_clock_difference(clk, 20000) < 200)
		return 7;
	if (kv_get_clock_difference(clk, 15000) < 200)
		return 6;
	if (kv_get_clock_difference(clk, 10000) < 200)
		return 8;

	return 0;
}
8901bb76ff1Sjsg
/* Build the UVD DPM level table from the VBIOS clock/voltage dependency
 * table and upload it (level count, interval, levels) to the SMU.
 * Returns 0 on success (including when there is no table to program),
 * or a negative error code from the atombios/SMC helpers. */
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	/* Nothing to program is not an error. */
	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		/* Stop at entries above the high-voltage threshold, if one is set. */
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		/* SMU table fields are big-endian. */
		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		/* Resolve vclk and dclk post dividers via the VBIOS. */
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	/* Upload the level count... */
	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
					  (u8 *)&pi->uvd_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	/* ...the sampling interval... */
	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UVDInterval),
					  &pi->uvd_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	/* ...and the full level array (whole capacity, not just used entries). */
	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevel),
					  (u8 *)&pi->uvd_level,
					  sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
					  pi->sram_end);

	return ret;

}
9611bb76ff1Sjsg
/* Build the VCE DPM level table from the VBIOS clock/voltage dependency
 * table and upload it (level count, interval, levels) to the SMU.
 * Returns 0 on success (including when there is no table to program),
 * or a negative error code from the atombios/SMC helpers. */
static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	/* Nothing to program is not an error. */
	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		/* Stop at entries above the high-voltage threshold, if one is set. */
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		/* SMU table fields are big-endian. */
		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		/* Resolve the evclk post divider via the VBIOS. */
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	/* Upload the level count... */
	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
					  (u8 *)&pi->vce_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	/* ...the sampling interval... */
	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VCEInterval),
					  (u8 *)&pi->vce_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	/* ...and the full level array (whole capacity, not just used entries). */
	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevel),
					  (u8 *)&pi->vce_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
					  pi->sram_end);

	return ret;
}
10241bb76ff1Sjsg
kv_populate_samu_table(struct amdgpu_device * adev)10251bb76ff1Sjsg static int kv_populate_samu_table(struct amdgpu_device *adev)
10261bb76ff1Sjsg {
10271bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
10281bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *table =
10291bb76ff1Sjsg &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
10301bb76ff1Sjsg struct atom_clock_dividers dividers;
10311bb76ff1Sjsg int ret;
10321bb76ff1Sjsg u32 i;
10331bb76ff1Sjsg
10341bb76ff1Sjsg if (table == NULL || table->count == 0)
10351bb76ff1Sjsg return 0;
10361bb76ff1Sjsg
10371bb76ff1Sjsg pi->samu_level_count = 0;
10381bb76ff1Sjsg for (i = 0; i < table->count; i++) {
10391bb76ff1Sjsg if (pi->high_voltage_t &&
10401bb76ff1Sjsg pi->high_voltage_t < table->entries[i].v)
10411bb76ff1Sjsg break;
10421bb76ff1Sjsg
10431bb76ff1Sjsg pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
10441bb76ff1Sjsg pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
10451bb76ff1Sjsg
10461bb76ff1Sjsg pi->samu_level[i].ClkBypassCntl =
10471bb76ff1Sjsg (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
10481bb76ff1Sjsg
10491bb76ff1Sjsg ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
10501bb76ff1Sjsg table->entries[i].clk, false, ÷rs);
10511bb76ff1Sjsg if (ret)
10521bb76ff1Sjsg return ret;
10531bb76ff1Sjsg pi->samu_level[i].Divider = (u8)dividers.post_div;
10541bb76ff1Sjsg
10551bb76ff1Sjsg pi->samu_level_count++;
10561bb76ff1Sjsg }
10571bb76ff1Sjsg
10581bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
10591bb76ff1Sjsg pi->dpm_table_start +
10601bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
10611bb76ff1Sjsg (u8 *)&pi->samu_level_count,
10621bb76ff1Sjsg sizeof(u8),
10631bb76ff1Sjsg pi->sram_end);
10641bb76ff1Sjsg if (ret)
10651bb76ff1Sjsg return ret;
10661bb76ff1Sjsg
10671bb76ff1Sjsg pi->samu_interval = 1;
10681bb76ff1Sjsg
10691bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
10701bb76ff1Sjsg pi->dpm_table_start +
10711bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
10721bb76ff1Sjsg (u8 *)&pi->samu_interval,
10731bb76ff1Sjsg sizeof(u8),
10741bb76ff1Sjsg pi->sram_end);
10751bb76ff1Sjsg if (ret)
10761bb76ff1Sjsg return ret;
10771bb76ff1Sjsg
10781bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
10791bb76ff1Sjsg pi->dpm_table_start +
10801bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, SamuLevel),
10811bb76ff1Sjsg (u8 *)&pi->samu_level,
10821bb76ff1Sjsg sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
10831bb76ff1Sjsg pi->sram_end);
10841bb76ff1Sjsg if (ret)
10851bb76ff1Sjsg return ret;
10861bb76ff1Sjsg
10871bb76ff1Sjsg return ret;
10881bb76ff1Sjsg }
10891bb76ff1Sjsg
10901bb76ff1Sjsg
kv_populate_acp_table(struct amdgpu_device * adev)10911bb76ff1Sjsg static int kv_populate_acp_table(struct amdgpu_device *adev)
10921bb76ff1Sjsg {
10931bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
10941bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *table =
10951bb76ff1Sjsg &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
10961bb76ff1Sjsg struct atom_clock_dividers dividers;
10971bb76ff1Sjsg int ret;
10981bb76ff1Sjsg u32 i;
10991bb76ff1Sjsg
11001bb76ff1Sjsg if (table == NULL || table->count == 0)
11011bb76ff1Sjsg return 0;
11021bb76ff1Sjsg
11031bb76ff1Sjsg pi->acp_level_count = 0;
11041bb76ff1Sjsg for (i = 0; i < table->count; i++) {
11051bb76ff1Sjsg pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
11061bb76ff1Sjsg pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
11071bb76ff1Sjsg
11081bb76ff1Sjsg ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
11091bb76ff1Sjsg table->entries[i].clk, false, ÷rs);
11101bb76ff1Sjsg if (ret)
11111bb76ff1Sjsg return ret;
11121bb76ff1Sjsg pi->acp_level[i].Divider = (u8)dividers.post_div;
11131bb76ff1Sjsg
11141bb76ff1Sjsg pi->acp_level_count++;
11151bb76ff1Sjsg }
11161bb76ff1Sjsg
11171bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
11181bb76ff1Sjsg pi->dpm_table_start +
11191bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
11201bb76ff1Sjsg (u8 *)&pi->acp_level_count,
11211bb76ff1Sjsg sizeof(u8),
11221bb76ff1Sjsg pi->sram_end);
11231bb76ff1Sjsg if (ret)
11241bb76ff1Sjsg return ret;
11251bb76ff1Sjsg
11261bb76ff1Sjsg pi->acp_interval = 1;
11271bb76ff1Sjsg
11281bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
11291bb76ff1Sjsg pi->dpm_table_start +
11301bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, ACPInterval),
11311bb76ff1Sjsg (u8 *)&pi->acp_interval,
11321bb76ff1Sjsg sizeof(u8),
11331bb76ff1Sjsg pi->sram_end);
11341bb76ff1Sjsg if (ret)
11351bb76ff1Sjsg return ret;
11361bb76ff1Sjsg
11371bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
11381bb76ff1Sjsg pi->dpm_table_start +
11391bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, AcpLevel),
11401bb76ff1Sjsg (u8 *)&pi->acp_level,
11411bb76ff1Sjsg sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
11421bb76ff1Sjsg pi->sram_end);
11431bb76ff1Sjsg if (ret)
11441bb76ff1Sjsg return ret;
11451bb76ff1Sjsg
11461bb76ff1Sjsg return ret;
11471bb76ff1Sjsg }
11481bb76ff1Sjsg
kv_calculate_dfs_bypass_settings(struct amdgpu_device * adev)11491bb76ff1Sjsg static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
11501bb76ff1Sjsg {
11511bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
11521bb76ff1Sjsg u32 i;
11531bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *table =
11541bb76ff1Sjsg &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
11551bb76ff1Sjsg
11561bb76ff1Sjsg if (table && table->count) {
11571bb76ff1Sjsg for (i = 0; i < pi->graphics_dpm_level_count; i++) {
11581bb76ff1Sjsg if (pi->caps_enable_dfs_bypass) {
11591bb76ff1Sjsg if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
11601bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 3;
11611bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
11621bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 2;
11631bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
11641bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 7;
11651bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
11661bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 6;
11671bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
11681bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 8;
11691bb76ff1Sjsg else
11701bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 0;
11711bb76ff1Sjsg } else {
11721bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 0;
11731bb76ff1Sjsg }
11741bb76ff1Sjsg }
11751bb76ff1Sjsg } else {
11761bb76ff1Sjsg struct sumo_sclk_voltage_mapping_table *table =
11771bb76ff1Sjsg &pi->sys_info.sclk_voltage_mapping_table;
11781bb76ff1Sjsg for (i = 0; i < pi->graphics_dpm_level_count; i++) {
11791bb76ff1Sjsg if (pi->caps_enable_dfs_bypass) {
11801bb76ff1Sjsg if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
11811bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 3;
11821bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
11831bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 2;
11841bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
11851bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 7;
11861bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
11871bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 6;
11881bb76ff1Sjsg else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
11891bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 8;
11901bb76ff1Sjsg else
11911bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 0;
11921bb76ff1Sjsg } else {
11931bb76ff1Sjsg pi->graphics_level[i].ClkBypassCntl = 0;
11941bb76ff1Sjsg }
11951bb76ff1Sjsg }
11961bb76ff1Sjsg }
11971bb76ff1Sjsg }
11981bb76ff1Sjsg
/* Toggle ultra-low-voltage mode through the SMU. */
static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableULV);

	return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableULV);
}
12041bb76ff1Sjsg
kv_reset_acp_boot_level(struct amdgpu_device * adev)12051bb76ff1Sjsg static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
12061bb76ff1Sjsg {
12071bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
12081bb76ff1Sjsg
12091bb76ff1Sjsg pi->acp_boot_level = 0xff;
12101bb76ff1Sjsg }
12111bb76ff1Sjsg
kv_update_current_ps(struct amdgpu_device * adev,struct amdgpu_ps * rps)12121bb76ff1Sjsg static void kv_update_current_ps(struct amdgpu_device *adev,
12131bb76ff1Sjsg struct amdgpu_ps *rps)
12141bb76ff1Sjsg {
12151bb76ff1Sjsg struct kv_ps *new_ps = kv_get_ps(rps);
12161bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
12171bb76ff1Sjsg
12181bb76ff1Sjsg pi->current_rps = *rps;
12191bb76ff1Sjsg pi->current_ps = *new_ps;
12201bb76ff1Sjsg pi->current_rps.ps_priv = &pi->current_ps;
12211bb76ff1Sjsg adev->pm.dpm.current_ps = &pi->current_rps;
12221bb76ff1Sjsg }
12231bb76ff1Sjsg
kv_update_requested_ps(struct amdgpu_device * adev,struct amdgpu_ps * rps)12241bb76ff1Sjsg static void kv_update_requested_ps(struct amdgpu_device *adev,
12251bb76ff1Sjsg struct amdgpu_ps *rps)
12261bb76ff1Sjsg {
12271bb76ff1Sjsg struct kv_ps *new_ps = kv_get_ps(rps);
12281bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
12291bb76ff1Sjsg
12301bb76ff1Sjsg pi->requested_rps = *rps;
12311bb76ff1Sjsg pi->requested_ps = *new_ps;
12321bb76ff1Sjsg pi->requested_rps.ps_priv = &pi->requested_ps;
12331bb76ff1Sjsg adev->pm.dpm.requested_ps = &pi->requested_rps;
12341bb76ff1Sjsg }
12351bb76ff1Sjsg
/* Toggle BAPM via the SMU, but only when BAPM is enabled for this
 * device; failures are logged, not propagated. */
static void kv_dpm_enable_bapm(void *handle, bool enable)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	if (!pi->bapm_enable)
		return;

	if (amdgpu_kv_smc_bapm_enable(adev, enable))
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
}
12481bb76ff1Sjsg
kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)12491bb76ff1Sjsg static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
12501bb76ff1Sjsg {
12511bb76ff1Sjsg switch (sensor) {
12521bb76ff1Sjsg case THERMAL_TYPE_KV:
12531bb76ff1Sjsg return true;
12541bb76ff1Sjsg case THERMAL_TYPE_NONE:
12551bb76ff1Sjsg case THERMAL_TYPE_EXTERNAL:
12561bb76ff1Sjsg case THERMAL_TYPE_EXTERNAL_GPIO:
12571bb76ff1Sjsg default:
12581bb76ff1Sjsg return false;
12591bb76ff1Sjsg }
12601bb76ff1Sjsg }
12611bb76ff1Sjsg
/* Bring DPM fully up: parse the SMU firmware header, build and upload
 * the graphics/UVD/VCE/SAMU/ACP level tables, start automatic power
 * management and the SMU DPM engine, then hook up thermal interrupts
 * when an internal sensor is present. Steps are strictly ordered;
 * the first failing step's error code is returned. */
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	/* FPS limits are best-effort here; the return value is ignored. */
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);
#if 0
	kv_initialize_hardware_cac_manager(adev);
#endif
	/* Start automatic power management before enabling DPM proper. */
	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	/* BAPM starts disabled; kv_dpm_enable_bapm() toggles it later. */
	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	/* Enable thermal interrupts only with an installed IRQ handler
	 * and an internal thermal sensor. */
	if (adev->irq.installed &&
	    kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	/* ret is 0 here: every failing path above returned early. */
	return ret;
}
13721bb76ff1Sjsg
/* Tear DPM down in the reverse order of kv_dpm_enable(): drop thermal
 * interrupts, disable BAPM/NB DPM, power the engine blocks back up,
 * then stop CAC/DIDT/DPM and reset automatic power management. The
 * current power state is reset to the boot state at the end. */
static void kv_dpm_disable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int err;

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	/* Best-effort: log but continue tearing down on failure. */
	err = amdgpu_kv_smc_bapm_enable(adev, false);
	if (err)
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	if (pi->caps_vce_pg) /* power on the VCE block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
	if (pi->caps_uvd_pg) /* power on the UVD block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}
14071bb76ff1Sjsg
#if 0
/* Write @value into the SMU soft-register area at @reg_offset.
 * NOTE(review): only sizeof(u16) bytes of the u32 @value are copied;
 * looks like this should be sizeof(u32) -- confirm before enabling
 * this (currently compiled-out) helper. */
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

/* Read a 32-bit SMU soft register at @reg_offset into @value. */
static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif
14271bb76ff1Sjsg
kv_init_sclk_t(struct amdgpu_device * adev)14281bb76ff1Sjsg static void kv_init_sclk_t(struct amdgpu_device *adev)
14291bb76ff1Sjsg {
14301bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
14311bb76ff1Sjsg
14321bb76ff1Sjsg pi->low_sclk_interrupt_t = 0;
14331bb76ff1Sjsg }
14341bb76ff1Sjsg
kv_init_fps_limits(struct amdgpu_device * adev)14351bb76ff1Sjsg static int kv_init_fps_limits(struct amdgpu_device *adev)
14361bb76ff1Sjsg {
14371bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
14381bb76ff1Sjsg int ret = 0;
14391bb76ff1Sjsg
14401bb76ff1Sjsg if (pi->caps_fps) {
14411bb76ff1Sjsg u16 tmp;
14421bb76ff1Sjsg
14431bb76ff1Sjsg tmp = 45;
14441bb76ff1Sjsg pi->fps_high_t = cpu_to_be16(tmp);
14451bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
14461bb76ff1Sjsg pi->dpm_table_start +
14471bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, FpsHighT),
14481bb76ff1Sjsg (u8 *)&pi->fps_high_t,
14491bb76ff1Sjsg sizeof(u16), pi->sram_end);
14501bb76ff1Sjsg
14511bb76ff1Sjsg tmp = 30;
14521bb76ff1Sjsg pi->fps_low_t = cpu_to_be16(tmp);
14531bb76ff1Sjsg
14541bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
14551bb76ff1Sjsg pi->dpm_table_start +
14561bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, FpsLowT),
14571bb76ff1Sjsg (u8 *)&pi->fps_low_t,
14581bb76ff1Sjsg sizeof(u16), pi->sram_end);
14591bb76ff1Sjsg
14601bb76ff1Sjsg }
14611bb76ff1Sjsg return ret;
14621bb76ff1Sjsg }
14631bb76ff1Sjsg
kv_init_powergate_state(struct amdgpu_device * adev)14641bb76ff1Sjsg static void kv_init_powergate_state(struct amdgpu_device *adev)
14651bb76ff1Sjsg {
14661bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
14671bb76ff1Sjsg
14681bb76ff1Sjsg pi->uvd_power_gated = false;
14691bb76ff1Sjsg pi->vce_power_gated = false;
14701bb76ff1Sjsg pi->samu_power_gated = false;
14711bb76ff1Sjsg pi->acp_power_gated = false;
14721bb76ff1Sjsg
14731bb76ff1Sjsg }
14741bb76ff1Sjsg
/* Turn UVD DPM on or off via the corresponding SMC message. */
static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDDPM_Enable);

	return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDDPM_Disable);
}
14801bb76ff1Sjsg
/* Turn VCE DPM on or off via the corresponding SMC message. */
static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEDPM_Enable);

	return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEDPM_Disable);
}
14861bb76ff1Sjsg
/* Turn SAMU DPM on or off via the corresponding SMC message. */
static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMUDPM_Enable);

	return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMUDPM_Disable);
}
14921bb76ff1Sjsg
/* Turn ACP DPM on or off via the corresponding SMC message. */
static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPDPM_Enable);

	return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPDPM_Disable);
}
14981bb76ff1Sjsg
/*
 * Ungate (gate == false) or gate UVD DPM.
 *
 * On ungate: choose the UVD boot level (top entry of the UVD clock/voltage
 * dependency table, or 0 when the table is empty), upload it into the SMU
 * DPM table, and tell the SMC which UVD levels may be used — only the boot
 * level when UVD DPM is unsupported or stable-p-state is requested,
 * otherwise all five levels (mask 0x1f).  Finally enable/disable UVD DPM.
 *
 * Returns 0 on success or the SMC copy/notify error code.
 */
static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		/* Publish the boot level to the SMU-side DPM table. */
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
						  (uint8_t *)&pi->uvd_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}
15341bb76ff1Sjsg
kv_get_vce_boot_level(struct amdgpu_device * adev,u32 evclk)15351bb76ff1Sjsg static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
15361bb76ff1Sjsg {
15371bb76ff1Sjsg u8 i;
15381bb76ff1Sjsg struct amdgpu_vce_clock_voltage_dependency_table *table =
15391bb76ff1Sjsg &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
15401bb76ff1Sjsg
15411bb76ff1Sjsg for (i = 0; i < table->count; i++) {
15421bb76ff1Sjsg if (table->entries[i].evclk >= evclk)
15431bb76ff1Sjsg break;
15441bb76ff1Sjsg }
15451bb76ff1Sjsg
15461bb76ff1Sjsg return i;
15471bb76ff1Sjsg }
15481bb76ff1Sjsg
/*
 * React to a VCE clock transition between power states.
 *
 * evclk 0 -> non-zero: VCE is coming up.  Pick the VCE boot level (top
 * table entry under stable-p-state, otherwise the lowest entry that
 * satisfies the new evclk), upload it to the SMU DPM table, restrict the
 * enabled-level mask to that level when stable-p-state is set, and enable
 * VCE DPM.  evclk non-zero -> 0: VCE is going idle; disable VCE DPM.
 * Any other combination is a no-op.
 *
 * Returns 0 on success or the SMC copy error code.
 */
static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
						  (u8 *)&pi->vce_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_VCEDPM_SetEnabledMask,
								 (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
	}

	return 0;
}
15841bb76ff1Sjsg
/*
 * Ungate (gate == false) or gate SAMU DPM.
 *
 * On ungate: choose the SAMU boot level (top entry of the SAMU
 * clock/voltage dependency table under stable-p-state, otherwise level 0),
 * upload it to the SMU DPM table, and under stable-p-state restrict the
 * enabled-level mask to that single level.  Finally enable/disable SAMU
 * DPM.  Returns 0 on success or the SMC copy/notify error code.
 */
static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
						  (u8 *)&pi->samu_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_SAMUDPM_SetEnabledMask,
								 (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}
16151bb76ff1Sjsg
kv_get_acp_boot_level(struct amdgpu_device * adev)16161bb76ff1Sjsg static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
16171bb76ff1Sjsg {
16181bb76ff1Sjsg return 0;
16191bb76ff1Sjsg }
16201bb76ff1Sjsg
kv_update_acp_boot_level(struct amdgpu_device * adev)16211bb76ff1Sjsg static void kv_update_acp_boot_level(struct amdgpu_device *adev)
16221bb76ff1Sjsg {
16231bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
16241bb76ff1Sjsg u8 acp_boot_level;
16251bb76ff1Sjsg
16261bb76ff1Sjsg if (!pi->caps_stable_p_state) {
16271bb76ff1Sjsg acp_boot_level = kv_get_acp_boot_level(adev);
16281bb76ff1Sjsg if (acp_boot_level != pi->acp_boot_level) {
16291bb76ff1Sjsg pi->acp_boot_level = acp_boot_level;
16301bb76ff1Sjsg amdgpu_kv_send_msg_to_smc_with_parameter(adev,
16311bb76ff1Sjsg PPSMC_MSG_ACPDPM_SetEnabledMask,
16321bb76ff1Sjsg (1 << pi->acp_boot_level));
16331bb76ff1Sjsg }
16341bb76ff1Sjsg }
16351bb76ff1Sjsg }
16361bb76ff1Sjsg
/*
 * Ungate (gate == false) or gate ACP DPM.
 *
 * On ungate: choose the ACP boot level (top entry of the ACP clock/voltage
 * dependency table under stable-p-state, otherwise whatever
 * kv_get_acp_boot_level() picks), upload it to the SMU DPM table, and
 * under stable-p-state restrict the enabled-level mask to that single
 * level.  Finally enable/disable ACP DPM.  Returns 0 on success or the
 * SMC copy/notify error code.
 */
static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
						  (u8 *)&pi->acp_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}
16671bb76ff1Sjsg
/*
 * Gate or ungate the UVD block.
 *
 * The ordering is firmware-mandated: when gating, first stop the IP block
 * (PG state GATE), then gate UVD DPM, and only then ask the SMC to cut
 * power (if UVD powergating is supported).  Ungating runs the mirror
 * sequence: SMC power-on, re-enable UVD DPM, then ungate the IP block.
 */
static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_UNGATE);
	}
}
16941bb76ff1Sjsg
/*
 * Gate or ungate the VCE block.
 *
 * Mirrors the UVD sequence: gate = stop the IP block, disable VCE DPM,
 * then SMC power-off (if VCE powergating is supported); ungate = SMC
 * power-on, enable VCE DPM, then ungate the IP block.
 */
static void kv_dpm_powergate_vce(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->vce_power_gated = gate;

	if (gate) {
		/* stop the VCE block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		kv_enable_vce_dpm(adev, false);
		if (pi->caps_vce_pg) /* power off the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	} else {
		if (pi->caps_vce_pg) /* power on the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
		kv_enable_vce_dpm(adev, true);
		/* re-init the VCE block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_UNGATE);
	}
}
17181bb76ff1Sjsg
17191bb76ff1Sjsg
/*
 * Gate or ungate the SAMU block; no-op when already in the requested
 * state.  Gate: disable SAMU DPM first, then SMC power-off (if SAMU
 * powergating is supported).  Ungate: SMC power-on first, then re-enable
 * SAMU DPM.
 */
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}
17391bb76ff1Sjsg
/*
 * Gate or ungate the ACP block; no-op when already in the requested state
 * and skipped entirely on Kabini/Mullins.  Gate: disable ACP DPM first,
 * then SMC power-off (if ACP powergating is supported).  Ungate: SMC
 * power-on first, then re-enable ACP DPM.
 */
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}
17621bb76ff1Sjsg
/*
 * Compute [pi->lowest_valid, pi->highest_valid], the graphics DPM level
 * indices bracketing the new power state's sclk range.
 *
 * Uses the VBIOS vddc-on-sclk dependency table when it has entries,
 * otherwise the sclk/voltage mapping table.  lowest_valid is the first
 * level whose clock reaches the state's minimum sclk (clamped to the last
 * level); highest_valid is the last level whose clock does not exceed the
 * state's maximum sclk.  If the window inverts (no level falls inside the
 * state's range), collapse it to whichever single boundary level is closer
 * to the requested range.
 */
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			/* Inverted window: keep the boundary level closest to the range. */
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		/* No dependency table: fall back to the sclk/voltage mapping table. */
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			/* Inverted window: keep the boundary level closest to the range. */
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}
18241bb76ff1Sjsg
kv_update_dfs_bypass_settings(struct amdgpu_device * adev,struct amdgpu_ps * new_rps)18251bb76ff1Sjsg static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
18261bb76ff1Sjsg struct amdgpu_ps *new_rps)
18271bb76ff1Sjsg {
18281bb76ff1Sjsg struct kv_ps *new_ps = kv_get_ps(new_rps);
18291bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
18301bb76ff1Sjsg int ret = 0;
18311bb76ff1Sjsg u8 clk_bypass_cntl;
18321bb76ff1Sjsg
18331bb76ff1Sjsg if (pi->caps_enable_dfs_bypass) {
18341bb76ff1Sjsg clk_bypass_cntl = new_ps->need_dfs_bypass ?
18351bb76ff1Sjsg pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
18361bb76ff1Sjsg ret = amdgpu_kv_copy_bytes_to_smc(adev,
18371bb76ff1Sjsg (pi->dpm_table_start +
18381bb76ff1Sjsg offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
18391bb76ff1Sjsg (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
18401bb76ff1Sjsg offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
18411bb76ff1Sjsg &clk_bypass_cntl,
18421bb76ff1Sjsg sizeof(u8), pi->sram_end);
18431bb76ff1Sjsg }
18441bb76ff1Sjsg
18451bb76ff1Sjsg return ret;
18461bb76ff1Sjsg }
18471bb76ff1Sjsg
/*
 * Enable or disable northbridge DPM, tracking the current state in
 * pi->nb_dpm_enabled.  A no-op when NB DPM is unsupported or already in
 * the requested state.  Returns 0 or the SMC notify error code.
 */
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (!pi->enable_nb_dpm || pi->nb_dpm_enabled == enable)
		return 0;

	ret = amdgpu_kv_notify_message_to_smu(adev, enable ?
					      PPSMC_MSG_NBDPM_Enable :
					      PPSMC_MSG_NBDPM_Disable);
	if (ret == 0)
		pi->nb_dpm_enabled = enable;

	return ret;
}
18701bb76ff1Sjsg
kv_dpm_force_performance_level(void * handle,enum amd_dpm_forced_level level)18711bb76ff1Sjsg static int kv_dpm_force_performance_level(void *handle,
18721bb76ff1Sjsg enum amd_dpm_forced_level level)
18731bb76ff1Sjsg {
18741bb76ff1Sjsg int ret;
18751bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
18761bb76ff1Sjsg
18771bb76ff1Sjsg if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
18781bb76ff1Sjsg ret = kv_force_dpm_highest(adev);
18791bb76ff1Sjsg if (ret)
18801bb76ff1Sjsg return ret;
18811bb76ff1Sjsg } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
18821bb76ff1Sjsg ret = kv_force_dpm_lowest(adev);
18831bb76ff1Sjsg if (ret)
18841bb76ff1Sjsg return ret;
18851bb76ff1Sjsg } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
18861bb76ff1Sjsg ret = kv_unforce_levels(adev);
18871bb76ff1Sjsg if (ret)
18881bb76ff1Sjsg return ret;
18891bb76ff1Sjsg }
18901bb76ff1Sjsg
18911bb76ff1Sjsg adev->pm.dpm.forced_level = level;
18921bb76ff1Sjsg
18931bb76ff1Sjsg return 0;
18941bb76ff1Sjsg }
18951bb76ff1Sjsg
kv_dpm_pre_set_power_state(void * handle)18961bb76ff1Sjsg static int kv_dpm_pre_set_power_state(void *handle)
18971bb76ff1Sjsg {
18981bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
18991bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
19001bb76ff1Sjsg struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
19011bb76ff1Sjsg struct amdgpu_ps *new_ps = &requested_ps;
19021bb76ff1Sjsg
19031bb76ff1Sjsg kv_update_requested_ps(adev, new_ps);
19041bb76ff1Sjsg
19051bb76ff1Sjsg kv_apply_state_adjust_rules(adev,
19061bb76ff1Sjsg &pi->requested_rps,
19071bb76ff1Sjsg &pi->current_rps);
19081bb76ff1Sjsg
19091bb76ff1Sjsg return 0;
19101bb76ff1Sjsg }
19111bb76ff1Sjsg
/*
 * Program the staged (requested) power state into the hardware.
 *
 * Re-syncs BAPM with the AC/DC power source first when BAPM is enabled.
 * Kabini/Mullins have no per-level freeze, so they reprogram around a
 * force-lowest/unforce sequence; other parts freeze SCLK DPM while the
 * tables are uploaded.  Both paths finish by reacting to VCE clock
 * changes, updating the low-SCLK threshold and (re)enabling NB DPM.
 * The call order within each branch is firmware-mandated — do not
 * reorder.  Returns 0 on success or the first error encountered.
 */
static int kv_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			/* No SCLK DPM freeze on KB/ML: pin the lowest level
			 * while the new tables are uploaded. */
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			/* Freeze SCLK DPM around the table upload. */
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}
19861bb76ff1Sjsg
kv_dpm_post_set_power_state(void * handle)19871bb76ff1Sjsg static void kv_dpm_post_set_power_state(void *handle)
19881bb76ff1Sjsg {
19891bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
19901bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
19911bb76ff1Sjsg struct amdgpu_ps *new_ps = &pi->requested_rps;
19921bb76ff1Sjsg
19931bb76ff1Sjsg kv_update_current_ps(adev, new_ps);
19941bb76ff1Sjsg }
19951bb76ff1Sjsg
/*
 * One-time ASIC-side DPM setup: hand clock control to the SMU, then reset
 * driver-side powergating bookkeeping and the low-SCLK interrupt
 * threshold.
 */
static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}
20021bb76ff1Sjsg
#if 0
/*
 * Unused ASIC DPM reset path, kept for reference.  Kabini/Mullins force
 * the lowest valid level around re-initializing and uploading the bootup
 * state; other parts freeze SCLK DPM during the upload and then pin the
 * graphics boot level.
 */
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}
#endif
20251bb76ff1Sjsg
kv_construct_max_power_limits_table(struct amdgpu_device * adev,struct amdgpu_clock_and_voltage_limits * table)20261bb76ff1Sjsg static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
20271bb76ff1Sjsg struct amdgpu_clock_and_voltage_limits *table)
20281bb76ff1Sjsg {
20291bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
20301bb76ff1Sjsg
20311bb76ff1Sjsg if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
20321bb76ff1Sjsg int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
20331bb76ff1Sjsg table->sclk =
20341bb76ff1Sjsg pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
20351bb76ff1Sjsg table->vddc =
20361bb76ff1Sjsg kv_convert_2bit_index_to_voltage(adev,
20371bb76ff1Sjsg pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
20381bb76ff1Sjsg }
20391bb76ff1Sjsg
20401bb76ff1Sjsg table->mclk = pi->sys_info.nbp_memory_clock[0];
20411bb76ff1Sjsg }
20421bb76ff1Sjsg
kv_patch_voltage_values(struct amdgpu_device * adev)20431bb76ff1Sjsg static void kv_patch_voltage_values(struct amdgpu_device *adev)
20441bb76ff1Sjsg {
20451bb76ff1Sjsg int i;
20461bb76ff1Sjsg struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
20471bb76ff1Sjsg &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
20481bb76ff1Sjsg struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
20491bb76ff1Sjsg &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
20501bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *samu_table =
20511bb76ff1Sjsg &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
20521bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *acp_table =
20531bb76ff1Sjsg &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
20541bb76ff1Sjsg
20551bb76ff1Sjsg if (uvd_table->count) {
20561bb76ff1Sjsg for (i = 0; i < uvd_table->count; i++)
20571bb76ff1Sjsg uvd_table->entries[i].v =
20581bb76ff1Sjsg kv_convert_8bit_index_to_voltage(adev,
20591bb76ff1Sjsg uvd_table->entries[i].v);
20601bb76ff1Sjsg }
20611bb76ff1Sjsg
20621bb76ff1Sjsg if (vce_table->count) {
20631bb76ff1Sjsg for (i = 0; i < vce_table->count; i++)
20641bb76ff1Sjsg vce_table->entries[i].v =
20651bb76ff1Sjsg kv_convert_8bit_index_to_voltage(adev,
20661bb76ff1Sjsg vce_table->entries[i].v);
20671bb76ff1Sjsg }
20681bb76ff1Sjsg
20691bb76ff1Sjsg if (samu_table->count) {
20701bb76ff1Sjsg for (i = 0; i < samu_table->count; i++)
20711bb76ff1Sjsg samu_table->entries[i].v =
20721bb76ff1Sjsg kv_convert_8bit_index_to_voltage(adev,
20731bb76ff1Sjsg samu_table->entries[i].v);
20741bb76ff1Sjsg }
20751bb76ff1Sjsg
20761bb76ff1Sjsg if (acp_table->count) {
20771bb76ff1Sjsg for (i = 0; i < acp_table->count; i++)
20781bb76ff1Sjsg acp_table->entries[i].v =
20791bb76ff1Sjsg kv_convert_8bit_index_to_voltage(adev,
20801bb76ff1Sjsg acp_table->entries[i].v);
20811bb76ff1Sjsg }
20821bb76ff1Sjsg
20831bb76ff1Sjsg }
20841bb76ff1Sjsg
kv_construct_boot_state(struct amdgpu_device * adev)20851bb76ff1Sjsg static void kv_construct_boot_state(struct amdgpu_device *adev)
20861bb76ff1Sjsg {
20871bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
20881bb76ff1Sjsg
20891bb76ff1Sjsg pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
20901bb76ff1Sjsg pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
20911bb76ff1Sjsg pi->boot_pl.ds_divider_index = 0;
20921bb76ff1Sjsg pi->boot_pl.ss_divider_index = 0;
20931bb76ff1Sjsg pi->boot_pl.allow_gnb_slow = 1;
20941bb76ff1Sjsg pi->boot_pl.force_nbp_state = 0;
20951bb76ff1Sjsg pi->boot_pl.display_wm = 0;
20961bb76ff1Sjsg pi->boot_pl.vce_wm = 0;
20971bb76ff1Sjsg }
20981bb76ff1Sjsg
kv_force_dpm_highest(struct amdgpu_device * adev)20991bb76ff1Sjsg static int kv_force_dpm_highest(struct amdgpu_device *adev)
21001bb76ff1Sjsg {
21011bb76ff1Sjsg int ret;
21021bb76ff1Sjsg u32 enable_mask, i;
21031bb76ff1Sjsg
21041bb76ff1Sjsg ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
21051bb76ff1Sjsg if (ret)
21061bb76ff1Sjsg return ret;
21071bb76ff1Sjsg
21081bb76ff1Sjsg for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
21091bb76ff1Sjsg if (enable_mask & (1 << i))
21101bb76ff1Sjsg break;
21111bb76ff1Sjsg }
21121bb76ff1Sjsg
21131bb76ff1Sjsg if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
21141bb76ff1Sjsg return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
21151bb76ff1Sjsg else
21161bb76ff1Sjsg return kv_set_enabled_level(adev, i);
21171bb76ff1Sjsg }
21181bb76ff1Sjsg
kv_force_dpm_lowest(struct amdgpu_device * adev)21191bb76ff1Sjsg static int kv_force_dpm_lowest(struct amdgpu_device *adev)
21201bb76ff1Sjsg {
21211bb76ff1Sjsg int ret;
21221bb76ff1Sjsg u32 enable_mask, i;
21231bb76ff1Sjsg
21241bb76ff1Sjsg ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
21251bb76ff1Sjsg if (ret)
21261bb76ff1Sjsg return ret;
21271bb76ff1Sjsg
21281bb76ff1Sjsg for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
21291bb76ff1Sjsg if (enable_mask & (1 << i))
21301bb76ff1Sjsg break;
21311bb76ff1Sjsg }
21321bb76ff1Sjsg
21331bb76ff1Sjsg if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
21341bb76ff1Sjsg return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
21351bb76ff1Sjsg else
21361bb76ff1Sjsg return kv_set_enabled_level(adev, i);
21371bb76ff1Sjsg }
21381bb76ff1Sjsg
kv_get_sleep_divider_id_from_clock(struct amdgpu_device * adev,u32 sclk,u32 min_sclk_in_sr)21391bb76ff1Sjsg static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
21401bb76ff1Sjsg u32 sclk, u32 min_sclk_in_sr)
21411bb76ff1Sjsg {
21421bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
21431bb76ff1Sjsg u32 i;
21441bb76ff1Sjsg u32 temp;
21451bb76ff1Sjsg u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
21461bb76ff1Sjsg
21471bb76ff1Sjsg if (sclk < min)
21481bb76ff1Sjsg return 0;
21491bb76ff1Sjsg
21501bb76ff1Sjsg if (!pi->caps_sclk_ds)
21511bb76ff1Sjsg return 0;
21521bb76ff1Sjsg
21531bb76ff1Sjsg for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
21541bb76ff1Sjsg temp = sclk >> i;
21551bb76ff1Sjsg if (temp >= min)
21561bb76ff1Sjsg break;
21571bb76ff1Sjsg }
21581bb76ff1Sjsg
21591bb76ff1Sjsg return (u8)i;
21601bb76ff1Sjsg }
21611bb76ff1Sjsg
kv_get_high_voltage_limit(struct amdgpu_device * adev,int * limit)21621bb76ff1Sjsg static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
21631bb76ff1Sjsg {
21641bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
21651bb76ff1Sjsg struct amdgpu_clock_voltage_dependency_table *table =
21661bb76ff1Sjsg &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
21671bb76ff1Sjsg int i;
21681bb76ff1Sjsg
21691bb76ff1Sjsg if (table && table->count) {
21701bb76ff1Sjsg for (i = table->count - 1; i >= 0; i--) {
21711bb76ff1Sjsg if (pi->high_voltage_t &&
21721bb76ff1Sjsg (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
21731bb76ff1Sjsg pi->high_voltage_t)) {
21741bb76ff1Sjsg *limit = i;
21751bb76ff1Sjsg return 0;
21761bb76ff1Sjsg }
21771bb76ff1Sjsg }
21781bb76ff1Sjsg } else {
21791bb76ff1Sjsg struct sumo_sclk_voltage_mapping_table *table =
21801bb76ff1Sjsg &pi->sys_info.sclk_voltage_mapping_table;
21811bb76ff1Sjsg
21821bb76ff1Sjsg for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
21831bb76ff1Sjsg if (pi->high_voltage_t &&
21841bb76ff1Sjsg (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
21851bb76ff1Sjsg pi->high_voltage_t)) {
21861bb76ff1Sjsg *limit = i;
21871bb76ff1Sjsg return 0;
21881bb76ff1Sjsg }
21891bb76ff1Sjsg }
21901bb76ff1Sjsg }
21911bb76ff1Sjsg
21921bb76ff1Sjsg *limit = 0;
21931bb76ff1Sjsg return 0;
21941bb76ff1Sjsg }
21951bb76ff1Sjsg
/*
 * kv_apply_state_adjust_rules - massage a requested power state so it
 * satisfies platform constraints before it is programmed.
 *
 * Clamps or overrides the per-level sclk for VCE activity, stable
 * p-state mode and the high-voltage limit, then derives the NB p-state
 * index fields later written by kv_program_nbps_index_settings().
 *
 * @adev: amdgpu device
 * @new_rps: the power state being switched to (modified in place)
 * @old_rps: the previous power state (unused in this function)
 */
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *new_rps,
                                        struct amdgpu_ps *old_rps)
{
        struct kv_ps *ps = kv_get_ps(new_rps);
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 min_sclk = 10000; /* ??? */
        u32 sclk, mclk = 0;
        int i, limit;
        bool force_high;
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        u32 stable_p_state_sclk = 0;
        struct amdgpu_clock_and_voltage_limits *max_limits =
                &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

        /* VCE states carry their own evclk/ecclk requirements */
        if (new_rps->vce_active) {
                new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                new_rps->evclk = 0;
                new_rps->ecclk = 0;
        }

        mclk = max_limits->mclk;
        sclk = min_sclk;

        if (pi->caps_stable_p_state) {
                /* stable p-state targets 75% of max AC sclk, snapped down
                 * to the nearest dependency-table entry */
                stable_p_state_sclk = (max_limits->sclk * 75) / 100;

                for (i = table->count - 1; i >= 0; i--) {
                        if (stable_p_state_sclk >= table->entries[i].clk) {
                                stable_p_state_sclk = table->entries[i].clk;
                                break;
                        }
                }

                /* NOTE(review): condition looks inverted - one would expect
                 * (i < 0) i.e. "no entry found, fall back to entries[0]";
                 * as written any match above index 0 is overwritten. This
                 * matches long-standing upstream behavior, so it is left
                 * untouched - verify against the upstream driver. */
                if (i > 0)
                        stable_p_state_sclk = table->entries[0].clk;

                sclk = stable_p_state_sclk;
        }

        /* VCE also imposes a minimum engine clock */
        if (new_rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
        }

        ps->need_dfs_bypass = true;

        /* raise every level to at least the computed minimum sclk */
        for (i = 0; i < ps->num_levels; i++) {
                if (ps->levels[i].sclk < sclk)
                        ps->levels[i].sclk = sclk;
        }

        /* cap the sclk of levels whose voltage exceeds the high-voltage
         * threshold, using whichever table describes the platform */
        if (table && table->count) {
                for (i = 0; i < ps->num_levels; i++) {
                        if (pi->high_voltage_t &&
                            (pi->high_voltage_t <
                             kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
                                kv_get_high_voltage_limit(adev, &limit);
                                ps->levels[i].sclk = table->entries[limit].clk;
                        }
                }
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;

                for (i = 0; i < ps->num_levels; i++) {
                        if (pi->high_voltage_t &&
                            (pi->high_voltage_t <
                             kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
                                kv_get_high_voltage_limit(adev, &limit);
                                ps->levels[i].sclk = table->entries[limit].sclk_frequency;
                        }
                }
        }

        /* stable p-state pins every level to the same sclk */
        if (pi->caps_stable_p_state) {
                for (i = 0; i < ps->num_levels; i++) {
                        ps->levels[i].sclk = stable_p_state_sclk;
                }
        }

        /* any UVD/VCE clock being nonzero counts as video playback */
        pi->video_start = new_rps->dclk || new_rps->vclk ||
                new_rps->evclk || new_rps->ecclk;

        if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
            ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        /* NB p-state index window: Kabini/Mullins use a fixed low window;
         * other ASICs may be forced to the high window under memory,
         * CRTC-count or video-playback pressure */
        if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
                ps->dpm0_pg_nb_ps_lo = 0x1;
                ps->dpm0_pg_nb_ps_hi = 0x0;
                ps->dpmx_nb_ps_lo = 0x1;
                ps->dpmx_nb_ps_hi = 0x0;
        } else {
                ps->dpm0_pg_nb_ps_lo = 0x3;
                ps->dpm0_pg_nb_ps_hi = 0x0;
                ps->dpmx_nb_ps_lo = 0x3;
                ps->dpmx_nb_ps_hi = 0x0;

                if (pi->sys_info.nb_dpm_enable) {
                        force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                                pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
                                pi->disable_nb_ps3_in_battery;
                        ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
                        ps->dpm0_pg_nb_ps_hi = 0x2;
                        ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
                        ps->dpmx_nb_ps_hi = 0x2;
                }
        }
}
23111bb76ff1Sjsg
/* Mark one graphics DPM level as (un)available for thermal throttling. */
static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
                                                    u32 index, bool enable)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u8 throttle_en = enable ? 1 : 0;

        pi->graphics_level[index].EnabledForThrottle = throttle_en;
}
23191bb76ff1Sjsg
kv_calculate_ds_divider(struct amdgpu_device * adev)23201bb76ff1Sjsg static int kv_calculate_ds_divider(struct amdgpu_device *adev)
23211bb76ff1Sjsg {
23221bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
23231bb76ff1Sjsg u32 sclk_in_sr = 10000; /* ??? */
23241bb76ff1Sjsg u32 i;
23251bb76ff1Sjsg
23261bb76ff1Sjsg if (pi->lowest_valid > pi->highest_valid)
23271bb76ff1Sjsg return -EINVAL;
23281bb76ff1Sjsg
23291bb76ff1Sjsg for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
23301bb76ff1Sjsg pi->graphics_level[i].DeepSleepDivId =
23311bb76ff1Sjsg kv_get_sleep_divider_id_from_clock(adev,
23321bb76ff1Sjsg be32_to_cpu(pi->graphics_level[i].SclkFrequency),
23331bb76ff1Sjsg sclk_in_sr);
23341bb76ff1Sjsg }
23351bb76ff1Sjsg return 0;
23361bb76ff1Sjsg }
23371bb76ff1Sjsg
/*
 * kv_calculate_nbps_level_settings - fill in the NB p-state related
 * fields (GnbSlow / ForceNbPs1 / UpH) of every valid graphics level.
 *
 * Kabini/Mullins apply per-level overrides driven by memory clock,
 * active CRTC count, video playback and battery state; other ASICs
 * only tweak the lowest valid level when NB DPM is enabled on battery.
 *
 * Returns 0 on success, -EINVAL if the valid-level window is empty.
 */
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i;
        bool force_high;
        struct amdgpu_clock_and_voltage_limits *max_limits =
                &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        u32 mclk = max_limits->mclk;

        if (pi->lowest_valid > pi->highest_valid)
                return -EINVAL;

        if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
                /* start every level from the "slow GNB" defaults */
                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                        pi->graphics_level[i].GnbSlow = 1;
                        pi->graphics_level[i].ForceNbPs1 = 0;
                        pi->graphics_level[i].UpH = 0;
                }

                if (!pi->sys_info.nb_dpm_enable)
                        return 0;

                force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
                              (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

                if (force_high) {
                        for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
                                pi->graphics_level[i].GnbSlow = 0;
                } else {
                        if (pi->battery_state)
                                pi->graphics_level[0].ForceNbPs1 = 1;

                        /* levels 1..4 run with the GNB at full speed */
                        pi->graphics_level[1].GnbSlow = 0;
                        pi->graphics_level[2].GnbSlow = 0;
                        pi->graphics_level[3].GnbSlow = 0;
                        pi->graphics_level[4].GnbSlow = 0;
                }
        } else {
                for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
                        pi->graphics_level[i].GnbSlow = 1;
                        pi->graphics_level[i].ForceNbPs1 = 0;
                        pi->graphics_level[i].UpH = 0;
                }

                /* on battery, bias the lowest valid level */
                if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
                        pi->graphics_level[pi->lowest_valid].UpH = 0x28;
                        pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
                        if (pi->lowest_valid != pi->highest_valid)
                                pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
                }
        }
        return 0;
}
23911bb76ff1Sjsg
kv_calculate_dpm_settings(struct amdgpu_device * adev)23921bb76ff1Sjsg static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
23931bb76ff1Sjsg {
23941bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
23951bb76ff1Sjsg u32 i;
23961bb76ff1Sjsg
23971bb76ff1Sjsg if (pi->lowest_valid > pi->highest_valid)
23981bb76ff1Sjsg return -EINVAL;
23991bb76ff1Sjsg
24001bb76ff1Sjsg for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
24011bb76ff1Sjsg pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
24021bb76ff1Sjsg
24031bb76ff1Sjsg return 0;
24041bb76ff1Sjsg }
24051bb76ff1Sjsg
/*
 * kv_init_graphics_levels - build the SMU graphics DPM level table from
 * the BIOS clock/voltage tables.
 *
 * Uses the vddc-on-sclk dependency table when it has entries, otherwise
 * the sumo sclk/voltage mapping table. Level population stops early at
 * the first entry whose voltage exceeds pi->high_voltage_t (when that
 * threshold is set). All hardware levels are then disabled; a separate
 * pass (see kv_enable_new_levels()) re-enables the valid ones.
 */
static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 i;
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

        if (table && table->count) {
                u32 vid_2bit;

                pi->graphics_dpm_level_count = 0;
                for (i = 0; i < table->count; i++) {
                        if (pi->high_voltage_t &&
                            (pi->high_voltage_t <
                             kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
                                break;

                        kv_set_divider_value(adev, i, table->entries[i].clk);
                        /* dependency table stores 7-bit vids; the SMU level
                         * wants the 2-bit encoding */
                        vid_2bit = kv_convert_vid7_to_vid2(adev,
                                                           &pi->sys_info.vid_mapping_table,
                                                           table->entries[i].v);
                        kv_set_vid(adev, i, vid_2bit);
                        kv_set_at(adev, i, pi->at[i]);
                        kv_dpm_power_level_enabled_for_throttle(adev, i, true);
                        pi->graphics_dpm_level_count++;
                }
        } else {
                struct sumo_sclk_voltage_mapping_table *table =
                        &pi->sys_info.sclk_voltage_mapping_table;

                pi->graphics_dpm_level_count = 0;
                for (i = 0; i < table->num_max_dpm_entries; i++) {
                        if (pi->high_voltage_t &&
                            pi->high_voltage_t <
                            kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
                                break;

                        kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
                        kv_set_vid(adev, i, table->entries[i].vid_2bit);
                        kv_set_at(adev, i, pi->at[i]);
                        kv_dpm_power_level_enabled_for_throttle(adev, i, true);
                        pi->graphics_dpm_level_count++;
                }
        }

        /* leave every hardware level disabled for now */
        for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
                kv_dpm_power_level_enable(adev, i, false);
}
24541bb76ff1Sjsg
kv_enable_new_levels(struct amdgpu_device * adev)24551bb76ff1Sjsg static void kv_enable_new_levels(struct amdgpu_device *adev)
24561bb76ff1Sjsg {
24571bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
24581bb76ff1Sjsg u32 i;
24591bb76ff1Sjsg
24601bb76ff1Sjsg for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
24611bb76ff1Sjsg if (i >= pi->lowest_valid && i <= pi->highest_valid)
24621bb76ff1Sjsg kv_dpm_power_level_enable(adev, i, true);
24631bb76ff1Sjsg }
24641bb76ff1Sjsg }
24651bb76ff1Sjsg
/* Restrict the SCLK DPM enabled mask to the single given level. */
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
        return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
                                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                                        1 << level);
}
24741bb76ff1Sjsg
kv_set_enabled_levels(struct amdgpu_device * adev)24751bb76ff1Sjsg static int kv_set_enabled_levels(struct amdgpu_device *adev)
24761bb76ff1Sjsg {
24771bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
24781bb76ff1Sjsg u32 i, new_mask = 0;
24791bb76ff1Sjsg
24801bb76ff1Sjsg for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
24811bb76ff1Sjsg new_mask |= (1 << i);
24821bb76ff1Sjsg
24831bb76ff1Sjsg return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
24841bb76ff1Sjsg PPSMC_MSG_SCLKDPM_SetEnabledMask,
24851bb76ff1Sjsg new_mask);
24861bb76ff1Sjsg }
24871bb76ff1Sjsg
/*
 * kv_program_nbps_index_settings - write the NB p-state lo/hi indices
 * from the power state into the NB_DPM_CONFIG_1 SMC register.
 *
 * No-op on Kabini/Mullins and when NB DPM is disabled.
 */
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
                                           struct amdgpu_ps *new_rps)
{
        struct kv_ps *new_ps = kv_get_ps(new_rps);
        struct kv_power_info *pi = kv_get_pi(adev);
        u32 nbdpmconfig1;

        if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
                return;

        if (pi->sys_info.nb_dpm_enable) {
                /* read-modify-write: only the four index fields change */
                nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
                nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
                                  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
                                  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
                                  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
                nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
                                (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
                                (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
                                (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
                WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
        }
}
25111bb76ff1Sjsg
kv_set_thermal_temperature_range(struct amdgpu_device * adev,int min_temp,int max_temp)25121bb76ff1Sjsg static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
25131bb76ff1Sjsg int min_temp, int max_temp)
25141bb76ff1Sjsg {
25151bb76ff1Sjsg int low_temp = 0 * 1000;
25161bb76ff1Sjsg int high_temp = 255 * 1000;
25171bb76ff1Sjsg u32 tmp;
25181bb76ff1Sjsg
25191bb76ff1Sjsg if (low_temp < min_temp)
25201bb76ff1Sjsg low_temp = min_temp;
25211bb76ff1Sjsg if (high_temp > max_temp)
25221bb76ff1Sjsg high_temp = max_temp;
25231bb76ff1Sjsg if (high_temp < low_temp) {
25241bb76ff1Sjsg DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
25251bb76ff1Sjsg return -EINVAL;
25261bb76ff1Sjsg }
25271bb76ff1Sjsg
25281bb76ff1Sjsg tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
25291bb76ff1Sjsg tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
25301bb76ff1Sjsg CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
25311bb76ff1Sjsg tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
25321bb76ff1Sjsg ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
25331bb76ff1Sjsg WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
25341bb76ff1Sjsg
25351bb76ff1Sjsg adev->pm.dpm.thermal.min_temp = low_temp;
25361bb76ff1Sjsg adev->pm.dpm.thermal.max_temp = high_temp;
25371bb76ff1Sjsg
25381bb76ff1Sjsg return 0;
25391bb76ff1Sjsg }
25401bb76ff1Sjsg
/*
 * Overlay of the ATOM IntegratedSystemInfo table revisions; the crev
 * returned by amdgpu_atom_parse_data_header() selects the member
 * (kv_parse_sys_info_table() only accepts crev 8 / info_8).
 */
union igp_info {
        struct _ATOM_INTEGRATED_SYSTEM_INFO info;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
        struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
25491bb76ff1Sjsg
/*
 * kv_parse_sys_info_table - pull platform limits out of the ATOM
 * IntegratedSystemInfo (v1.8) table.
 *
 * Caches bootup clocks, thermal limits, NB DPM capability and the NBP
 * state clocks in pi->sys_info, then constructs the sclk/voltage and
 * vid mapping tables and the max power limits table.
 *
 * Returns 0 on success (and also, silently, when the BIOS has no such
 * table), or -EINVAL for an unsupported table revision.
 */
static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
        struct kv_power_info *pi = kv_get_pi(adev);
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
        union igp_info *igp_info;
        u8 frev, crev;
        u16 data_offset;
        int i;

        if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                          &frev, &crev, &data_offset)) {
                igp_info = (union igp_info *)(mode_info->atom_context->bios +
                                              data_offset);

                /* only table revision 8 is understood here */
                if (crev != 8) {
                        DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
                        return -EINVAL;
                }
                pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
                pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
                pi->sys_info.bootup_nb_voltage_index =
                        le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
                /* fall back to defaults when the BIOS leaves these zero */
                if (igp_info->info_8.ucHtcTmpLmt == 0)
                        pi->sys_info.htc_tmp_lmt = 203;
                else
                        pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
                if (igp_info->info_8.ucHtcHystLmt == 0)
                        pi->sys_info.htc_hyst_lmt = 5;
                else
                        pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
                /* warn but keep going on an inconsistent thermal pair */
                if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
                        DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
                }

                /* bit 3 of ulSystemConfig flags NB DPM support */
                if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
                        pi->sys_info.nb_dpm_enable = true;
                else
                        pi->sys_info.nb_dpm_enable = false;

                for (i = 0; i < KV_NUM_NBPSTATES; i++) {
                        pi->sys_info.nbp_memory_clock[i] =
                                le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
                        pi->sys_info.nbp_n_clock[i] =
                                le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
                }
                if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
                    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
                        pi->caps_enable_dfs_bypass = true;

                sumo_construct_sclk_voltage_mapping_table(adev,
                                                          &pi->sys_info.sclk_voltage_mapping_table,
                                                          igp_info->info_8.sAvail_SCLK);

                sumo_construct_vid_mapping_table(adev,
                                                 &pi->sys_info.vid_mapping_table,
                                                 igp_info->info_8.sAvail_SCLK);

                kv_construct_max_power_limits_table(adev,
                                                    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
        }
        return 0;
}
26131bb76ff1Sjsg
/* Overlay of the ATOM PowerPlay table header revisions. */
union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

/* Overlay of the per-level clock info records; KV reads the sumo layout. */
union pplib_clock_info {
        struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
        struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
        struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
        struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

/* Overlay of the power state record layouts; the v2 layout is used here. */
union pplib_power_state {
        struct _ATOM_PPLIB_STATE v1;
        struct _ATOM_PPLIB_STATE_V2 v2;
};
26341bb76ff1Sjsg
kv_patch_boot_state(struct amdgpu_device * adev,struct kv_ps * ps)26351bb76ff1Sjsg static void kv_patch_boot_state(struct amdgpu_device *adev,
26361bb76ff1Sjsg struct kv_ps *ps)
26371bb76ff1Sjsg {
26381bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
26391bb76ff1Sjsg
26401bb76ff1Sjsg ps->num_levels = 1;
26411bb76ff1Sjsg ps->levels[0] = pi->boot_pl;
26421bb76ff1Sjsg }
26431bb76ff1Sjsg
/*
 * Copy the non-clock fields of an ATOM power state record into @rps and
 * register boot/UVD states with the DPM core. UVD clocks (vclk/dclk)
 * only exist in table revisions newer than VER1.
 */
static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
                                          struct amdgpu_ps *rps,
                                          struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
                                          u8 table_rev)
{
        struct kv_ps *ps = kv_get_ps(rps);
        bool has_uvd_clocks = table_rev > ATOM_PPLIB_NONCLOCKINFO_VER1;

        rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
        rps->class = le16_to_cpu(non_clock_info->usClassification);
        rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

        rps->vclk = has_uvd_clocks ? le32_to_cpu(non_clock_info->ulVCLK) : 0;
        rps->dclk = has_uvd_clocks ? le32_to_cpu(non_clock_info->ulDCLK) : 0;

        if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
                adev->pm.dpm.boot_ps = rps;
                kv_patch_boot_state(adev, ps);
        }
        if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                adev->pm.dpm.uvd_ps = rps;
}
26701bb76ff1Sjsg
kv_parse_pplib_clock_info(struct amdgpu_device * adev,struct amdgpu_ps * rps,int index,union pplib_clock_info * clock_info)26711bb76ff1Sjsg static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
26721bb76ff1Sjsg struct amdgpu_ps *rps, int index,
26731bb76ff1Sjsg union pplib_clock_info *clock_info)
26741bb76ff1Sjsg {
26751bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
26761bb76ff1Sjsg struct kv_ps *ps = kv_get_ps(rps);
26771bb76ff1Sjsg struct kv_pl *pl = &ps->levels[index];
26781bb76ff1Sjsg u32 sclk;
26791bb76ff1Sjsg
26801bb76ff1Sjsg sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
26811bb76ff1Sjsg sclk |= clock_info->sumo.ucEngineClockHigh << 16;
26821bb76ff1Sjsg pl->sclk = sclk;
26831bb76ff1Sjsg pl->vddc_index = clock_info->sumo.vddcIndex;
26841bb76ff1Sjsg
26851bb76ff1Sjsg ps->num_levels = index + 1;
26861bb76ff1Sjsg
26871bb76ff1Sjsg if (pi->caps_sclk_ds) {
26881bb76ff1Sjsg pl->ds_divider_index = 5;
26891bb76ff1Sjsg pl->ss_divider_index = 5;
26901bb76ff1Sjsg }
26911bb76ff1Sjsg }
26921bb76ff1Sjsg
/*
 * kv_parse_power_table - build adev->pm.dpm.ps[] from the ATOM PowerPlay
 * tables in the BIOS.
 *
 * Walks the state array, allocating one kv_ps per power state, parsing
 * up to SUMO_MAX_HARDWARE_POWERLEVELS clock levels per state plus its
 * non-clock info, then records the sclk of each VCE state.
 *
 * Returns 0 on success, -EINVAL when the PowerPlay header is missing,
 * -ENOMEM on allocation failure.
 */
static int kv_parse_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
        union pplib_power_state *power_state;
        int i, j, k, non_clock_array_index, clock_array_index;
        union pplib_clock_info *clock_info;
        struct _StateArray *state_array;
        struct _ClockInfoArray *clock_info_array;
        struct _NonClockInfoArray *non_clock_info_array;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        u8 *power_state_offset;
        struct kv_ps *ps;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                           &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        amdgpu_add_thermal_controller(adev);

        /* the three sub-arrays are located via offsets in the pplib header */
        state_array = (struct _StateArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usStateArrayOffset));
        clock_info_array = (struct _ClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
        non_clock_info_array = (struct _NonClockInfoArray *)
                (mode_info->atom_context->bios + data_offset +
                 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

        adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
                                  sizeof(struct amdgpu_ps),
                                  GFP_KERNEL);
        if (!adev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
        for (i = 0; i < state_array->ucNumEntries; i++) {
                u8 *idx;
                power_state = (union pplib_power_state *)power_state_offset;
                non_clock_array_index = power_state->v2.nonClockInfoIndex;
                non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
                        &non_clock_info_array->nonClockInfo[non_clock_array_index];
                ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
                /* NOTE(review): on this error path the ps array and earlier
                 * ps_priv allocations are not freed here - presumably the
                 * driver's fini path cleans them up; verify before relying
                 * on it */
                if (ps == NULL)
                        return -ENOMEM;
                adev->pm.dpm.ps[i].ps_priv = ps;
                k = 0;
                idx = (u8 *)&power_state->v2.clockInfoIndex[0];
                for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
                        clock_array_index = idx[j];
                        /* skip indices the clock info array does not cover */
                        if (clock_array_index >= clock_info_array->ucNumEntries)
                                continue;
                        if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
                                break;
                        clock_info = (union pplib_clock_info *)
                                ((u8 *)&clock_info_array->clockInfo[0] +
                                 (clock_array_index * clock_info_array->ucEntrySize));
                        kv_parse_pplib_clock_info(adev,
                                                  &adev->pm.dpm.ps[i], k,
                                                  clock_info);
                        k++;
                }
                kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
                                              non_clock_info,
                                              non_clock_info_array->ucEntrySize);
                /* v2 state record: 2 header bytes + one index byte per level */
                power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
        }
        adev->pm.dpm.num_ps = state_array->ucNumEntries;

        /* fill in the vce power states */
        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
                u32 sclk;
                clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
                clock_info = (union pplib_clock_info *)
                        &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
                sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
                sclk |= clock_info->sumo.ucEngineClockHigh << 16;
                adev->pm.dpm.vce_states[i].sclk = sclk;
                adev->pm.dpm.vce_states[i].mclk = 0;
        }

        return 0;
}
27801bb76ff1Sjsg
/*
 * kv_dpm_init - allocate and populate the KV DPM private state.
 *
 * Allocates kv_power_info, reads platform caps and the extended power
 * table, sets the driver's default feature capabilities (gated by
 * pg_flags / pp_feature / the amdgpu_bapm module parameter), then
 * parses the system info and power tables from the BIOS.
 *
 * Returns 0 on success or a negative error code. NOTE(review): on the
 * error paths pi stays attached to adev->pm.dpm.priv and earlier
 * parse allocations are not unwound here - presumably the fini path
 * frees them; verify.
 */
static int kv_dpm_init(struct amdgpu_device *adev)
{
        struct kv_power_info *pi;
        int ret, i;

        pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
        if (pi == NULL)
                return -ENOMEM;
        adev->pm.dpm.priv = pi;

        ret = amdgpu_get_platform_caps(adev);
        if (ret)
                return ret;

        ret = amdgpu_parse_extended_power_table(adev);
        if (ret)
                return ret;

        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;

        pi->sram_end = SMC_RAM_END;

        pi->enable_nb_dpm = true;

        pi->caps_power_containment = true;
        pi->caps_cac = true;
        /* DIDT is compiled out by default; the block below is dead unless
         * enable_didt is flipped on */
        pi->enable_didt = false;
        if (pi->enable_didt) {
                pi->caps_sq_ramping = true;
                pi->caps_db_ramping = true;
                pi->caps_td_ramping = true;
                pi->caps_tcp_ramping = true;
        }

        /* sclk deep sleep only when the PP feature mask allows it */
        if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
                pi->caps_sclk_ds = true;
        else
                pi->caps_sclk_ds = false;

        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
        /* bapm is controlled by the amdgpu_bapm module parameter */
        if (amdgpu_bapm == 0)
                pi->bapm_enable = false;
        else
                pi->bapm_enable = true;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
        /* power gating caps mirror the device's pg_flags */
        pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
        pi->caps_uvd_dpm = true;
        pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
        pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
        pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
        pi->caps_stable_p_state = false;

        ret = kv_parse_sys_info_table(adev);
        if (ret)
                return ret;

        kv_patch_voltage_values(adev);
        kv_construct_boot_state(adev);

        ret = kv_parse_power_table(adev);
        if (ret)
                return ret;

        pi->enable_dpm = true;

        return 0;
}
28521bb76ff1Sjsg
28531bb76ff1Sjsg static void
kv_dpm_debugfs_print_current_performance_level(void * handle,struct seq_file * m)28541bb76ff1Sjsg kv_dpm_debugfs_print_current_performance_level(void *handle,
28551bb76ff1Sjsg struct seq_file *m)
28561bb76ff1Sjsg {
28571bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
28581bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
28591bb76ff1Sjsg u32 current_index =
28601bb76ff1Sjsg (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
28611bb76ff1Sjsg TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
28621bb76ff1Sjsg TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
28631bb76ff1Sjsg u32 sclk, tmp;
28641bb76ff1Sjsg u16 vddc;
28651bb76ff1Sjsg
28661bb76ff1Sjsg if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
28671bb76ff1Sjsg seq_printf(m, "invalid dpm profile %d\n", current_index);
28681bb76ff1Sjsg } else {
28691bb76ff1Sjsg sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
28701bb76ff1Sjsg tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
28711bb76ff1Sjsg SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
28721bb76ff1Sjsg SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
28731bb76ff1Sjsg vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
28741bb76ff1Sjsg seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
28751bb76ff1Sjsg seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
28761bb76ff1Sjsg seq_printf(m, "power level %d sclk: %u vddc: %u\n",
28771bb76ff1Sjsg current_index, sclk, vddc);
28781bb76ff1Sjsg }
28791bb76ff1Sjsg }
28801bb76ff1Sjsg
28811bb76ff1Sjsg static void
kv_dpm_print_power_state(void * handle,void * request_ps)28821bb76ff1Sjsg kv_dpm_print_power_state(void *handle, void *request_ps)
28831bb76ff1Sjsg {
28841bb76ff1Sjsg int i;
28851bb76ff1Sjsg struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
28861bb76ff1Sjsg struct kv_ps *ps = kv_get_ps(rps);
28871bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
28881bb76ff1Sjsg
28891bb76ff1Sjsg amdgpu_dpm_print_class_info(rps->class, rps->class2);
28901bb76ff1Sjsg amdgpu_dpm_print_cap_info(rps->caps);
28911bb76ff1Sjsg printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
28921bb76ff1Sjsg for (i = 0; i < ps->num_levels; i++) {
28931bb76ff1Sjsg struct kv_pl *pl = &ps->levels[i];
28941bb76ff1Sjsg printk("\t\tpower level %d sclk: %u vddc: %u\n",
28951bb76ff1Sjsg i, pl->sclk,
28961bb76ff1Sjsg kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
28971bb76ff1Sjsg }
28981bb76ff1Sjsg amdgpu_dpm_print_ps_status(adev, rps);
28991bb76ff1Sjsg }
29001bb76ff1Sjsg
kv_dpm_fini(struct amdgpu_device * adev)29011bb76ff1Sjsg static void kv_dpm_fini(struct amdgpu_device *adev)
29021bb76ff1Sjsg {
29031bb76ff1Sjsg int i;
29041bb76ff1Sjsg
29051bb76ff1Sjsg for (i = 0; i < adev->pm.dpm.num_ps; i++) {
29061bb76ff1Sjsg kfree(adev->pm.dpm.ps[i].ps_priv);
29071bb76ff1Sjsg }
29081bb76ff1Sjsg kfree(adev->pm.dpm.ps);
29091bb76ff1Sjsg kfree(adev->pm.dpm.priv);
29101bb76ff1Sjsg amdgpu_free_extended_power_table(adev);
29111bb76ff1Sjsg }
29121bb76ff1Sjsg
/*
 * Intentionally empty: KV has no display-dependent DPM reconfiguration;
 * this stub only satisfies the amd_pm_funcs interface.
 */
static void kv_dpm_display_configuration_changed(void *handle)
{

}
29171bb76ff1Sjsg
kv_dpm_get_sclk(void * handle,bool low)29181bb76ff1Sjsg static u32 kv_dpm_get_sclk(void *handle, bool low)
29191bb76ff1Sjsg {
29201bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
29211bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
29221bb76ff1Sjsg struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
29231bb76ff1Sjsg
29241bb76ff1Sjsg if (low)
29251bb76ff1Sjsg return requested_state->levels[0].sclk;
29261bb76ff1Sjsg else
29271bb76ff1Sjsg return requested_state->levels[requested_state->num_levels - 1].sclk;
29281bb76ff1Sjsg }
29291bb76ff1Sjsg
kv_dpm_get_mclk(void * handle,bool low)29301bb76ff1Sjsg static u32 kv_dpm_get_mclk(void *handle, bool low)
29311bb76ff1Sjsg {
29321bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
29331bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
29341bb76ff1Sjsg
29351bb76ff1Sjsg return pi->sys_info.bootup_uma_clk;
29361bb76ff1Sjsg }
29371bb76ff1Sjsg
29381bb76ff1Sjsg /* get temperature in millidegrees */
kv_dpm_get_temp(void * handle)29391bb76ff1Sjsg static int kv_dpm_get_temp(void *handle)
29401bb76ff1Sjsg {
29411bb76ff1Sjsg u32 temp;
29421bb76ff1Sjsg int actual_temp = 0;
29431bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
29441bb76ff1Sjsg
29451bb76ff1Sjsg temp = RREG32_SMC(0xC0300E0C);
29461bb76ff1Sjsg
29471bb76ff1Sjsg if (temp)
29481bb76ff1Sjsg actual_temp = (temp / 8) - 49;
29491bb76ff1Sjsg else
29501bb76ff1Sjsg actual_temp = 0;
29511bb76ff1Sjsg
29521bb76ff1Sjsg actual_temp = actual_temp * 1000;
29531bb76ff1Sjsg
29541bb76ff1Sjsg return actual_temp;
29551bb76ff1Sjsg }
29561bb76ff1Sjsg
kv_dpm_early_init(void * handle)29571bb76ff1Sjsg static int kv_dpm_early_init(void *handle)
29581bb76ff1Sjsg {
29591bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
29601bb76ff1Sjsg
29611bb76ff1Sjsg adev->powerplay.pp_funcs = &kv_dpm_funcs;
29621bb76ff1Sjsg adev->powerplay.pp_handle = adev;
29631bb76ff1Sjsg kv_dpm_set_irq_funcs(adev);
29641bb76ff1Sjsg
29651bb76ff1Sjsg return 0;
29661bb76ff1Sjsg }
29671bb76ff1Sjsg
kv_dpm_late_init(void * handle)29681bb76ff1Sjsg static int kv_dpm_late_init(void *handle)
29691bb76ff1Sjsg {
29701bb76ff1Sjsg /* powerdown unused blocks for now */
29711bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
29721bb76ff1Sjsg
29731bb76ff1Sjsg if (!adev->pm.dpm_enabled)
29741bb76ff1Sjsg return 0;
29751bb76ff1Sjsg
29761bb76ff1Sjsg kv_dpm_powergate_acp(adev, true);
29771bb76ff1Sjsg kv_dpm_powergate_samu(adev, true);
29781bb76ff1Sjsg
29791bb76ff1Sjsg return 0;
29801bb76ff1Sjsg }
29811bb76ff1Sjsg
kv_dpm_sw_init(void * handle)29821bb76ff1Sjsg static int kv_dpm_sw_init(void *handle)
29831bb76ff1Sjsg {
29841bb76ff1Sjsg int ret;
29851bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
29861bb76ff1Sjsg
29871bb76ff1Sjsg ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
29881bb76ff1Sjsg &adev->pm.dpm.thermal.irq);
29891bb76ff1Sjsg if (ret)
29901bb76ff1Sjsg return ret;
29911bb76ff1Sjsg
29921bb76ff1Sjsg ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
29931bb76ff1Sjsg &adev->pm.dpm.thermal.irq);
29941bb76ff1Sjsg if (ret)
29951bb76ff1Sjsg return ret;
29961bb76ff1Sjsg
29971bb76ff1Sjsg /* default to balanced state */
29981bb76ff1Sjsg adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
29991bb76ff1Sjsg adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
30001bb76ff1Sjsg adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
30011bb76ff1Sjsg adev->pm.default_sclk = adev->clock.default_sclk;
30021bb76ff1Sjsg adev->pm.default_mclk = adev->clock.default_mclk;
30031bb76ff1Sjsg adev->pm.current_sclk = adev->clock.default_sclk;
30041bb76ff1Sjsg adev->pm.current_mclk = adev->clock.default_mclk;
30051bb76ff1Sjsg adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
30061bb76ff1Sjsg
30071bb76ff1Sjsg if (amdgpu_dpm == 0)
30081bb76ff1Sjsg return 0;
30091bb76ff1Sjsg
30101bb76ff1Sjsg INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
30111bb76ff1Sjsg ret = kv_dpm_init(adev);
30121bb76ff1Sjsg if (ret)
30131bb76ff1Sjsg goto dpm_failed;
30141bb76ff1Sjsg adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
30151bb76ff1Sjsg if (amdgpu_dpm == 1)
30161bb76ff1Sjsg amdgpu_pm_print_power_states(adev);
30171bb76ff1Sjsg DRM_INFO("amdgpu: dpm initialized\n");
30181bb76ff1Sjsg
30191bb76ff1Sjsg return 0;
30201bb76ff1Sjsg
30211bb76ff1Sjsg dpm_failed:
30221bb76ff1Sjsg kv_dpm_fini(adev);
30231bb76ff1Sjsg DRM_ERROR("amdgpu: dpm initialization failed\n");
30241bb76ff1Sjsg return ret;
30251bb76ff1Sjsg }
30261bb76ff1Sjsg
kv_dpm_sw_fini(void * handle)30271bb76ff1Sjsg static int kv_dpm_sw_fini(void *handle)
30281bb76ff1Sjsg {
30291bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
30301bb76ff1Sjsg
30311bb76ff1Sjsg flush_work(&adev->pm.dpm.thermal.work);
30321bb76ff1Sjsg
30331bb76ff1Sjsg kv_dpm_fini(adev);
30341bb76ff1Sjsg
30351bb76ff1Sjsg return 0;
30361bb76ff1Sjsg }
30371bb76ff1Sjsg
kv_dpm_hw_init(void * handle)30381bb76ff1Sjsg static int kv_dpm_hw_init(void *handle)
30391bb76ff1Sjsg {
30401bb76ff1Sjsg int ret;
30411bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
30421bb76ff1Sjsg
30431bb76ff1Sjsg if (!amdgpu_dpm)
30441bb76ff1Sjsg return 0;
30451bb76ff1Sjsg
30461bb76ff1Sjsg kv_dpm_setup_asic(adev);
30471bb76ff1Sjsg ret = kv_dpm_enable(adev);
30481bb76ff1Sjsg if (ret)
30491bb76ff1Sjsg adev->pm.dpm_enabled = false;
30501bb76ff1Sjsg else
30511bb76ff1Sjsg adev->pm.dpm_enabled = true;
30521bb76ff1Sjsg amdgpu_legacy_dpm_compute_clocks(adev);
30531bb76ff1Sjsg return ret;
30541bb76ff1Sjsg }
30551bb76ff1Sjsg
kv_dpm_hw_fini(void * handle)30561bb76ff1Sjsg static int kv_dpm_hw_fini(void *handle)
30571bb76ff1Sjsg {
30581bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
30591bb76ff1Sjsg
30601bb76ff1Sjsg if (adev->pm.dpm_enabled)
30611bb76ff1Sjsg kv_dpm_disable(adev);
30621bb76ff1Sjsg
30631bb76ff1Sjsg return 0;
30641bb76ff1Sjsg }
30651bb76ff1Sjsg
kv_dpm_suspend(void * handle)30661bb76ff1Sjsg static int kv_dpm_suspend(void *handle)
30671bb76ff1Sjsg {
30681bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
30691bb76ff1Sjsg
30701bb76ff1Sjsg if (adev->pm.dpm_enabled) {
30711bb76ff1Sjsg /* disable dpm */
30721bb76ff1Sjsg kv_dpm_disable(adev);
30731bb76ff1Sjsg /* reset the power state */
30741bb76ff1Sjsg adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
30751bb76ff1Sjsg }
30761bb76ff1Sjsg return 0;
30771bb76ff1Sjsg }
30781bb76ff1Sjsg
kv_dpm_resume(void * handle)30791bb76ff1Sjsg static int kv_dpm_resume(void *handle)
30801bb76ff1Sjsg {
30811bb76ff1Sjsg int ret;
30821bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
30831bb76ff1Sjsg
30841bb76ff1Sjsg if (adev->pm.dpm_enabled) {
30851bb76ff1Sjsg /* asic init will reset to the boot state */
30861bb76ff1Sjsg kv_dpm_setup_asic(adev);
30871bb76ff1Sjsg ret = kv_dpm_enable(adev);
30881bb76ff1Sjsg if (ret)
30891bb76ff1Sjsg adev->pm.dpm_enabled = false;
30901bb76ff1Sjsg else
30911bb76ff1Sjsg adev->pm.dpm_enabled = true;
30921bb76ff1Sjsg if (adev->pm.dpm_enabled)
30931bb76ff1Sjsg amdgpu_legacy_dpm_compute_clocks(adev);
30941bb76ff1Sjsg }
30951bb76ff1Sjsg return 0;
30961bb76ff1Sjsg }
30971bb76ff1Sjsg
/* DPM has no meaningful busy state to report; always claim idle. */
static bool kv_dpm_is_idle(void *handle)
{
	return true;
}
31021bb76ff1Sjsg
/* Nothing to wait on (see kv_dpm_is_idle); always succeeds. */
static int kv_dpm_wait_for_idle(void *handle)
{
	return 0;
}
31071bb76ff1Sjsg
31081bb76ff1Sjsg
/* Soft reset is not implemented for this IP block; report success. */
static int kv_dpm_soft_reset(void *handle)
{
	return 0;
}
31131bb76ff1Sjsg
/*
 * Enable or disable the SMC thermal interrupt sources (low-to-high and
 * high-to-low threshold crossings) via CG_THERMAL_INT_CTRL.
 *
 * NOTE(review): for both directions the ENABLE case *sets* the
 * THERM_INT[H/L]_MASK bit and DISABLE *clears* it, i.e. the bit behaves
 * as an interrupt enable despite the "MASK" in its name.  Verify against
 * the SMU 7.0.0 register documentation before changing the polarity.
 */
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}
31611bb76ff1Sjsg
kv_dpm_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)31621bb76ff1Sjsg static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
31631bb76ff1Sjsg struct amdgpu_irq_src *source,
31641bb76ff1Sjsg struct amdgpu_iv_entry *entry)
31651bb76ff1Sjsg {
31661bb76ff1Sjsg bool queue_thermal = false;
31671bb76ff1Sjsg
31681bb76ff1Sjsg if (entry == NULL)
31691bb76ff1Sjsg return -EINVAL;
31701bb76ff1Sjsg
31711bb76ff1Sjsg switch (entry->src_id) {
31721bb76ff1Sjsg case 230: /* thermal low to high */
31731bb76ff1Sjsg DRM_DEBUG("IH: thermal low to high\n");
31741bb76ff1Sjsg adev->pm.dpm.thermal.high_to_low = false;
31751bb76ff1Sjsg queue_thermal = true;
31761bb76ff1Sjsg break;
31771bb76ff1Sjsg case 231: /* thermal high to low */
31781bb76ff1Sjsg DRM_DEBUG("IH: thermal high to low\n");
31791bb76ff1Sjsg adev->pm.dpm.thermal.high_to_low = true;
31801bb76ff1Sjsg queue_thermal = true;
31811bb76ff1Sjsg break;
31821bb76ff1Sjsg default:
31831bb76ff1Sjsg break;
31841bb76ff1Sjsg }
31851bb76ff1Sjsg
31861bb76ff1Sjsg if (queue_thermal)
31871bb76ff1Sjsg schedule_work(&adev->pm.dpm.thermal.work);
31881bb76ff1Sjsg
31891bb76ff1Sjsg return 0;
31901bb76ff1Sjsg }
31911bb76ff1Sjsg
/* Clockgating is not managed by this block on KV; nothing to do. */
static int kv_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}
31971bb76ff1Sjsg
/* Powergating requests come through kv_set_powergating_by_smu instead. */
static int kv_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
32031bb76ff1Sjsg
kv_are_power_levels_equal(const struct kv_pl * kv_cpl1,const struct kv_pl * kv_cpl2)32041bb76ff1Sjsg static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
32051bb76ff1Sjsg const struct kv_pl *kv_cpl2)
32061bb76ff1Sjsg {
32071bb76ff1Sjsg return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
32081bb76ff1Sjsg (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
32091bb76ff1Sjsg (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
32101bb76ff1Sjsg (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
32111bb76ff1Sjsg }
32121bb76ff1Sjsg
/*
 * Decide whether two power states are equivalent: same number of levels,
 * pairwise-equal levels, and matching UVD/VCE clocks as a tie breaker.
 * Writes the verdict to *equal; returns -EINVAL on NULL arguments.
 */
static int kv_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct kv_ps *kv_cps, *kv_rps;
	int idx;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	kv_cps = kv_get_ps(cps);
	kv_rps = kv_get_ps(rps);

	/* Assume "not equal" until every check below passes. */
	*equal = false;

	if (kv_cps == NULL)
		return 0;

	if (kv_cps->num_levels != kv_rps->num_levels)
		return 0;

	for (idx = 0; idx < kv_cps->num_levels; idx++) {
		if (!kv_are_power_levels_equal(&kv_cps->levels[idx],
					       &kv_rps->levels[idx]))
			return 0;
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = (cps->vclk == rps->vclk) && (cps->dclk == rps->dclk) &&
		 (cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk);

	return 0;
}
32551bb76ff1Sjsg
kv_dpm_read_sensor(void * handle,int idx,void * value,int * size)32561bb76ff1Sjsg static int kv_dpm_read_sensor(void *handle, int idx,
32571bb76ff1Sjsg void *value, int *size)
32581bb76ff1Sjsg {
32591bb76ff1Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle;
32601bb76ff1Sjsg struct kv_power_info *pi = kv_get_pi(adev);
32611bb76ff1Sjsg uint32_t sclk;
32621bb76ff1Sjsg u32 pl_index =
32631bb76ff1Sjsg (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
32641bb76ff1Sjsg TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
32651bb76ff1Sjsg TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
32661bb76ff1Sjsg
32671bb76ff1Sjsg /* size must be at least 4 bytes for all sensors */
32681bb76ff1Sjsg if (*size < 4)
32691bb76ff1Sjsg return -EINVAL;
32701bb76ff1Sjsg
32711bb76ff1Sjsg switch (idx) {
32721bb76ff1Sjsg case AMDGPU_PP_SENSOR_GFX_SCLK:
32731bb76ff1Sjsg if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
32741bb76ff1Sjsg sclk = be32_to_cpu(
32751bb76ff1Sjsg pi->graphics_level[pl_index].SclkFrequency);
32761bb76ff1Sjsg *((uint32_t *)value) = sclk;
32771bb76ff1Sjsg *size = 4;
32781bb76ff1Sjsg return 0;
32791bb76ff1Sjsg }
32801bb76ff1Sjsg return -EINVAL;
32811bb76ff1Sjsg case AMDGPU_PP_SENSOR_GPU_TEMP:
32821bb76ff1Sjsg *((uint32_t *)value) = kv_dpm_get_temp(adev);
32831bb76ff1Sjsg *size = 4;
32841bb76ff1Sjsg return 0;
32851bb76ff1Sjsg default:
32861bb76ff1Sjsg return -EOPNOTSUPP;
32871bb76ff1Sjsg }
32881bb76ff1Sjsg }
32891bb76ff1Sjsg
/*
 * Gate/ungate an IP block via the SMU.  Only UVD and VCE are
 * powergate-capable on KV; other block types are silently accepted.
 */
static int kv_set_powergating_by_smu(void *handle,
				     uint32_t block_type, bool gate)
{
	if (block_type == AMD_IP_BLOCK_TYPE_UVD)
		kv_dpm_powergate_uvd(handle, gate);
	else if (block_type == AMD_IP_BLOCK_TYPE_VCE)
		kv_dpm_powergate_vce(handle, gate);

	return 0;
}
33051bb76ff1Sjsg
/* IP-block lifecycle callbacks wiring KV DPM into the amdgpu framework. */
static const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.name = "kv_dpm",
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};
33221bb76ff1Sjsg
/* SMC IP block descriptor (v1.0.0) exported to the amdgpu IP discovery code. */
const struct amdgpu_ip_block_version kv_smu_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &kv_dpm_ip_funcs,
};
33301bb76ff1Sjsg
33311bb76ff1Sjsg static const struct amd_pm_funcs kv_dpm_funcs = {
33321bb76ff1Sjsg .pre_set_power_state = &kv_dpm_pre_set_power_state,
33331bb76ff1Sjsg .set_power_state = &kv_dpm_set_power_state,
33341bb76ff1Sjsg .post_set_power_state = &kv_dpm_post_set_power_state,
33351bb76ff1Sjsg .display_configuration_changed = &kv_dpm_display_configuration_changed,
33361bb76ff1Sjsg .get_sclk = &kv_dpm_get_sclk,
33371bb76ff1Sjsg .get_mclk = &kv_dpm_get_mclk,
33381bb76ff1Sjsg .print_power_state = &kv_dpm_print_power_state,
33391bb76ff1Sjsg .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
33401bb76ff1Sjsg .force_performance_level = &kv_dpm_force_performance_level,
33411bb76ff1Sjsg .set_powergating_by_smu = kv_set_powergating_by_smu,
33421bb76ff1Sjsg .enable_bapm = &kv_dpm_enable_bapm,
33431bb76ff1Sjsg .get_vce_clock_state = amdgpu_get_vce_clock_state,
33441bb76ff1Sjsg .check_state_equal = kv_check_state_equal,
33451bb76ff1Sjsg .read_sensor = &kv_dpm_read_sensor,
33461bb76ff1Sjsg .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
33471bb76ff1Sjsg };
33481bb76ff1Sjsg
/* Thermal interrupt source callbacks (enable/disable and IH dispatch). */
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};
33531bb76ff1Sjsg
kv_dpm_set_irq_funcs(struct amdgpu_device * adev)33541bb76ff1Sjsg static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
33551bb76ff1Sjsg {
33561bb76ff1Sjsg adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
33571bb76ff1Sjsg adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
33581bb76ff1Sjsg }
3359