1 /* $NetBSD: amdgpu_kv_dpm.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $ */
2
3 /*
4 * Copyright 2013 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_kv_dpm.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $");
28
29 #include "amdgpu.h"
30 #include "amdgpu_pm.h"
31 #include "cikd.h"
32 #include "atom.h"
33 #include "amdgpu_atombios.h"
34 #include "amdgpu_dpm.h"
35 #include "kv_dpm.h"
36 #include "gfx_v7_0.h"
37 #include <linux/seq_file.h>
38
39 #include "smu/smu_7_0_0_d.h"
40 #include "smu/smu_7_0_0_sh_mask.h"
41
42 #include "gca/gfx_7_2_d.h"
43 #include "gca/gfx_7_2_sh_mask.h"
44
45 #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
46 #define KV_MINIMUM_ENGINE_CLOCK 800
47 #define SMC_RAM_END 0x40000
48
49 static const struct amd_pm_funcs kv_dpm_funcs;
50
51 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
52 static int kv_enable_nb_dpm(struct amdgpu_device *adev,
53 bool enable);
54 static void kv_init_graphics_levels(struct amdgpu_device *adev);
55 static int kv_calculate_ds_divider(struct amdgpu_device *adev);
56 static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
57 static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
58 static void kv_enable_new_levels(struct amdgpu_device *adev);
59 static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
60 struct amdgpu_ps *new_rps);
61 static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
62 static int kv_set_enabled_levels(struct amdgpu_device *adev);
63 static int kv_force_dpm_highest(struct amdgpu_device *adev);
64 static int kv_force_dpm_lowest(struct amdgpu_device *adev);
65 static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
66 struct amdgpu_ps *new_rps,
67 struct amdgpu_ps *old_rps);
68 static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
69 int min_temp, int max_temp);
70 static int kv_init_fps_limits(struct amdgpu_device *adev);
71
72 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
73 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
74
75
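/*
 * Convert a 2-bit voltage index to a 7-bit VID.  Prefer the vbios
 * vddc-vs-sclk dependency table when it has entries; otherwise fall
 * back to the sumo VID mapping table.
 */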
76 static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
77 struct sumo_vid_mapping_table *vid_mapping_table,
78 u32 vid_2bit)
79 {
80 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
81 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
82 u32 i;
83
84 if (vddc_sclk_table && vddc_sclk_table->count) {
85 if (vid_2bit < vddc_sclk_table->count)
86 return vddc_sclk_table->entries[vid_2bit].v;
87 else
88 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
89 } else {
90 for (i = 0; i < vid_mapping_table->num_entries; i++) {
91 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
92 return vid_mapping_table->entries[i].vid_7bit;
93 }
94 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
95 }
96 }
97
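/*
 * Inverse of kv_convert_vid2_to_vid7: map a 7-bit VID back to its
 * 2-bit index, falling back to the last table entry when there is
 * no exact match.
 */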
98 static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
99 struct sumo_vid_mapping_table *vid_mapping_table,
100 u32 vid_7bit)
101 {
102 struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
103 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
104 u32 i;
105
106 if (vddc_sclk_table && vddc_sclk_table->count) {
107 for (i = 0; i < vddc_sclk_table->count; i++) {
108 if (vddc_sclk_table->entries[i].v == vid_7bit)
109 return i;
110 }
111 return vddc_sclk_table->count - 1;
112 } else {
113 for (i = 0; i < vid_mapping_table->num_entries; i++) {
114 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
115 return vid_mapping_table->entries[i].vid_2bit;
116 }
117
118 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
119 }
120 }
121
122 static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
123 {
124 /* This bit selects who handles display phy powergating.
125 * Clear the bit to let atom handle it.
126 * Set it to let the driver handle it.
127 * For now we just let atom handle it.
128 */
129 #if 0
130 u32 v = RREG32(mmDOUT_SCRATCH3);
131
132 if (enable)
133 v |= 0x4;
134 else
135 v &= 0xFFFFFFFB;
136
137 WREG32(mmDOUT_SCRATCH3, v);
138 #endif
139 }
140
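/*
 * Build the sclk/voltage mapping table from the vbios available-sclk
 * list, keeping only entries with strictly increasing sclk values.
 */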
141 static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
142 struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
143 ATOM_AVAILABLE_SCLK_LIST *table)
144 {
145 u32 i;
146 u32 n = 0;
147 u32 prev_sclk = 0;
148
149 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
150 if (table[i].ulSupportedSCLK > prev_sclk) {
151 sclk_voltage_mapping_table->entries[n].sclk_frequency =
152 table[i].ulSupportedSCLK;
153 sclk_voltage_mapping_table->entries[n].vid_2bit =
154 table[i].usVoltageIndex;
155 prev_sclk = table[i].ulSupportedSCLK;
156 n++;
157 }
158 }
159
160 sclk_voltage_mapping_table->num_max_dpm_entries = n;
161 }
162
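/*
 * Build the VID mapping table from the vbios available-sclk list,
 * then compact it by shifting later non-zero entries into any holes.
 */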
163 static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
164 struct sumo_vid_mapping_table *vid_mapping_table,
165 ATOM_AVAILABLE_SCLK_LIST *table)
166 {
167 u32 i, j;
168
169 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
170 if (table[i].ulSupportedSCLK != 0) {
171 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
172 table[i].usVoltageID;
173 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
174 table[i].usVoltageIndex;
175 }
176 }
177
178 for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
179 if (vid_mapping_table->entries[i].vid_7bit == 0) {
180 for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
181 if (vid_mapping_table->entries[j].vid_7bit != 0) {
182 vid_mapping_table->entries[i] =
183 vid_mapping_table->entries[j];
184 vid_mapping_table->entries[j].vid_7bit = 0;
185 break;
186 }
187 }
188
189 if (j == SUMO_MAX_NUMBER_VOLTAGES)
190 break;
191 }
192 }
193
194 vid_mapping_table->num_entries = i;
195 }
196
197 #if 0
198 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
199 {
200 { 0, 4, 1 },
201 { 1, 4, 1 },
202 { 2, 5, 1 },
203 { 3, 4, 2 },
204 { 4, 1, 1 },
205 { 5, 5, 2 },
206 { 6, 6, 1 },
207 { 7, 9, 2 },
208 { 0xffffffff }
209 };
210
211 static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
212 {
213 { 0, 4, 1 },
214 { 0xffffffff }
215 };
216
217 static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
218 {
219 { 0, 4, 1 },
220 { 0xffffffff }
221 };
222
223 static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
224 {
225 { 0, 4, 1 },
226 { 0xffffffff }
227 };
228
229 static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
230 {
231 { 0, 4, 1 },
232 { 0xffffffff }
233 };
234
235 static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
236 {
237 { 0, 4, 1 },
238 { 1, 4, 1 },
239 { 2, 5, 1 },
240 { 3, 4, 1 },
241 { 4, 1, 1 },
242 { 5, 5, 1 },
243 { 6, 6, 1 },
244 { 7, 9, 1 },
245 { 8, 4, 1 },
246 { 9, 2, 1 },
247 { 10, 3, 1 },
248 { 11, 6, 1 },
249 { 12, 8, 2 },
250 { 13, 1, 1 },
251 { 14, 2, 1 },
252 { 15, 3, 1 },
253 { 16, 1, 1 },
254 { 17, 4, 1 },
255 { 18, 3, 1 },
256 { 19, 1, 1 },
257 { 20, 8, 1 },
258 { 21, 5, 1 },
259 { 22, 1, 1 },
260 { 23, 1, 1 },
261 { 24, 4, 1 },
262 { 27, 6, 1 },
263 { 28, 1, 1 },
264 { 0xffffffff }
265 };
266
267 static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
268 {
269 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
270 };
271
272 static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
273 {
274 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
275 };
276
277 static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
278 {
279 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
280 };
281
282 static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
283 {
284 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
285 };
286
287 static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
288 {
289 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
290 };
291
292 static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
293 {
294 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
295 };
296 #endif
297
298 static const struct kv_pt_config_reg didt_config_kv[] =
299 {
300 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
301 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
302 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
303 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
304 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
305 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
306 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
307 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
308 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
309 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
310 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
311 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
312 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
313 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
314 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
315 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
316 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
317 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
318 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
319 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
320 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
321 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
322 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
323 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
324 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
325 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
326 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
327 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
328 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
329 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
330 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
331 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
332 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
333 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
334 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
335 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
336 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
337 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
338 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
339 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
340 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
341 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
342 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
343 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
344 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
345 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
346 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
347 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
348 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
349 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
350 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
351 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
352 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
353 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
354 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
355 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
356 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
357 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
358 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
359 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
360 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
361 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
362 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
363 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
364 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
365 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
366 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
367 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
368 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
369 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
370 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
371 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
372 { 0xFFFFFFFF }
373 };
374
375 static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
376 {
377 struct kv_ps *ps = rps->ps_priv;
378
379 return ps;
380 }
381
382 static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
383 {
384 struct kv_power_info *pi = adev->pm.dpm.priv;
385
386 return pi;
387 }
388
389 #if 0
390 static void kv_program_local_cac_table(struct amdgpu_device *adev,
391 const struct kv_lcac_config_values *local_cac_table,
392 const struct kv_lcac_config_reg *local_cac_reg)
393 {
394 u32 i, count, data;
395 const struct kv_lcac_config_values *values = local_cac_table;
396
397 while (values->block_id != 0xffffffff) {
398 count = values->signal_id;
399 for (i = 0; i < count; i++) {
400 data = ((values->block_id << local_cac_reg->block_shift) &
401 local_cac_reg->block_mask);
402 data |= ((i << local_cac_reg->signal_shift) &
403 local_cac_reg->signal_mask);
404 data |= ((values->t << local_cac_reg->t_shift) &
405 local_cac_reg->t_mask);
406 data |= ((1 << local_cac_reg->enable_shift) &
407 local_cac_reg->enable_mask);
408 WREG32_SMC(local_cac_reg->cntl, data);
409 }
410 values++;
411 }
412 }
413 #endif
414
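/*
 * Walk a table of power-tune register updates terminated by an offset
 * of 0xFFFFFFFF.  Cache-type entries are accumulated and folded into
 * the next write; the rest are applied as masked read-modify-write
 * through the SMC, DIDT or plain MMIO register space per entry type.
 */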
415 static int kv_program_pt_config_registers(struct amdgpu_device *adev,
416 const struct kv_pt_config_reg *cac_config_regs)
417 {
418 const struct kv_pt_config_reg *config_regs = cac_config_regs;
419 u32 data;
420 u32 cache = 0;
421
422 if (config_regs == NULL)
423 return -EINVAL;
424
425 while (config_regs->offset != 0xFFFFFFFF) {
426 if (config_regs->type == KV_CONFIGREG_CACHE) {
427 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
428 } else {
429 switch (config_regs->type) {
430 case KV_CONFIGREG_SMC_IND:
431 data = RREG32_SMC(config_regs->offset);
432 break;
433 case KV_CONFIGREG_DIDT_IND:
434 data = RREG32_DIDT(config_regs->offset);
435 break;
436 default:
437 data = RREG32(config_regs->offset);
438 break;
439 }
440
441 data &= ~config_regs->mask;
442 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
443 data |= cache;
444 cache = 0;
445
446 switch (config_regs->type) {
447 case KV_CONFIGREG_SMC_IND:
448 WREG32_SMC(config_regs->offset, data);
449 break;
450 case KV_CONFIGREG_DIDT_IND:
451 WREG32_DIDT(config_regs->offset, data);
452 break;
453 default:
454 WREG32(config_regs->offset, data);
455 break;
456 }
457 }
458 config_regs++;
459 }
460
461 return 0;
462 }
463
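/*
 * Toggle the DIDT (di/dt) control enable bit for each block (SQ, DB,
 * TD, TCP) whose ramping capability is advertised in the power info.
 */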
464 static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
465 {
466 struct kv_power_info *pi = kv_get_pi(adev);
467 u32 data;
468
469 if (pi->caps_sq_ramping) {
470 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
471 if (enable)
472 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
473 else
474 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
475 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
476 }
477
478 if (pi->caps_db_ramping) {
479 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
480 if (enable)
481 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
482 else
483 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
484 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
485 }
486
487 if (pi->caps_td_ramping) {
488 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
489 if (enable)
490 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
491 else
492 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
493 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
494 }
495
496 if (pi->caps_tcp_ramping) {
497 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
498 if (enable)
499 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
500 else
501 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
502 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
503 }
504 }
505
506 static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
507 {
508 struct kv_power_info *pi = kv_get_pi(adev);
509 int ret;
510
511 if (pi->caps_sq_ramping ||
512 pi->caps_db_ramping ||
513 pi->caps_td_ramping ||
514 pi->caps_tcp_ramping) {
515 amdgpu_gfx_rlc_enter_safe_mode(adev);
516
517 if (enable) {
518 ret = kv_program_pt_config_registers(adev, didt_config_kv);
519 if (ret) {
520 amdgpu_gfx_rlc_exit_safe_mode(adev);
521 return ret;
522 }
523 }
524
525 kv_do_enable_didt(adev, enable);
526
527 amdgpu_gfx_rlc_exit_safe_mode(adev);
528 }
529
530 return 0;
531 }
532
533 #if 0
534 static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
535 {
536 struct kv_power_info *pi = kv_get_pi(adev);
537
538 if (pi->caps_cac) {
539 WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
540 WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
541 kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
542
543 WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
544 WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
545 kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
546
547 WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
548 WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
549 kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
550
551 WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
552 WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
553 kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
554
555 WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
556 WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
557 kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
558
559 WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
560 WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
561 kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
562 }
563 }
564 #endif
565
566 static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
567 {
568 struct kv_power_info *pi = kv_get_pi(adev);
569 int ret = 0;
570
571 if (pi->caps_cac) {
572 if (enable) {
573 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
574 if (ret)
575 pi->cac_enabled = false;
576 else
577 pi->cac_enabled = true;
578 } else if (pi->cac_enabled) {
579 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
580 pi->cac_enabled = false;
581 }
582 }
583
584 return ret;
585 }
586
587 static int kv_process_firmware_header(struct amdgpu_device *adev)
588 {
589 struct kv_power_info *pi = kv_get_pi(adev);
590 u32 tmp;
591 int ret;
592
593 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
594 offsetof(SMU7_Firmware_Header, DpmTable),
595 &tmp, pi->sram_end);
596
597 if (ret == 0)
598 pi->dpm_table_start = tmp;
599
600 ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
601 offsetof(SMU7_Firmware_Header, SoftRegisters),
602 &tmp, pi->sram_end);
603
604 if (ret == 0)
605 pi->soft_regs_start = tmp;
606
607 return ret;
608 }
609
610 static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
611 {
612 struct kv_power_info *pi = kv_get_pi(adev);
613 int ret;
614
615 pi->graphics_voltage_change_enable = 1;
616
617 ret = amdgpu_kv_copy_bytes_to_smc(adev,
618 pi->dpm_table_start +
619 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
620 &pi->graphics_voltage_change_enable,
621 sizeof(u8), pi->sram_end);
622
623 return ret;
624 }
625
626 static int kv_set_dpm_interval(struct amdgpu_device *adev)
627 {
628 struct kv_power_info *pi = kv_get_pi(adev);
629 int ret;
630
631 pi->graphics_interval = 1;
632
633 ret = amdgpu_kv_copy_bytes_to_smc(adev,
634 pi->dpm_table_start +
635 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
636 &pi->graphics_interval,
637 sizeof(u8), pi->sram_end);
638
639 return ret;
640 }
641
642 static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
643 {
644 struct kv_power_info *pi = kv_get_pi(adev);
645 int ret;
646
647 ret = amdgpu_kv_copy_bytes_to_smc(adev,
648 pi->dpm_table_start +
649 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
650 &pi->graphics_boot_level,
651 sizeof(u8), pi->sram_end);
652
653 return ret;
654 }
655
656 static void kv_program_vc(struct amdgpu_device *adev)
657 {
658 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
659 }
660
661 static void kv_clear_vc(struct amdgpu_device *adev)
662 {
663 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
664 }
665
666 static int kv_set_divider_value(struct amdgpu_device *adev,
667 u32 index, u32 sclk)
668 {
669 struct kv_power_info *pi = kv_get_pi(adev);
670 struct atom_clock_dividers dividers;
671 int ret;
672
673 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
674 sclk, false, &dividers);
675 if (ret)
676 return ret;
677
678 pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
679 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
680
681 return 0;
682 }
683
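/*
 * Convert an 8-bit VID index to a voltage value: each index step is
 * 25 units down from a base of 6200.
 */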
684 static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
685 u16 voltage)
686 {
687 return 6200 - (voltage * 25);
688 }
689
690 static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
691 u32 vid_2bit)
692 {
693 struct kv_power_info *pi = kv_get_pi(adev);
694 u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
695 &pi->sys_info.vid_mapping_table,
696 vid_2bit);
697
698 return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
699 }
700
701
702 static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
703 {
704 struct kv_power_info *pi = kv_get_pi(adev);
705
706 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
707 pi->graphics_level[index].MinVddNb =
708 cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
709
710 return 0;
711 }
712
713 static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
714 {
715 struct kv_power_info *pi = kv_get_pi(adev);
716
717 pi->graphics_level[index].AT = cpu_to_be16((u16)at);
718
719 return 0;
720 }
721
722 static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
723 u32 index, bool enable)
724 {
725 struct kv_power_info *pi = kv_get_pi(adev);
726
727 pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
728 }
729
730 static void kv_start_dpm(struct amdgpu_device *adev)
731 {
732 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
733
734 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
735 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
736
737 amdgpu_kv_smc_dpm_enable(adev, true);
738 }
739
740 static void kv_stop_dpm(struct amdgpu_device *adev)
741 {
742 amdgpu_kv_smc_dpm_enable(adev, false);
743 }
744
745 static void kv_start_am(struct amdgpu_device *adev)
746 {
747 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
748
749 sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
750 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
751 sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
752
753 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
754 }
755
756 static void kv_reset_am(struct amdgpu_device *adev)
757 {
758 u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
759
760 sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
761 SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
762
763 WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
764 }
765
766 static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
767 {
768 return amdgpu_kv_notify_message_to_smu(adev, freeze ?
769 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
770 }
771
772 static int kv_force_lowest_valid(struct amdgpu_device *adev)
773 {
774 return kv_force_dpm_lowest(adev);
775 }
776
777 static int kv_unforce_levels(struct amdgpu_device *adev)
778 {
779 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
780 return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
781 else
782 return kv_set_enabled_levels(adev);
783 }
784
785 static int kv_update_sclk_t(struct amdgpu_device *adev)
786 {
787 struct kv_power_info *pi = kv_get_pi(adev);
788 u32 low_sclk_interrupt_t = 0;
789 int ret = 0;
790
791 if (pi->caps_sclk_throttle_low_notification) {
792 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
793
794 ret = amdgpu_kv_copy_bytes_to_smc(adev,
795 pi->dpm_table_start +
796 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
797 (u8 *)&low_sclk_interrupt_t,
798 sizeof(u32), pi->sram_end);
799 }
800 return ret;
801 }
802
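/*
 * Find the DPM level matching the boot power level's sclk (searching
 * from the top down) and record/enable it as the graphics boot level.
 */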
803 static int kv_program_bootup_state(struct amdgpu_device *adev)
804 {
805 struct kv_power_info *pi = kv_get_pi(adev);
806 u32 i;
807 struct amdgpu_clock_voltage_dependency_table *table =
808 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
809
810 if (table && table->count) {
811 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
812 if (table->entries[i].clk == pi->boot_pl.sclk)
813 break;
814 }
815
816 pi->graphics_boot_level = (u8)i;
817 kv_dpm_power_level_enable(adev, i, true);
818 } else {
819 struct sumo_sclk_voltage_mapping_table *table =
820 &pi->sys_info.sclk_voltage_mapping_table;
821
822 if (table->num_max_dpm_entries == 0)
823 return -EINVAL;
824
825 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
826 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
827 break;
828 }
829
830 pi->graphics_boot_level = (u8)i;
831 kv_dpm_power_level_enable(adev, i, true);
832 }
833 return 0;
834 }
835
836 static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
837 {
838 struct kv_power_info *pi = kv_get_pi(adev);
839 int ret;
840
841 pi->graphics_therm_throttle_enable = 1;
842
843 ret = amdgpu_kv_copy_bytes_to_smc(adev,
844 pi->dpm_table_start +
845 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
846 &pi->graphics_therm_throttle_enable,
847 sizeof(u8), pi->sram_end);
848
849 return ret;
850 }
851
852 static int kv_upload_dpm_settings(struct amdgpu_device *adev)
853 {
854 struct kv_power_info *pi = kv_get_pi(adev);
855 int ret;
856
857 ret = amdgpu_kv_copy_bytes_to_smc(adev,
858 pi->dpm_table_start +
859 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
860 (u8 *)&pi->graphics_level,
861 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
862 pi->sram_end);
863
864 if (ret)
865 return ret;
866
867 ret = amdgpu_kv_copy_bytes_to_smc(adev,
868 pi->dpm_table_start +
869 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
870 &pi->graphics_dpm_level_count,
871 sizeof(u8), pi->sram_end);
872
873 return ret;
874 }
875
876 static u32 kv_get_clock_difference(u32 a, u32 b)
877 {
878 return (a >= b) ? a - b : b - a;
879 }
880
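/*
 * Pick a DFS bypass divider code when the clock is within 200 units
 * of one of a few fixed frequencies; return 0 (no bypass) otherwise,
 * or when DFS bypass is not supported.
 */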
881 static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
882 {
883 struct kv_power_info *pi = kv_get_pi(adev);
884 u32 value;
885
886 if (pi->caps_enable_dfs_bypass) {
887 if (kv_get_clock_difference(clk, 40000) < 200)
888 value = 3;
889 else if (kv_get_clock_difference(clk, 30000) < 200)
890 value = 2;
891 else if (kv_get_clock_difference(clk, 20000) < 200)
892 value = 7;
893 else if (kv_get_clock_difference(clk, 15000) < 200)
894 value = 6;
895 else if (kv_get_clock_difference(clk, 10000) < 200)
896 value = 8;
897 else
898 value = 0;
899 } else {
900 value = 0;
901 }
902
903 return value;
904 }
905
906 static int kv_populate_uvd_table(struct amdgpu_device *adev)
907 {
908 struct kv_power_info *pi = kv_get_pi(adev);
909 struct amdgpu_uvd_clock_voltage_dependency_table *table =
910 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
911 struct atom_clock_dividers dividers;
912 int ret;
913 u32 i;
914
915 if (table == NULL || table->count == 0)
916 return 0;
917
918 pi->uvd_level_count = 0;
919 for (i = 0; i < table->count; i++) {
920 if (pi->high_voltage_t &&
921 (pi->high_voltage_t < table->entries[i].v))
922 break;
923
924 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
925 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
926 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
927
928 pi->uvd_level[i].VClkBypassCntl =
929 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
930 pi->uvd_level[i].DClkBypassCntl =
931 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
932
933 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
934 table->entries[i].vclk, false, &dividers);
935 if (ret)
936 return ret;
937 pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
938
939 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
940 table->entries[i].dclk, false, &dividers);
941 if (ret)
942 return ret;
943 pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
944
945 pi->uvd_level_count++;
946 }
947
948 ret = amdgpu_kv_copy_bytes_to_smc(adev,
949 pi->dpm_table_start +
950 offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
951 (u8 *)&pi->uvd_level_count,
952 sizeof(u8), pi->sram_end);
953 if (ret)
954 return ret;
955
956 pi->uvd_interval = 1;
957
958 ret = amdgpu_kv_copy_bytes_to_smc(adev,
959 pi->dpm_table_start +
960 offsetof(SMU7_Fusion_DpmTable, UVDInterval),
961 &pi->uvd_interval,
962 sizeof(u8), pi->sram_end);
963 if (ret)
964 return ret;
965
966 ret = amdgpu_kv_copy_bytes_to_smc(adev,
967 pi->dpm_table_start +
968 offsetof(SMU7_Fusion_DpmTable, UvdLevel),
969 (u8 *)&pi->uvd_level,
970 sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
971 pi->sram_end);
972
973 return ret;
974
975 }
976
977 static int kv_populate_vce_table(struct amdgpu_device *adev)
978 {
979 struct kv_power_info *pi = kv_get_pi(adev);
980 int ret;
981 u32 i;
982 struct amdgpu_vce_clock_voltage_dependency_table *table =
983 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
984 struct atom_clock_dividers dividers;
985
986 if (table == NULL || table->count == 0)
987 return 0;
988
989 pi->vce_level_count = 0;
990 for (i = 0; i < table->count; i++) {
991 if (pi->high_voltage_t &&
992 pi->high_voltage_t < table->entries[i].v)
993 break;
994
995 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
996 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
997
998 pi->vce_level[i].ClkBypassCntl =
999 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
1000
1001 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
1002 table->entries[i].evclk, false, &dividers);
1003 if (ret)
1004 return ret;
1005 pi->vce_level[i].Divider = (u8)dividers.post_div;
1006
1007 pi->vce_level_count++;
1008 }
1009
1010 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1011 pi->dpm_table_start +
1012 offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
1013 (u8 *)&pi->vce_level_count,
1014 sizeof(u8),
1015 pi->sram_end);
1016 if (ret)
1017 return ret;
1018
1019 pi->vce_interval = 1;
1020
1021 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1022 pi->dpm_table_start +
1023 offsetof(SMU7_Fusion_DpmTable, VCEInterval),
1024 (u8 *)&pi->vce_interval,
1025 sizeof(u8),
1026 pi->sram_end);
1027 if (ret)
1028 return ret;
1029
1030 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1031 pi->dpm_table_start +
1032 offsetof(SMU7_Fusion_DpmTable, VceLevel),
1033 (u8 *)&pi->vce_level,
1034 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
1035 pi->sram_end);
1036
1037 return ret;
1038 }
1039
1040 static int kv_populate_samu_table(struct amdgpu_device *adev)
1041 {
1042 struct kv_power_info *pi = kv_get_pi(adev);
1043 struct amdgpu_clock_voltage_dependency_table *table =
1044 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1045 struct atom_clock_dividers dividers;
1046 int ret;
1047 u32 i;
1048
1049 if (table == NULL || table->count == 0)
1050 return 0;
1051
1052 pi->samu_level_count = 0;
1053 for (i = 0; i < table->count; i++) {
1054 if (pi->high_voltage_t &&
1055 pi->high_voltage_t < table->entries[i].v)
1056 break;
1057
1058 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
1059 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
1060
1061 pi->samu_level[i].ClkBypassCntl =
1062 (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
1063
1064 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
1065 table->entries[i].clk, false, &dividers);
1066 if (ret)
1067 return ret;
1068 pi->samu_level[i].Divider = (u8)dividers.post_div;
1069
1070 pi->samu_level_count++;
1071 }
1072
1073 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1074 pi->dpm_table_start +
1075 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
1076 (u8 *)&pi->samu_level_count,
1077 sizeof(u8),
1078 pi->sram_end);
1079 if (ret)
1080 return ret;
1081
1082 pi->samu_interval = 1;
1083
1084 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1085 pi->dpm_table_start +
1086 offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
1087 (u8 *)&pi->samu_interval,
1088 sizeof(u8),
1089 pi->sram_end);
1090 if (ret)
1091 return ret;
1092
1093 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1094 pi->dpm_table_start +
1095 offsetof(SMU7_Fusion_DpmTable, SamuLevel),
1096 (u8 *)&pi->samu_level,
1097 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
1098 pi->sram_end);
1099 if (ret)
1100 return ret;
1101
1102 return ret;
1103 }
1104
1105
1106 static int kv_populate_acp_table(struct amdgpu_device *adev)
1107 {
1108 struct kv_power_info *pi = kv_get_pi(adev);
1109 struct amdgpu_clock_voltage_dependency_table *table =
1110 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1111 struct atom_clock_dividers dividers;
1112 int ret;
1113 u32 i;
1114
1115 if (table == NULL || table->count == 0)
1116 return 0;
1117
1118 pi->acp_level_count = 0;
1119 for (i = 0; i < table->count; i++) {
1120 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
1121 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
1122
1123 ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
1124 table->entries[i].clk, false, &dividers);
1125 if (ret)
1126 return ret;
1127 pi->acp_level[i].Divider = (u8)dividers.post_div;
1128
1129 pi->acp_level_count++;
1130 }
1131
1132 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1133 pi->dpm_table_start +
1134 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
1135 (u8 *)&pi->acp_level_count,
1136 sizeof(u8),
1137 pi->sram_end);
1138 if (ret)
1139 return ret;
1140
1141 pi->acp_interval = 1;
1142
1143 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1144 pi->dpm_table_start +
1145 offsetof(SMU7_Fusion_DpmTable, ACPInterval),
1146 (u8 *)&pi->acp_interval,
1147 sizeof(u8),
1148 pi->sram_end);
1149 if (ret)
1150 return ret;
1151
1152 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1153 pi->dpm_table_start +
1154 offsetof(SMU7_Fusion_DpmTable, AcpLevel),
1155 (u8 *)&pi->acp_level,
1156 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
1157 pi->sram_end);
1158 if (ret)
1159 return ret;
1160
1161 return ret;
1162 }
1163
1164 static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
1165 {
1166 struct kv_power_info *pi = kv_get_pi(adev);
1167 u32 i;
1168 struct amdgpu_clock_voltage_dependency_table *table =
1169 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1170
1171 if (table && table->count) {
1172 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1173 if (pi->caps_enable_dfs_bypass) {
1174 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
1175 pi->graphics_level[i].ClkBypassCntl = 3;
1176 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
1177 pi->graphics_level[i].ClkBypassCntl = 2;
1178 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
1179 pi->graphics_level[i].ClkBypassCntl = 7;
1180 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
1181 pi->graphics_level[i].ClkBypassCntl = 6;
1182 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
1183 pi->graphics_level[i].ClkBypassCntl = 8;
1184 else
1185 pi->graphics_level[i].ClkBypassCntl = 0;
1186 } else {
1187 pi->graphics_level[i].ClkBypassCntl = 0;
1188 }
1189 }
1190 } else {
1191 struct sumo_sclk_voltage_mapping_table *table =
1192 &pi->sys_info.sclk_voltage_mapping_table;
1193 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1194 if (pi->caps_enable_dfs_bypass) {
1195 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
1196 pi->graphics_level[i].ClkBypassCntl = 3;
1197 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
1198 pi->graphics_level[i].ClkBypassCntl = 2;
1199 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
1200 pi->graphics_level[i].ClkBypassCntl = 7;
1201 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
1202 pi->graphics_level[i].ClkBypassCntl = 6;
1203 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
1204 pi->graphics_level[i].ClkBypassCntl = 8;
1205 else
1206 pi->graphics_level[i].ClkBypassCntl = 0;
1207 } else {
1208 pi->graphics_level[i].ClkBypassCntl = 0;
1209 }
1210 }
1211 }
1212 }
1213
1214 static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
1215 {
1216 return amdgpu_kv_notify_message_to_smu(adev, enable ?
1217 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1218 }
1219
1220 static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
1221 {
1222 struct kv_power_info *pi = kv_get_pi(adev);
1223
1224 pi->acp_boot_level = 0xff;
1225 }
1226
1227 static void kv_update_current_ps(struct amdgpu_device *adev,
1228 struct amdgpu_ps *rps)
1229 {
1230 struct kv_ps *new_ps = kv_get_ps(rps);
1231 struct kv_power_info *pi = kv_get_pi(adev);
1232
1233 pi->current_rps = *rps;
1234 pi->current_ps = *new_ps;
1235 pi->current_rps.ps_priv = &pi->current_ps;
1236 adev->pm.dpm.current_ps = &pi->current_rps;
1237 }
1238
1239 static void kv_update_requested_ps(struct amdgpu_device *adev,
1240 struct amdgpu_ps *rps)
1241 {
1242 struct kv_ps *new_ps = kv_get_ps(rps);
1243 struct kv_power_info *pi = kv_get_pi(adev);
1244
1245 pi->requested_rps = *rps;
1246 pi->requested_ps = *new_ps;
1247 pi->requested_rps.ps_priv = &pi->requested_ps;
1248 adev->pm.dpm.requested_ps = &pi->requested_rps;
1249 }
1250
1251 static void kv_dpm_enable_bapm(void *handle, bool enable)
1252 {
1253 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1254 struct kv_power_info *pi = kv_get_pi(adev);
1255 int ret;
1256
1257 if (pi->bapm_enable) {
1258 ret = amdgpu_kv_smc_bapm_enable(adev, enable);
1259 if (ret)
1260 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
1261 }
1262 }
1263
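/*
 * Bring up dpm: parse the SMU firmware header, program the bootup
 * state and the UVD/VCE/SAMU/ACP clock tables, then enable voltage
 * scaling, ULV, DIDT, CAC and (if available) the internal thermal
 * interrupt, bailing out on the first error.
 */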
1264 static int kv_dpm_enable(struct amdgpu_device *adev)
1265 {
1266 struct kv_power_info *pi = kv_get_pi(adev);
1267 int ret;
1268
1269 ret = kv_process_firmware_header(adev);
1270 if (ret) {
1271 DRM_ERROR("kv_process_firmware_header failed\n");
1272 return ret;
1273 }
1274 kv_init_fps_limits(adev);
1275 kv_init_graphics_levels(adev);
1276 ret = kv_program_bootup_state(adev);
1277 if (ret) {
1278 DRM_ERROR("kv_program_bootup_state failed\n");
1279 return ret;
1280 }
1281 kv_calculate_dfs_bypass_settings(adev);
1282 ret = kv_upload_dpm_settings(adev);
1283 if (ret) {
1284 DRM_ERROR("kv_upload_dpm_settings failed\n");
1285 return ret;
1286 }
1287 ret = kv_populate_uvd_table(adev);
1288 if (ret) {
1289 DRM_ERROR("kv_populate_uvd_table failed\n");
1290 return ret;
1291 }
1292 ret = kv_populate_vce_table(adev);
1293 if (ret) {
1294 DRM_ERROR("kv_populate_vce_table failed\n");
1295 return ret;
1296 }
1297 ret = kv_populate_samu_table(adev);
1298 if (ret) {
1299 DRM_ERROR("kv_populate_samu_table failed\n");
1300 return ret;
1301 }
1302 ret = kv_populate_acp_table(adev);
1303 if (ret) {
1304 DRM_ERROR("kv_populate_acp_table failed\n");
1305 return ret;
1306 }
1307 kv_program_vc(adev);
1308 #if 0
1309 kv_initialize_hardware_cac_manager(adev);
1310 #endif
1311 kv_start_am(adev);
1312 if (pi->enable_auto_thermal_throttling) {
1313 ret = kv_enable_auto_thermal_throttling(adev);
1314 if (ret) {
1315 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1316 return ret;
1317 }
1318 }
1319 ret = kv_enable_dpm_voltage_scaling(adev);
1320 if (ret) {
1321 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1322 return ret;
1323 }
1324 ret = kv_set_dpm_interval(adev);
1325 if (ret) {
1326 DRM_ERROR("kv_set_dpm_interval failed\n");
1327 return ret;
1328 }
1329 ret = kv_set_dpm_boot_state(adev);
1330 if (ret) {
1331 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1332 return ret;
1333 }
1334 ret = kv_enable_ulv(adev, true);
1335 if (ret) {
1336 DRM_ERROR("kv_enable_ulv failed\n");
1337 return ret;
1338 }
1339 kv_start_dpm(adev);
1340 ret = kv_enable_didt(adev, true);
1341 if (ret) {
1342 DRM_ERROR("kv_enable_didt failed\n");
1343 return ret;
1344 }
1345 ret = kv_enable_smc_cac(adev, true);
1346 if (ret) {
1347 DRM_ERROR("kv_enable_smc_cac failed\n");
1348 return ret;
1349 }
1350
1351 kv_reset_acp_boot_level(adev);
1352
1353 ret = amdgpu_kv_smc_bapm_enable(adev, false);
1354 if (ret) {
1355 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
1356 return ret;
1357 }
1358
1359 if (adev->irq.installed &&
1360 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
1361 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
1362 if (ret) {
1363 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1364 return ret;
1365 }
1366 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
1367 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
1368 amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
1369 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
1370 }
1371
1372 return ret;
1373 }
1374
1375 static void kv_dpm_disable(struct amdgpu_device *adev)
1376 {
1377 struct kv_power_info *pi = kv_get_pi(adev);
1378
1379 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
1380 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
1381 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
1382 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
1383
1384 amdgpu_kv_smc_bapm_enable(adev, false);
1385
1386 if (adev->asic_type == CHIP_MULLINS)
1387 kv_enable_nb_dpm(adev, false);
1388
1389 /* powerup blocks */
1390 kv_dpm_powergate_acp(adev, false);
1391 kv_dpm_powergate_samu(adev, false);
1392 if (pi->caps_vce_pg) /* power on the VCE block */
1393 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
1394 if (pi->caps_uvd_pg) /* power on the UVD block */
1395 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
1396
1397 kv_enable_smc_cac(adev, false);
1398 kv_enable_didt(adev, false);
1399 kv_clear_vc(adev);
1400 kv_stop_dpm(adev);
1401 kv_enable_ulv(adev, false);
1402 kv_reset_am(adev);
1403
1404 kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
1405 }
1406
1407 #if 0
1408 static int kv_write_smc_soft_register(struct amdgpu_device *adev,
1409 u16 reg_offset, u32 value)
1410 {
1411 struct kv_power_info *pi = kv_get_pi(adev);
1412
1413 return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
1414 (u8 *)&value, sizeof(u16), pi->sram_end);
1415 }
1416
1417 static int kv_read_smc_soft_register(struct amdgpu_device *adev,
1418 u16 reg_offset, u32 *value)
1419 {
1420 struct kv_power_info *pi = kv_get_pi(adev);
1421
1422 return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
1423 value, pi->sram_end);
1424 }
1425 #endif
1426
1427 static void kv_init_sclk_t(struct amdgpu_device *adev)
1428 {
1429 struct kv_power_info *pi = kv_get_pi(adev);
1430
1431 pi->low_sclk_interrupt_t = 0;
1432 }
1433
1434 static int kv_init_fps_limits(struct amdgpu_device *adev)
1435 {
1436 struct kv_power_info *pi = kv_get_pi(adev);
1437 int ret = 0;
1438
1439 if (pi->caps_fps) {
1440 u16 tmp;
1441
1442 tmp = 45;
1443 pi->fps_high_t = cpu_to_be16(tmp);
1444 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1445 pi->dpm_table_start +
1446 offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1447 (u8 *)&pi->fps_high_t,
1448 sizeof(u16), pi->sram_end);
1449
1450 tmp = 30;
1451 pi->fps_low_t = cpu_to_be16(tmp);
1452
1453 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1454 pi->dpm_table_start +
1455 offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1456 (u8 *)&pi->fps_low_t,
1457 sizeof(u16), pi->sram_end);
1458
1459 }
1460 return ret;
1461 }
1462
1463 static void kv_init_powergate_state(struct amdgpu_device *adev)
1464 {
1465 struct kv_power_info *pi = kv_get_pi(adev);
1466
1467 pi->uvd_power_gated = false;
1468 pi->vce_power_gated = false;
1469 pi->samu_power_gated = false;
1470 pi->acp_power_gated = false;
1471
1472 }
1473
1474 static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
1475 {
1476 return amdgpu_kv_notify_message_to_smu(adev, enable ?
1477 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1478 }
1479
1480 static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
1481 {
1482 return amdgpu_kv_notify_message_to_smu(adev, enable ?
1483 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
1484 }
1485
1486 static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
1487 {
1488 return amdgpu_kv_notify_message_to_smu(adev, enable ?
1489 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1490 }
1491
1492 static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
1493 {
1494 return amdgpu_kv_notify_message_to_smu(adev, enable ?
1495 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1496 }
1497
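/*
 * When ungating UVD, pick a boot level from the UVD clock/voltage
 * dependency table, push it to the SMC and program the enabled-level
 * mask; then enable or disable UVD DPM to match the gate state.
 */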
1498 static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
1499 {
1500 struct kv_power_info *pi = kv_get_pi(adev);
1501 struct amdgpu_uvd_clock_voltage_dependency_table *table =
1502 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1503 int ret;
1504 u32 mask;
1505
1506 if (!gate) {
1507 if (table->count)
1508 pi->uvd_boot_level = table->count - 1;
1509 else
1510 pi->uvd_boot_level = 0;
1511
1512 if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
1513 mask = 1 << pi->uvd_boot_level;
1514 } else {
1515 mask = 0x1f;
1516 }
1517
1518 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1519 pi->dpm_table_start +
1520 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
1521 (uint8_t *)&pi->uvd_boot_level,
1522 sizeof(u8), pi->sram_end);
1523 if (ret)
1524 return ret;
1525
1526 amdgpu_kv_send_msg_to_smc_with_parameter(adev,
1527 PPSMC_MSG_UVDDPM_SetEnabledMask,
1528 mask);
1529 }
1530
1531 return kv_enable_uvd_dpm(adev, !gate);
1532 }
1533
1534 static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
1535 {
1536 u8 i;
1537 struct amdgpu_vce_clock_voltage_dependency_table *table =
1538 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1539
1540 for (i = 0; i < table->count; i++) {
1541 if (table->entries[i].evclk >= evclk)
1542 break;
1543 }
1544
1545 return i;
1546 }
1547
1548 static int kv_update_vce_dpm(struct amdgpu_device *adev,
1549 struct amdgpu_ps *amdgpu_new_state,
1550 struct amdgpu_ps *amdgpu_current_state)
1551 {
1552 struct kv_power_info *pi = kv_get_pi(adev);
1553 struct amdgpu_vce_clock_voltage_dependency_table *table =
1554 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1555 int ret;
1556
1557 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
1558 if (pi->caps_stable_p_state)
1559 pi->vce_boot_level = table->count - 1;
1560 else
1561 pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);
1562
1563 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1564 pi->dpm_table_start +
1565 offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
1566 (u8 *)&pi->vce_boot_level,
1567 sizeof(u8),
1568 pi->sram_end);
1569 if (ret)
1570 return ret;
1571
1572 if (pi->caps_stable_p_state)
1573 amdgpu_kv_send_msg_to_smc_with_parameter(adev,
1574 PPSMC_MSG_VCEDPM_SetEnabledMask,
1575 (1 << pi->vce_boot_level));
1576 kv_enable_vce_dpm(adev, true);
1577 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
1578 kv_enable_vce_dpm(adev, false);
1579 }
1580
1581 return 0;
1582 }
1583
1584 static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
1585 {
1586 struct kv_power_info *pi = kv_get_pi(adev);
1587 struct amdgpu_clock_voltage_dependency_table *table =
1588 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1589 int ret;
1590
1591 if (!gate) {
1592 if (pi->caps_stable_p_state)
1593 pi->samu_boot_level = table->count - 1;
1594 else
1595 pi->samu_boot_level = 0;
1596
1597 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1598 pi->dpm_table_start +
1599 offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
1600 (u8 *)&pi->samu_boot_level,
1601 sizeof(u8),
1602 pi->sram_end);
1603 if (ret)
1604 return ret;
1605
1606 if (pi->caps_stable_p_state)
1607 amdgpu_kv_send_msg_to_smc_with_parameter(adev,
1608 PPSMC_MSG_SAMUDPM_SetEnabledMask,
1609 (1 << pi->samu_boot_level));
1610 }
1611
1612 return kv_enable_samu_dpm(adev, !gate);
1613 }
1614
1615 static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
1616 {
1617 u8 i;
1618 struct amdgpu_clock_voltage_dependency_table *table =
1619 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1620
1621 for (i = 0; i < table->count; i++) {
1622 /* XXX Fake out -Wtype-limits. */
1623 if (table->entries[i].clk == 0 || table->entries[i].clk > 0) /* XXX */
1624 break;
1625 }
1626
1627 if (i >= table->count)
1628 i = table->count - 1;
1629
1630 return i;
1631 }
1632
1633 static void kv_update_acp_boot_level(struct amdgpu_device *adev)
1634 {
1635 struct kv_power_info *pi = kv_get_pi(adev);
1636 u8 acp_boot_level;
1637
1638 if (!pi->caps_stable_p_state) {
1639 acp_boot_level = kv_get_acp_boot_level(adev);
1640 if (acp_boot_level != pi->acp_boot_level) {
1641 pi->acp_boot_level = acp_boot_level;
1642 amdgpu_kv_send_msg_to_smc_with_parameter(adev,
1643 PPSMC_MSG_ACPDPM_SetEnabledMask,
1644 (1 << pi->acp_boot_level));
1645 }
1646 }
1647 }
1648
1649 static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
1650 {
1651 struct kv_power_info *pi = kv_get_pi(adev);
1652 struct amdgpu_clock_voltage_dependency_table *table =
1653 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1654 int ret;
1655
1656 if (!gate) {
1657 if (pi->caps_stable_p_state)
1658 pi->acp_boot_level = table->count - 1;
1659 else
1660 pi->acp_boot_level = kv_get_acp_boot_level(adev);
1661
1662 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1663 pi->dpm_table_start +
1664 offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
1665 (u8 *)&pi->acp_boot_level,
1666 sizeof(u8),
1667 pi->sram_end);
1668 if (ret)
1669 return ret;
1670
1671 if (pi->caps_stable_p_state)
1672 amdgpu_kv_send_msg_to_smc_with_parameter(adev,
1673 PPSMC_MSG_ACPDPM_SetEnabledMask,
1674 (1 << pi->acp_boot_level));
1675 }
1676
1677 return kv_enable_acp_dpm(adev, !gate);
1678 }
1679
1680 static void kv_dpm_powergate_uvd(void *handle, bool gate)
1681 {
1682 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1683 struct kv_power_info *pi = kv_get_pi(adev);
1684 int ret __unused;
1685
1686 pi->uvd_power_gated = gate;
1687
1688 if (gate) {
1689 /* stop the UVD block */
1690 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1691 AMD_PG_STATE_GATE);
1692 kv_update_uvd_dpm(adev, gate);
1693 if (pi->caps_uvd_pg)
1694 /* power off the UVD block */
1695 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
1696 } else {
1697 if (pi->caps_uvd_pg)
1698 /* power on the UVD block */
1699 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
1700 /* re-init the UVD block */
1701 kv_update_uvd_dpm(adev, gate);
1702
1703 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
1704 AMD_PG_STATE_UNGATE);
1705 }
1706 }
1707
1708 static void kv_dpm_powergate_vce(void *handle, bool gate)
1709 {
1710 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1711 struct kv_power_info *pi = kv_get_pi(adev);
1712 int ret __unused;
1713
1714 pi->vce_power_gated = gate;
1715
1716 if (gate) {
1717 /* stop the VCE block */
1718 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
1719 AMD_PG_STATE_GATE);
1720 kv_enable_vce_dpm(adev, false);
1721 if (pi->caps_vce_pg) /* power off the VCE block */
1722 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
1723 } else {
1724 if (pi->caps_vce_pg) /* power on the VCE block */
1725 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
1726 kv_enable_vce_dpm(adev, true);
1727 /* re-init the VCE block */
1728 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
1729 AMD_PG_STATE_UNGATE);
1730 }
1731 }
1732
1733
1734 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
1735 {
1736 struct kv_power_info *pi = kv_get_pi(adev);
1737
1738 if (pi->samu_power_gated == gate)
1739 return;
1740
1741 pi->samu_power_gated = gate;
1742
1743 if (gate) {
1744 kv_update_samu_dpm(adev, true);
1745 if (pi->caps_samu_pg)
1746 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
1747 } else {
1748 if (pi->caps_samu_pg)
1749 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
1750 kv_update_samu_dpm(adev, false);
1751 }
1752 }
1753
1754 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
1755 {
1756 struct kv_power_info *pi = kv_get_pi(adev);
1757
1758 if (pi->acp_power_gated == gate)
1759 return;
1760
1761 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
1762 return;
1763
1764 pi->acp_power_gated = gate;
1765
1766 if (gate) {
1767 kv_update_acp_dpm(adev, true);
1768 if (pi->caps_acp_pg)
1769 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
1770 } else {
1771 if (pi->caps_acp_pg)
1772 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
1773 kv_update_acp_dpm(adev, false);
1774 }
1775 }
1776
1777 static void kv_set_valid_clock_range(struct amdgpu_device *adev,
1778 struct amdgpu_ps *new_rps)
1779 {
1780 struct kv_ps *new_ps = kv_get_ps(new_rps);
1781 struct kv_power_info *pi = kv_get_pi(adev);
1782 u32 i;
1783 struct amdgpu_clock_voltage_dependency_table *table =
1784 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1785
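/*
 * Clamp the usable graphics DPM window [lowest_valid, highest_valid] to the
 * sclk range of the new power state, using the vddc/sclk dependency table
 * when present and the VBIOS sclk/voltage mapping table otherwise.
 */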
1786 if (table && table->count) {
1787 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1788 if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
1789 (i == (pi->graphics_dpm_level_count - 1))) {
1790 pi->lowest_valid = i;
1791 break;
1792 }
1793 }
1794
1795 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1796 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
1797 break;
1798 }
1799 pi->highest_valid = i;
1800
1801 if (pi->lowest_valid > pi->highest_valid) {
1802 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
1803 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
1804 pi->highest_valid = pi->lowest_valid;
1805 else
1806 pi->lowest_valid = pi->highest_valid;
1807 }
1808 } else {
1809 struct sumo_sclk_voltage_mapping_table *table =
1810 &pi->sys_info.sclk_voltage_mapping_table;
1811
1812 for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
1813 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
1814 i == (int)(pi->graphics_dpm_level_count - 1)) {
1815 pi->lowest_valid = i;
1816 break;
1817 }
1818 }
1819
1820 for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
1821 if (table->entries[i].sclk_frequency <=
1822 new_ps->levels[new_ps->num_levels - 1].sclk)
1823 break;
1824 }
1825 pi->highest_valid = i;
1826
1827 if (pi->lowest_valid > pi->highest_valid) {
1828 if ((new_ps->levels[0].sclk -
1829 table->entries[pi->highest_valid].sclk_frequency) >
1830 (table->entries[pi->lowest_valid].sclk_frequency -
1831 new_ps->levels[new_ps->num_levels - 1].sclk))
1832 pi->highest_valid = pi->lowest_valid;
1833 else
1834 pi->lowest_valid = pi->highest_valid;
1835 }
1836 }
1837 }
1838
1839 static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
1840 struct amdgpu_ps *new_rps)
1841 {
1842 struct kv_ps *new_ps = kv_get_ps(new_rps);
1843 struct kv_power_info *pi = kv_get_pi(adev);
1844 int ret = 0;
1845 u8 clk_bypass_cntl;
1846
1847 if (pi->caps_enable_dfs_bypass) {
1848 clk_bypass_cntl = new_ps->need_dfs_bypass ?
1849 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1850 ret = amdgpu_kv_copy_bytes_to_smc(adev,
1851 (pi->dpm_table_start +
1852 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1853 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1854 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1855 &clk_bypass_cntl,
1856 sizeof(u8), pi->sram_end);
1857 }
1858
1859 return ret;
1860 }
1861
1862 static int kv_enable_nb_dpm(struct amdgpu_device *adev,
1863 bool enable)
1864 {
1865 struct kv_power_info *pi = kv_get_pi(adev);
1866 int ret = 0;
1867
1868 if (enable) {
1869 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1870 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
1871 if (ret == 0)
1872 pi->nb_dpm_enabled = true;
1873 }
1874 } else {
1875 if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
1876 ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
1877 if (ret == 0)
1878 pi->nb_dpm_enabled = false;
1879 }
1880 }
1881
1882 return ret;
1883 }
1884
1885 static int kv_dpm_force_performance_level(void *handle,
1886 enum amd_dpm_forced_level level)
1887 {
1888 int ret;
1889 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1890
1891 if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
1892 ret = kv_force_dpm_highest(adev);
1893 if (ret)
1894 return ret;
1895 } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
1896 ret = kv_force_dpm_lowest(adev);
1897 if (ret)
1898 return ret;
1899 } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
1900 ret = kv_unforce_levels(adev);
1901 if (ret)
1902 return ret;
1903 }
1904
1905 adev->pm.dpm.forced_level = level;
1906
1907 return 0;
1908 }
1909
1910 static int kv_dpm_pre_set_power_state(void *handle)
1911 {
1912 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1913 struct kv_power_info *pi = kv_get_pi(adev);
1914 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
1915 struct amdgpu_ps *new_ps = &requested_ps;
1916
1917 kv_update_requested_ps(adev, new_ps);
1918
1919 kv_apply_state_adjust_rules(adev,
1920 &pi->requested_rps,
1921 &pi->current_rps);
1922
1923 return 0;
1924 }
1925
1926 static int kv_dpm_set_power_state(void *handle)
1927 {
1928 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1929 struct kv_power_info *pi = kv_get_pi(adev);
1930 struct amdgpu_ps *new_ps = &pi->requested_rps;
1931 struct amdgpu_ps *old_ps = &pi->current_rps;
1932 int ret;
1933
1934 if (pi->bapm_enable) {
1935 ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
1936 if (ret) {
1937 DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
1938 return ret;
1939 }
1940 }
1941
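/*
 * Kabini/Mullins reprogram the DPM table with the levels forced to the
 * lowest valid state and then unforced, while Kaveri freezes sclk DPM
 * around the table upload instead.
 */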
1942 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
1943 if (pi->enable_dpm) {
1944 kv_set_valid_clock_range(adev, new_ps);
1945 kv_update_dfs_bypass_settings(adev, new_ps);
1946 ret = kv_calculate_ds_divider(adev);
1947 if (ret) {
1948 DRM_ERROR("kv_calculate_ds_divider failed\n");
1949 return ret;
1950 }
1951 kv_calculate_nbps_level_settings(adev);
1952 kv_calculate_dpm_settings(adev);
1953 kv_force_lowest_valid(adev);
1954 kv_enable_new_levels(adev);
1955 kv_upload_dpm_settings(adev);
1956 kv_program_nbps_index_settings(adev, new_ps);
1957 kv_unforce_levels(adev);
1958 kv_set_enabled_levels(adev);
1959 kv_force_lowest_valid(adev);
1960 kv_unforce_levels(adev);
1961
1962 ret = kv_update_vce_dpm(adev, new_ps, old_ps);
1963 if (ret) {
1964 DRM_ERROR("kv_update_vce_dpm failed\n");
1965 return ret;
1966 }
1967 kv_update_sclk_t(adev);
1968 if (adev->asic_type == CHIP_MULLINS)
1969 kv_enable_nb_dpm(adev, true);
1970 }
1971 } else {
1972 if (pi->enable_dpm) {
1973 kv_set_valid_clock_range(adev, new_ps);
1974 kv_update_dfs_bypass_settings(adev, new_ps);
1975 ret = kv_calculate_ds_divider(adev);
1976 if (ret) {
1977 DRM_ERROR("kv_calculate_ds_divider failed\n");
1978 return ret;
1979 }
1980 kv_calculate_nbps_level_settings(adev);
1981 kv_calculate_dpm_settings(adev);
1982 kv_freeze_sclk_dpm(adev, true);
1983 kv_upload_dpm_settings(adev);
1984 kv_program_nbps_index_settings(adev, new_ps);
1985 kv_freeze_sclk_dpm(adev, false);
1986 kv_set_enabled_levels(adev);
1987 ret = kv_update_vce_dpm(adev, new_ps, old_ps);
1988 if (ret) {
1989 DRM_ERROR("kv_update_vce_dpm failed\n");
1990 return ret;
1991 }
1992 kv_update_acp_boot_level(adev);
1993 kv_update_sclk_t(adev);
1994 kv_enable_nb_dpm(adev, true);
1995 }
1996 }
1997
1998 return 0;
1999 }
2000
2001 static void kv_dpm_post_set_power_state(void *handle)
2002 {
2003 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2004 struct kv_power_info *pi = kv_get_pi(adev);
2005 struct amdgpu_ps *new_ps = &pi->requested_rps;
2006
2007 kv_update_current_ps(adev, new_ps);
2008 }
2009
2010 static void kv_dpm_setup_asic(struct amdgpu_device *adev)
2011 {
2012 sumo_take_smu_control(adev, true);
2013 kv_init_powergate_state(adev);
2014 kv_init_sclk_t(adev);
2015 }
2016
2017 #if 0
2018 static void kv_dpm_reset_asic(struct amdgpu_device *adev)
2019 {
2020 struct kv_power_info *pi = kv_get_pi(adev);
2021
2022 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
2023 kv_force_lowest_valid(adev);
2024 kv_init_graphics_levels(adev);
2025 kv_program_bootup_state(adev);
2026 kv_upload_dpm_settings(adev);
2027 kv_force_lowest_valid(adev);
2028 kv_unforce_levels(adev);
2029 } else {
2030 kv_init_graphics_levels(adev);
2031 kv_program_bootup_state(adev);
2032 kv_freeze_sclk_dpm(adev, true);
2033 kv_upload_dpm_settings(adev);
2034 kv_freeze_sclk_dpm(adev, false);
2035 kv_set_enabled_level(adev, pi->graphics_boot_level);
2036 }
2037 }
2038 #endif
2039
2040 static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
2041 struct amdgpu_clock_and_voltage_limits *table)
2042 {
2043 struct kv_power_info *pi = kv_get_pi(adev);
2044
2045 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
2046 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
2047 table->sclk =
2048 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
2049 table->vddc =
2050 kv_convert_2bit_index_to_voltage(adev,
2051 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
2052 }
2053
2054 table->mclk = pi->sys_info.nbp_memory_clock[0];
2055 }
2056
2057 static void kv_patch_voltage_values(struct amdgpu_device *adev)
2058 {
2059 int i;
2060 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
2061 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
2062 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
2063 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
2064 struct amdgpu_clock_voltage_dependency_table *samu_table =
2065 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
2066 struct amdgpu_clock_voltage_dependency_table *acp_table =
2067 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
2068
2069 if (uvd_table->count) {
2070 for (i = 0; i < uvd_table->count; i++)
2071 uvd_table->entries[i].v =
2072 kv_convert_8bit_index_to_voltage(adev,
2073 uvd_table->entries[i].v);
2074 }
2075
2076 if (vce_table->count) {
2077 for (i = 0; i < vce_table->count; i++)
2078 vce_table->entries[i].v =
2079 kv_convert_8bit_index_to_voltage(adev,
2080 vce_table->entries[i].v);
2081 }
2082
2083 if (samu_table->count) {
2084 for (i = 0; i < samu_table->count; i++)
2085 samu_table->entries[i].v =
2086 kv_convert_8bit_index_to_voltage(adev,
2087 samu_table->entries[i].v);
2088 }
2089
2090 if (acp_table->count) {
2091 for (i = 0; i < acp_table->count; i++)
2092 acp_table->entries[i].v =
2093 kv_convert_8bit_index_to_voltage(adev,
2094 acp_table->entries[i].v);
2095 }
2096
2097 }
2098
2099 static void kv_construct_boot_state(struct amdgpu_device *adev)
2100 {
2101 struct kv_power_info *pi = kv_get_pi(adev);
2102
2103 pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
2104 pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
2105 pi->boot_pl.ds_divider_index = 0;
2106 pi->boot_pl.ss_divider_index = 0;
2107 pi->boot_pl.allow_gnb_slow = 1;
2108 pi->boot_pl.force_nbp_state = 0;
2109 pi->boot_pl.display_wm = 0;
2110 pi->boot_pl.vce_wm = 0;
2111 }
2112
2113 static int kv_force_dpm_highest(struct amdgpu_device *adev)
2114 {
2115 int ret;
2116 u32 enable_mask, i;
2117
2118 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
2119 if (ret)
2120 return ret;
2121
2122 for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
2123 if (enable_mask & (1 << i))
2124 break;
2125 }
2126
2127 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
2128 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
2129 else
2130 return kv_set_enabled_level(adev, i);
2131 }
2132
2133 static int kv_force_dpm_lowest(struct amdgpu_device *adev)
2134 {
2135 int ret;
2136 u32 enable_mask, i;
2137
2138 ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
2139 if (ret)
2140 return ret;
2141
2142 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2143 if (enable_mask & (1 << i))
2144 break;
2145 }
2146
2147 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
2148 return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
2149 else
2150 return kv_set_enabled_level(adev, i);
2151 }
2152
2153 static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
2154 u32 sclk, u32 min_sclk_in_sr)
2155 {
2156 struct kv_power_info *pi = kv_get_pi(adev);
2157 u32 i;
2158 u32 temp;
2159 u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
2160
2161 if (sclk < min)
2162 return 0;
2163
2164 if (!pi->caps_sclk_ds)
2165 return 0;
2166
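/*
 * Pick the largest deep-sleep divider id (up to KV_MAX_DEEPSLEEP_DIVIDER_ID)
 * for which sclk >> id still meets the minimum engine clock.
 */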
2167 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
2168 temp = sclk >> i;
2169 if (temp >= min)
2170 break;
2171 }
2172
2173 return (u8)i;
2174 }
2175
2176 static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
2177 {
2178 struct kv_power_info *pi = kv_get_pi(adev);
2179 struct amdgpu_clock_voltage_dependency_table *table =
2180 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2181 int i;
2182
2183 if (table && table->count) {
2184 for (i = table->count - 1; i >= 0; i--) {
2185 if (pi->high_voltage_t &&
2186 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
2187 pi->high_voltage_t)) {
2188 *limit = i;
2189 return 0;
2190 }
2191 }
2192 } else {
2193 struct sumo_sclk_voltage_mapping_table *table =
2194 &pi->sys_info.sclk_voltage_mapping_table;
2195
2196 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
2197 if (pi->high_voltage_t &&
2198 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
2199 pi->high_voltage_t)) {
2200 *limit = i;
2201 return 0;
2202 }
2203 }
2204 }
2205
2206 *limit = 0;
2207 return 0;
2208 }
2209
2210 static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
2211 struct amdgpu_ps *new_rps,
2212 struct amdgpu_ps *old_rps)
2213 {
2214 struct kv_ps *ps = kv_get_ps(new_rps);
2215 struct kv_power_info *pi = kv_get_pi(adev);
2216 u32 min_sclk = 10000; /* ??? */
2217 u32 sclk, mclk = 0;
2218 int i, limit;
2219 bool force_high;
2220 struct amdgpu_clock_voltage_dependency_table *table =
2221 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2222 u32 stable_p_state_sclk = 0;
2223 struct amdgpu_clock_and_voltage_limits *max_limits =
2224 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2225
2226 if (new_rps->vce_active) {
2227 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
2228 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
2229 } else {
2230 new_rps->evclk = 0;
2231 new_rps->ecclk = 0;
2232 }
2233
2234 mclk = max_limits->mclk;
2235 sclk = min_sclk;
2236
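/*
 * With stable p-state forced, target roughly 75% of the maximum sclk,
 * snapped down to an entry of the vddc/sclk dependency table.
 */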
2237 if (pi->caps_stable_p_state) {
2238 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
2239
2240 for (i = table->count - 1; i >= 0; i--) {
2241 if (stable_p_state_sclk >= table->entries[i].clk) {
2242 stable_p_state_sclk = table->entries[i].clk;
2243 break;
2244 }
2245 }
2246
2247 if (i > 0)
2248 stable_p_state_sclk = table->entries[0].clk;
2249
2250 sclk = stable_p_state_sclk;
2251 }
2252
2253 if (new_rps->vce_active) {
2254 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
2255 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
2256 }
2257
2258 ps->need_dfs_bypass = true;
2259
2260 for (i = 0; i < ps->num_levels; i++) {
2261 if (ps->levels[i].sclk < sclk)
2262 ps->levels[i].sclk = sclk;
2263 }
2264
2265 if (table && table->count) {
2266 for (i = 0; i < ps->num_levels; i++) {
2267 if (pi->high_voltage_t &&
2268 (pi->high_voltage_t <
2269 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
2270 kv_get_high_voltage_limit(adev, &limit);
2271 ps->levels[i].sclk = table->entries[limit].clk;
2272 }
2273 }
2274 } else {
2275 struct sumo_sclk_voltage_mapping_table *table =
2276 &pi->sys_info.sclk_voltage_mapping_table;
2277
2278 for (i = 0; i < ps->num_levels; i++) {
2279 if (pi->high_voltage_t &&
2280 (pi->high_voltage_t <
2281 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
2282 kv_get_high_voltage_limit(adev, &limit);
2283 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2284 }
2285 }
2286 }
2287
2288 if (pi->caps_stable_p_state) {
2289 for (i = 0; i < ps->num_levels; i++) {
2290 ps->levels[i].sclk = stable_p_state_sclk;
2291 }
2292 }
2293
2294 pi->video_start = new_rps->dclk || new_rps->vclk ||
2295 new_rps->evclk || new_rps->ecclk;
2296
2297 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2298 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2299 pi->battery_state = true;
2300 else
2301 pi->battery_state = false;
2302
2303 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
2304 ps->dpm0_pg_nb_ps_lo = 0x1;
2305 ps->dpm0_pg_nb_ps_hi = 0x0;
2306 ps->dpmx_nb_ps_lo = 0x1;
2307 ps->dpmx_nb_ps_hi = 0x0;
2308 } else {
2309 ps->dpm0_pg_nb_ps_lo = 0x3;
2310 ps->dpm0_pg_nb_ps_hi = 0x0;
2311 ps->dpmx_nb_ps_lo = 0x3;
2312 ps->dpmx_nb_ps_hi = 0x0;
2313
2314 if (pi->sys_info.nb_dpm_enable) {
2315 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2316 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
2317 pi->disable_nb_ps3_in_battery;
2318 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2319 ps->dpm0_pg_nb_ps_hi = 0x2;
2320 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2321 ps->dpmx_nb_ps_hi = 0x2;
2322 }
2323 }
2324 }
2325
2326 static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
2327 u32 index, bool enable)
2328 {
2329 struct kv_power_info *pi = kv_get_pi(adev);
2330
2331 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
2332 }
2333
2334 static int kv_calculate_ds_divider(struct amdgpu_device *adev)
2335 {
2336 struct kv_power_info *pi = kv_get_pi(adev);
2337 u32 sclk_in_sr = 10000; /* ??? */
2338 u32 i;
2339
2340 if (pi->lowest_valid > pi->highest_valid)
2341 return -EINVAL;
2342
2343 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2344 pi->graphics_level[i].DeepSleepDivId =
2345 kv_get_sleep_divider_id_from_clock(adev,
2346 be32_to_cpu(pi->graphics_level[i].SclkFrequency),
2347 sclk_in_sr);
2348 }
2349 return 0;
2350 }
2351
2352 static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
2353 {
2354 struct kv_power_info *pi = kv_get_pi(adev);
2355 u32 i;
2356 bool force_high;
2357 struct amdgpu_clock_and_voltage_limits *max_limits =
2358 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2359 u32 mclk = max_limits->mclk;
2360
2361 if (pi->lowest_valid > pi->highest_valid)
2362 return -EINVAL;
2363
2364 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
2365 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2366 pi->graphics_level[i].GnbSlow = 1;
2367 pi->graphics_level[i].ForceNbPs1 = 0;
2368 pi->graphics_level[i].UpH = 0;
2369 }
2370
2371 if (!pi->sys_info.nb_dpm_enable)
2372 return 0;
2373
2374 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2375 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2376
2377 if (force_high) {
2378 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2379 pi->graphics_level[i].GnbSlow = 0;
2380 } else {
2381 if (pi->battery_state)
2382 pi->graphics_level[0].ForceNbPs1 = 1;
2383
2384 pi->graphics_level[1].GnbSlow = 0;
2385 pi->graphics_level[2].GnbSlow = 0;
2386 pi->graphics_level[3].GnbSlow = 0;
2387 pi->graphics_level[4].GnbSlow = 0;
2388 }
2389 } else {
2390 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2391 pi->graphics_level[i].GnbSlow = 1;
2392 pi->graphics_level[i].ForceNbPs1 = 0;
2393 pi->graphics_level[i].UpH = 0;
2394 }
2395
2396 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2397 pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2398 pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2399 if (pi->lowest_valid != pi->highest_valid)
2400 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
2401 }
2402 }
2403 return 0;
2404 }
2405
2406 static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
2407 {
2408 struct kv_power_info *pi = kv_get_pi(adev);
2409 u32 i;
2410
2411 if (pi->lowest_valid > pi->highest_valid)
2412 return -EINVAL;
2413
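/* Only the highest valid level gets the high display watermark. */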
2414 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2415 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2416
2417 return 0;
2418 }
2419
2420 static void kv_init_graphics_levels(struct amdgpu_device *adev)
2421 {
2422 struct kv_power_info *pi = kv_get_pi(adev);
2423 u32 i;
2424 struct amdgpu_clock_voltage_dependency_table *table =
2425 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2426
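/*
 * Build the graphics DPM levels from the vddc/sclk dependency table or,
 * if it is empty, from the VBIOS sclk/voltage mapping table; entries above
 * the high-voltage threshold are dropped.
 */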
2427 if (table && table->count) {
2428 u32 vid_2bit;
2429
2430 pi->graphics_dpm_level_count = 0;
2431 for (i = 0; i < table->count; i++) {
2432 if (pi->high_voltage_t &&
2433 (pi->high_voltage_t <
2434 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
2435 break;
2436
2437 kv_set_divider_value(adev, i, table->entries[i].clk);
2438 vid_2bit = kv_convert_vid7_to_vid2(adev,
2439 &pi->sys_info.vid_mapping_table,
2440 table->entries[i].v);
2441 kv_set_vid(adev, i, vid_2bit);
2442 kv_set_at(adev, i, pi->at[i]);
2443 kv_dpm_power_level_enabled_for_throttle(adev, i, true);
2444 pi->graphics_dpm_level_count++;
2445 }
2446 } else {
2447 struct sumo_sclk_voltage_mapping_table *table =
2448 &pi->sys_info.sclk_voltage_mapping_table;
2449
2450 pi->graphics_dpm_level_count = 0;
2451 for (i = 0; i < table->num_max_dpm_entries; i++) {
2452 if (pi->high_voltage_t &&
2453 pi->high_voltage_t <
2454 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
2455 break;
2456
2457 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
2458 kv_set_vid(adev, i, table->entries[i].vid_2bit);
2459 kv_set_at(adev, i, pi->at[i]);
2460 kv_dpm_power_level_enabled_for_throttle(adev, i, true);
2461 pi->graphics_dpm_level_count++;
2462 }
2463 }
2464
2465 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2466 kv_dpm_power_level_enable(adev, i, false);
2467 }
2468
2469 static void kv_enable_new_levels(struct amdgpu_device *adev)
2470 {
2471 struct kv_power_info *pi = kv_get_pi(adev);
2472 u32 i;
2473
2474 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2475 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2476 kv_dpm_power_level_enable(adev, i, true);
2477 }
2478 }
2479
2480 static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
2481 {
2482 u32 new_mask = (1 << level);
2483
2484 return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
2485 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2486 new_mask);
2487 }
2488
2489 static int kv_set_enabled_levels(struct amdgpu_device *adev)
2490 {
2491 struct kv_power_info *pi = kv_get_pi(adev);
2492 u32 i, new_mask = 0;
2493
2494 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2495 new_mask |= (1 << i);
2496
2497 return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
2498 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2499 new_mask);
2500 }
2501
2502 static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
2503 struct amdgpu_ps *new_rps)
2504 {
2505 struct kv_ps *new_ps = kv_get_ps(new_rps);
2506 struct kv_power_info *pi = kv_get_pi(adev);
2507 u32 nbdpmconfig1;
2508
2509 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
2510 return;
2511
2512 if (pi->sys_info.nb_dpm_enable) {
2513 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
2514 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
2515 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
2516 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
2517 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
2518 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
2519 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
2520 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
2521 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
2522 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
2523 }
2524 }
2525
2526 static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
2527 int min_temp, int max_temp)
2528 {
2529 int low_temp = 0 * 1000;
2530 int high_temp = 255 * 1000;
2531 u32 tmp;
2532
2533 if (low_temp < min_temp)
2534 low_temp = min_temp;
2535 if (high_temp > max_temp)
2536 high_temp = max_temp;
2537 if (high_temp < low_temp) {
2538 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2539 return -EINVAL;
2540 }
2541
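/*
 * Program the high/low thermal interrupt thresholds in degrees C,
 * applying the same +49 offset that kv_dpm_get_temp() removes.
 */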
2542 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
2543 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
2544 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
2545 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
2546 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
2547 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
2548
2549 adev->pm.dpm.thermal.min_temp = low_temp;
2550 adev->pm.dpm.thermal.max_temp = high_temp;
2551
2552 return 0;
2553 }
2554
2555 union igp_info {
2556 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2557 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2558 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2559 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2560 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2561 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
2562 };
2563
2564 static int kv_parse_sys_info_table(struct amdgpu_device *adev)
2565 {
2566 struct kv_power_info *pi = kv_get_pi(adev);
2567 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2568 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2569 union igp_info *igp_info;
2570 u8 frev, crev;
2571 u16 data_offset;
2572 int i;
2573
2574 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
2575 &frev, &crev, &data_offset)) {
2576 igp_info = (union igp_info *)(mode_info->atom_context->bios +
2577 data_offset);
2578
2579 if (crev != 8) {
2580 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2581 return -EINVAL;
2582 }
2583 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2584 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2585 pi->sys_info.bootup_nb_voltage_index =
2586 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2587 if (igp_info->info_8.ucHtcTmpLmt == 0)
2588 pi->sys_info.htc_tmp_lmt = 203;
2589 else
2590 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2591 if (igp_info->info_8.ucHtcHystLmt == 0)
2592 pi->sys_info.htc_hyst_lmt = 5;
2593 else
2594 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2595 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2596 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2597 }
2598
2599 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2600 pi->sys_info.nb_dpm_enable = true;
2601 else
2602 pi->sys_info.nb_dpm_enable = false;
2603
2604 for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2605 pi->sys_info.nbp_memory_clock[i] =
2606 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2607 pi->sys_info.nbp_n_clock[i] =
2608 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2609 }
2610 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2611 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2612 pi->caps_enable_dfs_bypass = true;
2613
2614 sumo_construct_sclk_voltage_mapping_table(adev,
2615 &pi->sys_info.sclk_voltage_mapping_table,
2616 igp_info->info_8.sAvail_SCLK);
2617
2618 sumo_construct_vid_mapping_table(adev,
2619 &pi->sys_info.vid_mapping_table,
2620 igp_info->info_8.sAvail_SCLK);
2621
2622 kv_construct_max_power_limits_table(adev,
2623 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
2624 }
2625 return 0;
2626 }
2627
2628 union power_info {
2629 struct _ATOM_POWERPLAY_INFO info;
2630 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2631 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2632 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2633 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2634 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2635 };
2636
2637 union pplib_clock_info {
2638 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2639 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2640 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2641 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2642 };
2643
2644 union pplib_power_state {
2645 struct _ATOM_PPLIB_STATE v1;
2646 struct _ATOM_PPLIB_STATE_V2 v2;
2647 };
2648
2649 static void kv_patch_boot_state(struct amdgpu_device *adev,
2650 struct kv_ps *ps)
2651 {
2652 struct kv_power_info *pi = kv_get_pi(adev);
2653
2654 ps->num_levels = 1;
2655 ps->levels[0] = pi->boot_pl;
2656 }
2657
2658 static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
2659 struct amdgpu_ps *rps,
2660 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2661 u8 table_rev)
2662 {
2663 struct kv_ps *ps = kv_get_ps(rps);
2664
2665 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2666 rps->class = le16_to_cpu(non_clock_info->usClassification);
2667 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2668
2669 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2670 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2671 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2672 } else {
2673 rps->vclk = 0;
2674 rps->dclk = 0;
2675 }
2676
2677 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2678 adev->pm.dpm.boot_ps = rps;
2679 kv_patch_boot_state(adev, ps);
2680 }
2681 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2682 adev->pm.dpm.uvd_ps = rps;
2683 }
2684
2685 static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
2686 struct amdgpu_ps *rps, int index,
2687 union pplib_clock_info *clock_info)
2688 {
2689 struct kv_power_info *pi = kv_get_pi(adev);
2690 struct kv_ps *ps = kv_get_ps(rps);
2691 struct kv_pl *pl = &ps->levels[index];
2692 u32 sclk;
2693
2694 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2695 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2696 pl->sclk = sclk;
2697 pl->vddc_index = clock_info->sumo.vddcIndex;
2698
2699 ps->num_levels = index + 1;
2700
2701 if (pi->caps_sclk_ds) {
2702 pl->ds_divider_index = 5;
2703 pl->ss_divider_index = 5;
2704 }
2705 }
2706
2707 static int kv_parse_power_table(struct amdgpu_device *adev)
2708 {
2709 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2710 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2711 union pplib_power_state *power_state;
2712 int i, j, k, non_clock_array_index, clock_array_index;
2713 union pplib_clock_info *clock_info;
2714 struct _StateArray *state_array;
2715 struct _ClockInfoArray *clock_info_array;
2716 struct _NonClockInfoArray *non_clock_info_array;
2717 union power_info *power_info;
2718 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2719 u16 data_offset;
2720 u8 frev, crev;
2721 u8 *power_state_offset;
2722 struct kv_ps *ps;
2723
2724 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
2725 &frev, &crev, &data_offset))
2726 return -EINVAL;
2727 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2728
2729 amdgpu_add_thermal_controller(adev);
2730
2731 state_array = (struct _StateArray *)
2732 (mode_info->atom_context->bios + data_offset +
2733 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2734 clock_info_array = (struct _ClockInfoArray *)
2735 (mode_info->atom_context->bios + data_offset +
2736 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2737 non_clock_info_array = (struct _NonClockInfoArray *)
2738 (mode_info->atom_context->bios + data_offset +
2739 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2740
2741 adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
2742 sizeof(struct amdgpu_ps),
2743 GFP_KERNEL);
2744 if (!adev->pm.dpm.ps)
2745 return -ENOMEM;
2746 power_state_offset = (u8 *)state_array->states;
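/*
 * Each packed state entry is two header bytes followed by one clock-info
 * index per DPM level, hence the offset advance at the end of the loop.
 */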
2747 for (i = 0; i < state_array->ucNumEntries; i++) {
2748 u8 *idx;
2749 power_state = (union pplib_power_state *)power_state_offset;
2750 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2751 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2752 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2753 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2754 if (ps == NULL) {
2755 kfree(adev->pm.dpm.ps);
2756 return -ENOMEM;
2757 }
2758 adev->pm.dpm.ps[i].ps_priv = ps;
2759 k = 0;
2760 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2761 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2762 clock_array_index = idx[j];
2763 if (clock_array_index >= clock_info_array->ucNumEntries)
2764 continue;
2765 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2766 break;
2767 clock_info = (union pplib_clock_info *)
2768 ((u8 *)&clock_info_array->clockInfo[0] +
2769 (clock_array_index * clock_info_array->ucEntrySize));
2770 kv_parse_pplib_clock_info(adev,
2771 &adev->pm.dpm.ps[i], k,
2772 clock_info);
2773 k++;
2774 }
2775 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
2776 non_clock_info,
2777 non_clock_info_array->ucEntrySize);
2778 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2779 }
2780 adev->pm.dpm.num_ps = state_array->ucNumEntries;
2781
2782 /* fill in the vce power states */
2783 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
2784 u32 sclk;
2785 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
2786 clock_info = (union pplib_clock_info *)
2787 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2788 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2789 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2790 adev->pm.dpm.vce_states[i].sclk = sclk;
2791 adev->pm.dpm.vce_states[i].mclk = 0;
2792 }
2793
2794 return 0;
2795 }
2796
2797 static int kv_dpm_init(struct amdgpu_device *adev)
2798 {
2799 struct kv_power_info *pi;
2800 int ret, i;
2801
2802 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2803 if (pi == NULL)
2804 return -ENOMEM;
2805 adev->pm.dpm.priv = pi;
2806
2807 ret = amdgpu_get_platform_caps(adev);
2808 if (ret)
2809 return ret;
2810
2811 ret = amdgpu_parse_extended_power_table(adev);
2812 if (ret)
2813 return ret;
2814
2815 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2816 pi->at[i] = TRINITY_AT_DFLT;
2817
2818 pi->sram_end = SMC_RAM_END;
2819
2820 pi->enable_nb_dpm = true;
2821
2822 pi->caps_power_containment = true;
2823 pi->caps_cac = true;
2824 pi->enable_didt = false;
2825 if (pi->enable_didt) {
2826 pi->caps_sq_ramping = true;
2827 pi->caps_db_ramping = true;
2828 pi->caps_td_ramping = true;
2829 pi->caps_tcp_ramping = true;
2830 }
2831
2832 if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
2833 pi->caps_sclk_ds = true;
2834 else
2835 pi->caps_sclk_ds = false;
2836
2837 pi->enable_auto_thermal_throttling = true;
2838 pi->disable_nb_ps3_in_battery = false;
2839 if (amdgpu_bapm == 0)
2840 pi->bapm_enable = false;
2841 else
2842 pi->bapm_enable = true;
2843 pi->voltage_drop_t = 0;
2844 pi->caps_sclk_throttle_low_notification = false;
2845 pi->caps_fps = false; /* true? */
2846 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
2847 pi->caps_uvd_dpm = true;
2848 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
2849 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
2850 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
2851 pi->caps_stable_p_state = false;
2852
2853 ret = kv_parse_sys_info_table(adev);
2854 if (ret)
2855 return ret;
2856
2857 kv_patch_voltage_values(adev);
2858 kv_construct_boot_state(adev);
2859
2860 ret = kv_parse_power_table(adev);
2861 if (ret)
2862 return ret;
2863
2864 pi->enable_dpm = true;
2865
2866 return 0;
2867 }
2868
2869 #ifdef CONFIG_DEBUG_FS
2870 static void
2871 kv_dpm_debugfs_print_current_performance_level(void *handle,
2872 struct seq_file *m)
2873 {
2874 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2875 struct kv_power_info *pi = kv_get_pi(adev);
2876 u32 current_index =
2877 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
2878 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
2879 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
2880 u32 sclk, tmp;
2881 u16 vddc;
2882
2883 if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2884 seq_printf(m, "invalid dpm profile %d\n", current_index);
2885 } else {
2886 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2887 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
2888 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2889 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
2890 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
2891 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2892 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
2893 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2894 current_index, sclk, vddc);
2895 }
2896 }
2897 #endif
2898
2899 static void
2900 kv_dpm_print_power_state(void *handle, void *request_ps)
2901 {
2902 int i;
2903 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
2904 struct kv_ps *ps = kv_get_ps(rps);
2905 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2906
2907 amdgpu_dpm_print_class_info(rps->class, rps->class2);
2908 amdgpu_dpm_print_cap_info(rps->caps);
2909 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2910 for (i = 0; i < ps->num_levels; i++) {
2911 struct kv_pl *pl = &ps->levels[i];
2912 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2913 i, pl->sclk,
2914 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
2915 }
2916 amdgpu_dpm_print_ps_status(adev, rps);
2917 }
2918
2919 static void kv_dpm_fini(struct amdgpu_device *adev)
2920 {
2921 int i;
2922
2923 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
2924 kfree(adev->pm.dpm.ps[i].ps_priv);
2925 }
2926 kfree(adev->pm.dpm.ps);
2927 kfree(adev->pm.dpm.priv);
2928 amdgpu_free_extended_power_table(adev);
2929 }
2930
2931 static void kv_dpm_display_configuration_changed(void *handle)
2932 {
2933
2934 }
2935
2936 static u32 kv_dpm_get_sclk(void *handle, bool low)
2937 {
2938 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2939 struct kv_power_info *pi = kv_get_pi(adev);
2940 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2941
2942 if (low)
2943 return requested_state->levels[0].sclk;
2944 else
2945 return requested_state->levels[requested_state->num_levels - 1].sclk;
2946 }
2947
2948 static u32 kv_dpm_get_mclk(void *handle, bool low)
2949 {
2950 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2951 struct kv_power_info *pi = kv_get_pi(adev);
2952
2953 return pi->sys_info.bootup_uma_clk;
2954 }
2955
2956 /* get temperature in millidegrees */
2957 static int kv_dpm_get_temp(void *handle)
2958 {
2959 u32 temp;
2960 int actual_temp = 0;
2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2962
2963 temp = RREG32_SMC(0xC0300E0C);
2964
2965 if (temp)
2966 actual_temp = (temp / 8) - 49;
2967 else
2968 actual_temp = 0;
2969
2970 actual_temp = actual_temp * 1000;
2971
2972 return actual_temp;
2973 }
2974
2975 static int kv_dpm_early_init(void *handle)
2976 {
2977 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2978
2979 adev->powerplay.pp_funcs = &kv_dpm_funcs;
2980 adev->powerplay.pp_handle = adev;
2981 kv_dpm_set_irq_funcs(adev);
2982
2983 return 0;
2984 }
2985
2986 static int kv_dpm_late_init(void *handle)
2987 {
2988 /* powerdown unused blocks for now */
2989 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2990
2991 if (!adev->pm.dpm_enabled)
2992 return 0;
2993
2994 kv_dpm_powergate_acp(adev, true);
2995 kv_dpm_powergate_samu(adev, true);
2996
2997 return 0;
2998 }
2999
3000 static int kv_dpm_sw_init(void *handle)
3001 {
3002 int ret;
3003 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3004
3005 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230,
3006 &adev->pm.dpm.thermal.irq);
3007 if (ret)
3008 return ret;
3009
3010 ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231,
3011 &adev->pm.dpm.thermal.irq);
3012 if (ret)
3013 return ret;
3014
3015 /* default to balanced state */
3016 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
3017 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
3018 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
3019 adev->pm.default_sclk = adev->clock.default_sclk;
3020 adev->pm.default_mclk = adev->clock.default_mclk;
3021 adev->pm.current_sclk = adev->clock.default_sclk;
3022 adev->pm.current_mclk = adev->clock.default_mclk;
3023 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
3024
3025 if (amdgpu_dpm == 0)
3026 return 0;
3027
3028 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
3029 mutex_lock(&adev->pm.mutex);
3030 ret = kv_dpm_init(adev);
3031 if (ret)
3032 goto dpm_failed;
3033 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
3034 if (amdgpu_dpm == 1)
3035 amdgpu_pm_print_power_states(adev);
3036 mutex_unlock(&adev->pm.mutex);
3037 DRM_INFO("amdgpu: dpm initialized\n");
3038
3039 return 0;
3040
3041 dpm_failed:
3042 kv_dpm_fini(adev);
3043 mutex_unlock(&adev->pm.mutex);
3044 DRM_ERROR("amdgpu: dpm initialization failed\n");
3045 return ret;
3046 }
3047
3048 static int kv_dpm_sw_fini(void *handle)
3049 {
3050 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3051
3052 flush_work(&adev->pm.dpm.thermal.work);
3053
3054 mutex_lock(&adev->pm.mutex);
3055 kv_dpm_fini(adev);
3056 mutex_unlock(&adev->pm.mutex);
3057
3058 return 0;
3059 }
3060
3061 static int kv_dpm_hw_init(void *handle)
3062 {
3063 int ret;
3064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3065
3066 if (!amdgpu_dpm)
3067 return 0;
3068
3069 mutex_lock(&adev->pm.mutex);
3070 kv_dpm_setup_asic(adev);
3071 ret = kv_dpm_enable(adev);
3072 if (ret)
3073 adev->pm.dpm_enabled = false;
3074 else
3075 adev->pm.dpm_enabled = true;
3076 mutex_unlock(&adev->pm.mutex);
3077 amdgpu_pm_compute_clocks(adev);
3078 return ret;
3079 }
3080
3081 static int kv_dpm_hw_fini(void *handle)
3082 {
3083 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3084
3085 if (adev->pm.dpm_enabled) {
3086 mutex_lock(&adev->pm.mutex);
3087 kv_dpm_disable(adev);
3088 mutex_unlock(&adev->pm.mutex);
3089 }
3090
3091 return 0;
3092 }
3093
3094 static int kv_dpm_suspend(void *handle)
3095 {
3096 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3097
3098 if (adev->pm.dpm_enabled) {
3099 mutex_lock(&adev->pm.mutex);
3100 /* disable dpm */
3101 kv_dpm_disable(adev);
3102 /* reset the power state */
3103 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
3104 mutex_unlock(&adev->pm.mutex);
3105 }
3106 return 0;
3107 }
3108
3109 static int kv_dpm_resume(void *handle)
3110 {
3111 int ret;
3112 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3113
3114 if (adev->pm.dpm_enabled) {
3115 /* asic init will reset to the boot state */
3116 mutex_lock(&adev->pm.mutex);
3117 kv_dpm_setup_asic(adev);
3118 ret = kv_dpm_enable(adev);
3119 if (ret)
3120 adev->pm.dpm_enabled = false;
3121 else
3122 adev->pm.dpm_enabled = true;
3123 mutex_unlock(&adev->pm.mutex);
3124 if (adev->pm.dpm_enabled)
3125 amdgpu_pm_compute_clocks(adev);
3126 }
3127 return 0;
3128 }
3129
3130 static bool kv_dpm_is_idle(void *handle)
3131 {
3132 return true;
3133 }
3134
3135 static int kv_dpm_wait_for_idle(void *handle)
3136 {
3137 return 0;
3138 }
3139
3140
3141 static int kv_dpm_soft_reset(void *handle)
3142 {
3143 return 0;
3144 }
3145
3146 static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
3147 struct amdgpu_irq_src *src,
3148 unsigned type,
3149 enum amdgpu_interrupt_state state)
3150 {
3151 u32 cg_thermal_int;
3152
3153 switch (type) {
3154 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
3155 switch (state) {
3156 case AMDGPU_IRQ_STATE_DISABLE:
3157 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3158 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
3159 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3160 break;
3161 case AMDGPU_IRQ_STATE_ENABLE:
3162 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3163 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
3164 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3165 break;
3166 default:
3167 break;
3168 }
3169 break;
3170
3171 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
3172 switch (state) {
3173 case AMDGPU_IRQ_STATE_DISABLE:
3174 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3175 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
3176 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3177 break;
3178 case AMDGPU_IRQ_STATE_ENABLE:
3179 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3180 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
3181 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3182 break;
3183 default:
3184 break;
3185 }
3186 break;
3187
3188 default:
3189 break;
3190 }
3191 return 0;
3192 }
3193
3194 static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
3195 struct amdgpu_irq_src *source,
3196 struct amdgpu_iv_entry *entry)
3197 {
3198 bool queue_thermal = false;
3199
3200 if (entry == NULL)
3201 return -EINVAL;
3202
3203 switch (entry->src_id) {
3204 case 230: /* thermal low to high */
3205 DRM_DEBUG("IH: thermal low to high\n");
3206 adev->pm.dpm.thermal.high_to_low = false;
3207 queue_thermal = true;
3208 break;
3209 case 231: /* thermal high to low */
3210 DRM_DEBUG("IH: thermal high to low\n");
3211 adev->pm.dpm.thermal.high_to_low = true;
3212 queue_thermal = true;
3213 break;
3214 default:
3215 break;
3216 }
3217
3218 if (queue_thermal)
3219 schedule_work(&adev->pm.dpm.thermal.work);
3220
3221 return 0;
3222 }
3223
3224 static int kv_dpm_set_clockgating_state(void *handle,
3225 enum amd_clockgating_state state)
3226 {
3227 return 0;
3228 }
3229
3230 static int kv_dpm_set_powergating_state(void *handle,
3231 enum amd_powergating_state state)
3232 {
3233 return 0;
3234 }
3235
3236 static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
3237 const struct kv_pl *kv_cpl2)
3238 {
3239 return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
3240 (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
3241 (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
3242 (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
3243 }
3244
3245 static int kv_check_state_equal(void *handle,
3246 void *current_ps,
3247 void *request_ps,
3248 bool *equal)
3249 {
3250 struct kv_ps *kv_cps;
3251 struct kv_ps *kv_rps;
3252 int i;
3253 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
3254 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
3255 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3256
3257 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
3258 return -EINVAL;
3259
3260 kv_cps = kv_get_ps(cps);
3261 kv_rps = kv_get_ps(rps);
3262
3263 if (kv_cps == NULL) {
3264 *equal = false;
3265 return 0;
3266 }
3267
3268 if (kv_cps->num_levels != kv_rps->num_levels) {
3269 *equal = false;
3270 return 0;
3271 }
3272
3273 for (i = 0; i < kv_cps->num_levels; i++) {
3274 if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
3275 &(kv_rps->levels[i]))) {
3276 *equal = false;
3277 return 0;
3278 }
3279 }
3280
3281 /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
3282 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
3283 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
3284
3285 return 0;
3286 }
3287
3288 static int kv_dpm_read_sensor(void *handle, int idx,
3289 void *value, int *size)
3290 {
3291 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3292 struct kv_power_info *pi = kv_get_pi(adev);
3293 uint32_t sclk;
3294 u32 pl_index =
3295 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
3296 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
3297 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
3298
3299 /* size must be at least 4 bytes for all sensors */
3300 if (*size < 4)
3301 return -EINVAL;
3302
3303 switch (idx) {
3304 case AMDGPU_PP_SENSOR_GFX_SCLK:
3305 if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
3306 sclk = be32_to_cpu(
3307 pi->graphics_level[pl_index].SclkFrequency);
3308 *((uint32_t *)value) = sclk;
3309 *size = 4;
3310 return 0;
3311 }
3312 return -EINVAL;
3313 case AMDGPU_PP_SENSOR_GPU_TEMP:
3314 *((uint32_t *)value) = kv_dpm_get_temp(adev);
3315 *size = 4;
3316 return 0;
3317 default:
3318 return -EINVAL;
3319 }
3320 }
3321
3322 static int kv_set_powergating_by_smu(void *handle,
3323 uint32_t block_type, bool gate)
3324 {
3325 switch (block_type) {
3326 case AMD_IP_BLOCK_TYPE_UVD:
3327 kv_dpm_powergate_uvd(handle, gate);
3328 break;
3329 case AMD_IP_BLOCK_TYPE_VCE:
3330 kv_dpm_powergate_vce(handle, gate);
3331 break;
3332 default:
3333 break;
3334 }
3335 return 0;
3336 }
3337
3338 static const struct amd_ip_funcs kv_dpm_ip_funcs = {
3339 .name = "kv_dpm",
3340 .early_init = kv_dpm_early_init,
3341 .late_init = kv_dpm_late_init,
3342 .sw_init = kv_dpm_sw_init,
3343 .sw_fini = kv_dpm_sw_fini,
3344 .hw_init = kv_dpm_hw_init,
3345 .hw_fini = kv_dpm_hw_fini,
3346 .suspend = kv_dpm_suspend,
3347 .resume = kv_dpm_resume,
3348 .is_idle = kv_dpm_is_idle,
3349 .wait_for_idle = kv_dpm_wait_for_idle,
3350 .soft_reset = kv_dpm_soft_reset,
3351 .set_clockgating_state = kv_dpm_set_clockgating_state,
3352 .set_powergating_state = kv_dpm_set_powergating_state,
3353 };
3354
3355 const struct amdgpu_ip_block_version kv_smu_ip_block =
3356 {
3357 .type = AMD_IP_BLOCK_TYPE_SMC,
3358 .major = 1,
3359 .minor = 0,
3360 .rev = 0,
3361 .funcs = &kv_dpm_ip_funcs,
3362 };
3363
3364 static const struct amd_pm_funcs kv_dpm_funcs = {
3365 .pre_set_power_state = &kv_dpm_pre_set_power_state,
3366 .set_power_state = &kv_dpm_set_power_state,
3367 .post_set_power_state = &kv_dpm_post_set_power_state,
3368 .display_configuration_changed = &kv_dpm_display_configuration_changed,
3369 .get_sclk = &kv_dpm_get_sclk,
3370 .get_mclk = &kv_dpm_get_mclk,
3371 .print_power_state = &kv_dpm_print_power_state,
3372 #ifdef CONFIG_DEBUG_FS
3373 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
3374 #endif
3375 .force_performance_level = &kv_dpm_force_performance_level,
3376 .set_powergating_by_smu = kv_set_powergating_by_smu,
3377 .enable_bapm = &kv_dpm_enable_bapm,
3378 .get_vce_clock_state = amdgpu_get_vce_clock_state,
3379 .check_state_equal = kv_check_state_equal,
3380 .read_sensor = &kv_dpm_read_sensor,
3381 };
3382
3383 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
3384 .set = kv_dpm_set_interrupt_state,
3385 .process = kv_dpm_process_interrupt,
3386 };
3387
3388 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
3389 {
3390 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
3391 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
3392 }
3393