1 /* $NetBSD: amdgpu_amd_powerplay.c,v 1.4 2021/12/19 12:31:45 riastradh Exp $ */
2
3 /*
4 * Copyright 2015 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: amdgpu_amd_powerplay.c,v 1.4 2021/12/19 12:31:45 riastradh Exp $");
27
28 #include "pp_debug.h"
29 #include <linux/types.h>
30 #include <linux/kernel.h>
31 #include <linux/gfp.h>
32 #include <linux/slab.h>
33 #include <linux/firmware.h>
34 #include "amd_shared.h"
35 #include "amd_powerplay.h"
36 #include "power_state.h"
37 #include "amdgpu.h"
38 #include "hwmgr.h"
39
40 #include <linux/nbsd-namespace.h>
41
/* Forward declaration: installed on adev->powerplay by amd_powerplay_create();
 * the table itself is defined later in this file. */
static const struct amd_pm_funcs pp_dpm_funcs;
43
amd_powerplay_create(struct amdgpu_device * adev)44 static int amd_powerplay_create(struct amdgpu_device *adev)
45 {
46 struct pp_hwmgr *hwmgr;
47
48 if (adev == NULL)
49 return -EINVAL;
50
51 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
52 if (hwmgr == NULL)
53 return -ENOMEM;
54
55 hwmgr->adev = adev;
56 hwmgr->not_vf = !amdgpu_sriov_vf(adev);
57 hwmgr->device = amdgpu_cgs_create_device(adev);
58 mutex_init(&hwmgr->smu_lock);
59 hwmgr->chip_family = adev->family;
60 hwmgr->chip_id = adev->asic_type;
61 hwmgr->feature_mask = adev->pm.pp_feature;
62 hwmgr->display_config = &adev->pm.pm_display_cfg;
63 adev->powerplay.pp_handle = hwmgr;
64 adev->powerplay.pp_funcs = &pp_dpm_funcs;
65 return 0;
66 }
67
68
amd_powerplay_destroy(struct amdgpu_device * adev)69 static void amd_powerplay_destroy(struct amdgpu_device *adev)
70 {
71 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
72
73 kfree(hwmgr->hardcode_pp_table);
74 hwmgr->hardcode_pp_table = NULL;
75
76 mutex_destroy(&hwmgr->smu_lock);
77 kfree(hwmgr);
78 hwmgr = NULL;
79 }
80
pp_early_init(void * handle)81 static int pp_early_init(void *handle)
82 {
83 int ret;
84 struct amdgpu_device *adev = handle;
85
86 ret = amd_powerplay_create(adev);
87
88 if (ret != 0)
89 return ret;
90
91 ret = hwmgr_early_init(adev->powerplay.pp_handle);
92 if (ret)
93 return -EINVAL;
94
95 return 0;
96 }
97
pp_sw_init(void * handle)98 static int pp_sw_init(void *handle)
99 {
100 struct amdgpu_device *adev = handle;
101 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
102 int ret = 0;
103
104 ret = hwmgr_sw_init(hwmgr);
105
106 pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
107
108 return ret;
109 }
110
pp_sw_fini(void * handle)111 static int pp_sw_fini(void *handle)
112 {
113 struct amdgpu_device *adev = handle;
114 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
115
116 hwmgr_sw_fini(hwmgr);
117
118 release_firmware(adev->pm.fw);
119 adev->pm.fw = NULL;
120
121 return 0;
122 }
123
pp_hw_init(void * handle)124 static int pp_hw_init(void *handle)
125 {
126 int ret = 0;
127 struct amdgpu_device *adev = handle;
128 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
129
130 ret = hwmgr_hw_init(hwmgr);
131
132 if (ret)
133 pr_err("powerplay hw init failed\n");
134
135 return ret;
136 }
137
pp_hw_fini(void * handle)138 static int pp_hw_fini(void *handle)
139 {
140 struct amdgpu_device *adev = handle;
141 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
142
143 hwmgr_hw_fini(hwmgr);
144
145 return 0;
146 }
147
/*
 * pp_reserve_vram_for_smu - carve out adev->pm.smu_prv_buffer_size bytes
 * of GTT as the SMU's private buffer and report its CPU and GPU
 * addresses to the SMU via the notify_cac_buffer_info callback.
 * If notification fails (or the callback is absent) the buffer is freed
 * again.  Errors are logged; the function itself returns nothing.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	/* Preset to -EINVAL: if notify_cac_buffer_info is not provided,
	 * the error path below releases the buffer we just created. */
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* Addresses are split into 32-bit halves for the SMU message. */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
178
pp_late_init(void * handle)179 static int pp_late_init(void *handle)
180 {
181 struct amdgpu_device *adev = handle;
182 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
183
184 if (hwmgr && hwmgr->pm_en) {
185 mutex_lock(&hwmgr->smu_lock);
186 hwmgr_handle_task(hwmgr,
187 AMD_PP_TASK_COMPLETE_INIT, NULL);
188 mutex_unlock(&hwmgr->smu_lock);
189 }
190 if (adev->pm.smu_prv_buffer_size != 0)
191 pp_reserve_vram_for_smu(adev);
192
193 return 0;
194 }
195
pp_late_fini(void * handle)196 static void pp_late_fini(void *handle)
197 {
198 struct amdgpu_device *adev = handle;
199
200 if (adev->pm.smu_prv_buffer)
201 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
202 amd_powerplay_destroy(adev);
203 }
204
205
/* Powerplay exposes no idle notion to the IP framework; always "busy". */
static bool pp_is_idle(void *handle)
{
	return false;
}
210
/* Nothing to wait for; unconditionally report success. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
215
/* Soft reset is a no-op for powerplay; report success. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
220
/* Powergating transitions are not handled at this layer; no-op. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
226
pp_suspend(void * handle)227 static int pp_suspend(void *handle)
228 {
229 struct amdgpu_device *adev = handle;
230 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
231
232 return hwmgr_suspend(hwmgr);
233 }
234
pp_resume(void * handle)235 static int pp_resume(void *handle)
236 {
237 struct amdgpu_device *adev = handle;
238 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
239
240 return hwmgr_resume(hwmgr);
241 }
242
/* Clockgating transitions are not handled at this layer; no-op. */
static int pp_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}
248
/* Callbacks wiring powerplay into the amdgpu IP-block init/fini state
 * machine; hooks not listed here are left zero/NULL by the designated
 * initializer. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
266
/* Public IP-block descriptor (SMC, v1.0.0) registered with the amdgpu
 * core; routes the generic IP callbacks to pp_ip_funcs above. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
275
276 /* This interface only be supported On Vi,
277 * because only smu7/8 can help to load gfx/sdma fw,
278 * smu need to be enabled before load other ip's fw.
279 * so call start smu to load smu7 fw and other ip's fw
280 */
pp_dpm_load_fw(void * handle)281 static int pp_dpm_load_fw(void *handle)
282 {
283 struct pp_hwmgr *hwmgr = handle;
284
285 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
286 return -EINVAL;
287
288 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
289 pr_err("fw load failed\n");
290 return -EINVAL;
291 }
292
293 return 0;
294 }
295
/* No post-firmware-load work is needed; report success. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
300
/*
 * pp_set_clockgating_by_smu - forward a clock-gating message id to the
 * hwmgr backend.  Returns -EINVAL without power management, 0 when the
 * backend lacks the hook, otherwise the backend's result.
 */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->update_clock_gatings) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
315
/*
 * pp_dpm_en_umd_pstate - handle entry/exit of the UMD stable-pstate
 * profile levels around a forced-level change.
 *
 * @level may be rewritten: PROFILE_EXIT is replaced with the level saved
 * on entry.  Entering a profile level saves the current level and
 * ungates GFX clock/power gating; leaving restores gating.
 */
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
		enum amd_dpm_forced_level *level)
{
	/* all of the UMD profile levels, treated as one group */
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_GFX,
						AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(hwmgr->adev,
					AMD_IP_BLOCK_TYPE_GFX,
					AMD_PG_STATE_GATE);
		}
	}
}
351
pp_dpm_force_performance_level(void * handle,enum amd_dpm_forced_level level)352 static int pp_dpm_force_performance_level(void *handle,
353 enum amd_dpm_forced_level level)
354 {
355 struct pp_hwmgr *hwmgr = handle;
356
357 if (!hwmgr || !hwmgr->pm_en)
358 return -EINVAL;
359
360 if (level == hwmgr->dpm_level)
361 return 0;
362
363 mutex_lock(&hwmgr->smu_lock);
364 pp_dpm_en_umd_pstate(hwmgr, &level);
365 hwmgr->request_dpm_level = level;
366 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
367 mutex_unlock(&hwmgr->smu_lock);
368
369 return 0;
370 }
371
pp_dpm_get_performance_level(void * handle)372 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
373 void *handle)
374 {
375 struct pp_hwmgr *hwmgr = handle;
376 enum amd_dpm_forced_level level;
377
378 if (!hwmgr || !hwmgr->pm_en)
379 return -EINVAL;
380
381 mutex_lock(&hwmgr->smu_lock);
382 level = hwmgr->dpm_level;
383 mutex_unlock(&hwmgr->smu_lock);
384 return level;
385 }
386
pp_dpm_get_sclk(void * handle,bool low)387 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
388 {
389 struct pp_hwmgr *hwmgr = handle;
390 uint32_t clk = 0;
391
392 if (!hwmgr || !hwmgr->pm_en)
393 return 0;
394
395 if (hwmgr->hwmgr_func->get_sclk == NULL) {
396 pr_info_ratelimited("%s was not implemented.\n", __func__);
397 return 0;
398 }
399 mutex_lock(&hwmgr->smu_lock);
400 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
401 mutex_unlock(&hwmgr->smu_lock);
402 return clk;
403 }
404
pp_dpm_get_mclk(void * handle,bool low)405 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
406 {
407 struct pp_hwmgr *hwmgr = handle;
408 uint32_t clk = 0;
409
410 if (!hwmgr || !hwmgr->pm_en)
411 return 0;
412
413 if (hwmgr->hwmgr_func->get_mclk == NULL) {
414 pr_info_ratelimited("%s was not implemented.\n", __func__);
415 return 0;
416 }
417 mutex_lock(&hwmgr->smu_lock);
418 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
419 mutex_unlock(&hwmgr->smu_lock);
420 return clk;
421 }
422
/* Gate (@gate true) or ungate VCE power via the backend; silently does
 * nothing when pm is off or the hook is missing. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_vce) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
438
/* Gate (@gate true) or ungate UVD power via the backend; silently does
 * nothing when pm is off or the hook is missing. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->powergate_uvd) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
	mutex_unlock(&hwmgr->smu_lock);
}
454
pp_dpm_dispatch_tasks(void * handle,enum amd_pp_task task_id,enum amd_pm_state_type * user_state)455 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
456 enum amd_pm_state_type *user_state)
457 {
458 int ret = 0;
459 struct pp_hwmgr *hwmgr = handle;
460
461 if (!hwmgr || !hwmgr->pm_en)
462 return -EINVAL;
463
464 mutex_lock(&hwmgr->smu_lock);
465 ret = hwmgr_handle_task(hwmgr, task_id, user_state);
466 mutex_unlock(&hwmgr->smu_lock);
467
468 return ret;
469 }
470
pp_dpm_get_current_power_state(void * handle)471 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
472 {
473 struct pp_hwmgr *hwmgr = handle;
474 struct pp_power_state *state;
475 enum amd_pm_state_type pm_type;
476
477 if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
478 return -EINVAL;
479
480 mutex_lock(&hwmgr->smu_lock);
481
482 state = hwmgr->current_ps;
483
484 switch (state->classification.ui_label) {
485 case PP_StateUILabel_Battery:
486 pm_type = POWER_STATE_TYPE_BATTERY;
487 break;
488 case PP_StateUILabel_Balanced:
489 pm_type = POWER_STATE_TYPE_BALANCED;
490 break;
491 case PP_StateUILabel_Performance:
492 pm_type = POWER_STATE_TYPE_PERFORMANCE;
493 break;
494 default:
495 if (state->classification.flags & PP_StateClassificationFlag_Boot)
496 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
497 else
498 pm_type = POWER_STATE_TYPE_DEFAULT;
499 break;
500 }
501 mutex_unlock(&hwmgr->smu_lock);
502
503 return pm_type;
504 }
505
/* Set the fan control mode via the backend; no-op when pm is off or the
 * hook is missing. */
static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (!hwmgr->hwmgr_func->set_fan_control_mode) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
	mutex_unlock(&hwmgr->smu_lock);
}
521
pp_dpm_get_fan_control_mode(void * handle)522 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
523 {
524 struct pp_hwmgr *hwmgr = handle;
525 uint32_t mode = 0;
526
527 if (!hwmgr || !hwmgr->pm_en)
528 return 0;
529
530 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
531 pr_info_ratelimited("%s was not implemented.\n", __func__);
532 return 0;
533 }
534 mutex_lock(&hwmgr->smu_lock);
535 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
536 mutex_unlock(&hwmgr->smu_lock);
537 return mode;
538 }
539
/* Set fan speed as a percentage; 0 (silently) when the hook is absent. */
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_fan_speed_percent) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
557
/* Read the fan speed as a percentage into *@speed. */
static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->get_fan_speed_percent) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
576
/* Read the fan speed in RPM into *@rpm; -EINVAL when unsupported. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->get_fan_speed_rpm)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
593
/* Set the fan speed in RPM; 0 (silently) when the hook is absent. */
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_fan_speed_rpm) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
611
pp_dpm_get_pp_num_states(void * handle,struct pp_states_info * data)612 static int pp_dpm_get_pp_num_states(void *handle,
613 struct pp_states_info *data)
614 {
615 struct pp_hwmgr *hwmgr = handle;
616 int i;
617
618 memset(data, 0, sizeof(*data));
619
620 if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
621 return -EINVAL;
622
623 mutex_lock(&hwmgr->smu_lock);
624
625 data->nums = hwmgr->num_ps;
626
627 for (i = 0; i < hwmgr->num_ps; i++) {
628 struct pp_power_state *state = (struct pp_power_state *)
629 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
630 switch (state->classification.ui_label) {
631 case PP_StateUILabel_Battery:
632 data->states[i] = POWER_STATE_TYPE_BATTERY;
633 break;
634 case PP_StateUILabel_Balanced:
635 data->states[i] = POWER_STATE_TYPE_BALANCED;
636 break;
637 case PP_StateUILabel_Performance:
638 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
639 break;
640 default:
641 if (state->classification.flags & PP_StateClassificationFlag_Boot)
642 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
643 else
644 data->states[i] = POWER_STATE_TYPE_DEFAULT;
645 }
646 }
647 mutex_unlock(&hwmgr->smu_lock);
648 return 0;
649 }
650
pp_dpm_get_pp_table(void * handle,char ** table)651 static int pp_dpm_get_pp_table(void *handle, char **table)
652 {
653 struct pp_hwmgr *hwmgr = handle;
654 int size = 0;
655
656 if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
657 return -EINVAL;
658
659 mutex_lock(&hwmgr->smu_lock);
660 *table = __UNCONST(hwmgr->soft_pp_table);
661 size = hwmgr->soft_pp_table_size;
662 mutex_unlock(&hwmgr->smu_lock);
663 return size;
664 }
665
amd_powerplay_reset(void * handle)666 static int amd_powerplay_reset(void *handle)
667 {
668 struct pp_hwmgr *hwmgr = handle;
669 int ret;
670
671 ret = hwmgr_hw_fini(hwmgr);
672 if (ret)
673 return ret;
674
675 ret = hwmgr_hw_init(hwmgr);
676 if (ret)
677 return ret;
678
679 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
680 }
681
/*
 * pp_dpm_set_pp_table - install a caller-supplied pp table, then reset
 * the hw so it takes effect (AVFS is disabled afterwards when the
 * backend supports it).
 *
 * Fix: the hardcode table is allocated with soft_pp_table_size bytes,
 * but @size came from the caller and was copied in unchecked — a larger
 * @size overflowed the heap buffer.  Reject oversized input up front.
 *
 * Returns 0 on success, -EINVAL on bad input, -ENOMEM on allocation
 * failure, or the reset/AVFS error.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	/* the destination buffer below holds exactly soft_pp_table_size
	 * bytes; never copy more than that */
	if (size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			goto err;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		goto err;

	if (hwmgr->hwmgr_func->avfs_control) {
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
		if (ret)
			goto err;
	}
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
err:
	mutex_unlock(&hwmgr->smu_lock);
	return ret;
}
718
pp_dpm_force_clock_level(void * handle,enum pp_clock_type type,uint32_t mask)719 static int pp_dpm_force_clock_level(void *handle,
720 enum pp_clock_type type, uint32_t mask)
721 {
722 struct pp_hwmgr *hwmgr = handle;
723 int ret = 0;
724
725 if (!hwmgr || !hwmgr->pm_en)
726 return -EINVAL;
727
728 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
729 pr_info_ratelimited("%s was not implemented.\n", __func__);
730 return 0;
731 }
732
733 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
734 pr_debug("force clock level is for dpm manual mode only.\n");
735 return -EINVAL;
736 }
737
738 mutex_lock(&hwmgr->smu_lock);
739 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
740 mutex_unlock(&hwmgr->smu_lock);
741 return ret;
742 }
743
pp_dpm_print_clock_levels(void * handle,enum pp_clock_type type,char * buf)744 static int pp_dpm_print_clock_levels(void *handle,
745 enum pp_clock_type type, char *buf)
746 {
747 struct pp_hwmgr *hwmgr = handle;
748 int ret = 0;
749
750 if (!hwmgr || !hwmgr->pm_en)
751 return -EINVAL;
752
753 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
754 pr_info_ratelimited("%s was not implemented.\n", __func__);
755 return 0;
756 }
757 mutex_lock(&hwmgr->smu_lock);
758 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
759 mutex_unlock(&hwmgr->smu_lock);
760 return ret;
761 }
762
pp_dpm_get_sclk_od(void * handle)763 static int pp_dpm_get_sclk_od(void *handle)
764 {
765 struct pp_hwmgr *hwmgr = handle;
766 int ret = 0;
767
768 if (!hwmgr || !hwmgr->pm_en)
769 return -EINVAL;
770
771 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
772 pr_info_ratelimited("%s was not implemented.\n", __func__);
773 return 0;
774 }
775 mutex_lock(&hwmgr->smu_lock);
776 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
777 mutex_unlock(&hwmgr->smu_lock);
778 return ret;
779 }
780
/* Write the engine-clock overdrive value via the backend. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_sclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
799
pp_dpm_get_mclk_od(void * handle)800 static int pp_dpm_get_mclk_od(void *handle)
801 {
802 struct pp_hwmgr *hwmgr = handle;
803 int ret = 0;
804
805 if (!hwmgr || !hwmgr->pm_en)
806 return -EINVAL;
807
808 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
809 pr_info_ratelimited("%s was not implemented.\n", __func__);
810 return 0;
811 }
812 mutex_lock(&hwmgr->smu_lock);
813 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
814 mutex_unlock(&hwmgr->smu_lock);
815 return ret;
816 }
817
/* Write the memory-clock overdrive value via the backend. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;
	int err;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_mclk_od) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
835
pp_dpm_read_sensor(void * handle,int idx,void * value,int * size)836 static int pp_dpm_read_sensor(void *handle, int idx,
837 void *value, int *size)
838 {
839 struct pp_hwmgr *hwmgr = handle;
840 int ret = 0;
841
842 if (!hwmgr || !hwmgr->pm_en || !value)
843 return -EINVAL;
844
845 switch (idx) {
846 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
847 *((uint32_t *)value) = hwmgr->pstate_sclk;
848 return 0;
849 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
850 *((uint32_t *)value) = hwmgr->pstate_mclk;
851 return 0;
852 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
853 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
854 return 0;
855 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
856 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
857 return 0;
858 default:
859 mutex_lock(&hwmgr->smu_lock);
860 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
861 mutex_unlock(&hwmgr->smu_lock);
862 return ret;
863 }
864 }
865
866 static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void * handle,unsigned idx)867 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
868 {
869 struct pp_hwmgr *hwmgr = handle;
870
871 if (!hwmgr || !hwmgr->pm_en)
872 return NULL;
873
874 if (idx < hwmgr->num_vce_state_tables)
875 return &hwmgr->vce_states[idx];
876 return NULL;
877 }
878
pp_get_power_profile_mode(void * handle,char * buf)879 static int pp_get_power_profile_mode(void *handle, char *buf)
880 {
881 struct pp_hwmgr *hwmgr = handle;
882
883 if (!hwmgr || !hwmgr->pm_en || !buf)
884 return -EINVAL;
885
886 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
887 pr_info_ratelimited("%s was not implemented.\n", __func__);
888 return snprintf(buf, PAGE_SIZE, "\n");
889 }
890
891 return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
892 }
893
/*
 * pp_set_power_profile_mode - apply a power profile described by @input
 * (@size entries).  Only permitted in MANUAL dpm mode.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int err = -EINVAL;

	if (!hwmgr || !hwmgr->pm_en)
		return err;

	if (!hwmgr->hwmgr_func->set_power_profile_mode) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return err;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return err;
	}

	mutex_lock(&hwmgr->smu_lock);
	err = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
	mutex_unlock(&hwmgr->smu_lock);

	return err;
}
917
/* Forward an overdrive dpm-table edit to the backend. */
static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->odn_edit_dpm_table) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}
932
pp_dpm_set_mp1_state(void * handle,enum pp_mp1_state mp1_state)933 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
934 {
935 struct pp_hwmgr *hwmgr = handle;
936
937 if (!hwmgr)
938 return -EINVAL;
939
940 if (!hwmgr->pm_en)
941 return 0;
942
943 if (hwmgr->hwmgr_func->set_mp1_state)
944 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
945
946 return 0;
947 }
948
pp_dpm_switch_power_profile(void * handle,enum PP_SMC_POWER_PROFILE type,bool en)949 static int pp_dpm_switch_power_profile(void *handle,
950 enum PP_SMC_POWER_PROFILE type, bool en)
951 {
952 struct pp_hwmgr *hwmgr = handle;
953 long workload;
954 uint32_t index;
955
956 if (!hwmgr || !hwmgr->pm_en)
957 return -EINVAL;
958
959 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
960 pr_info_ratelimited("%s was not implemented.\n", __func__);
961 return -EINVAL;
962 }
963
964 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
965 return -EINVAL;
966
967 mutex_lock(&hwmgr->smu_lock);
968
969 if (!en) {
970 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
971 index = fls(hwmgr->workload_mask);
972 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
973 workload = hwmgr->workload_setting[index];
974 } else {
975 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
976 index = fls(hwmgr->workload_mask);
977 index = index <= Workload_Policy_Max ? index - 1 : 0;
978 workload = hwmgr->workload_setting[index];
979 }
980
981 if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
982 hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
983 if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
984 mutex_unlock(&hwmgr->smu_lock);
985 return -EINVAL;
986 }
987 }
988
989 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
990 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
991 mutex_unlock(&hwmgr->smu_lock);
992
993 return 0;
994 }
995
/*
 * pp_set_power_limit - set the board power limit.  @limit == 0 restores
 * the default.  With overdrive enabled the ceiling is the default limit
 * scaled by (100 + TDPODLimit)%; requests above the ceiling are rejected.
 *
 * NOTE(review): the scaling multiply is done in uint32_t; assumes
 * default_power_limit * (100 + TDPODLimit) cannot overflow 32 bits —
 * confirm the value ranges.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}
1027
/*
 * pp_get_power_limit - report the current power limit, or (with
 * @default_limit) the default limit scaled up by the overdrive headroom
 * when overdrive is enabled.
 */
static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);

	if (!default_limit) {
		*limit = hwmgr->power_limit;
	} else {
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
	}

	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1051
pp_display_configuration_change(void * handle,const struct amd_pp_display_configuration * display_config)1052 static int pp_display_configuration_change(void *handle,
1053 const struct amd_pp_display_configuration *display_config)
1054 {
1055 struct pp_hwmgr *hwmgr = handle;
1056
1057 if (!hwmgr || !hwmgr->pm_en)
1058 return -EINVAL;
1059
1060 mutex_lock(&hwmgr->smu_lock);
1061 phm_store_dal_configuration_data(hwmgr, display_config);
1062 mutex_unlock(&hwmgr->smu_lock);
1063 return 0;
1064 }
1065
pp_get_display_power_level(void * handle,struct amd_pp_simple_clock_info * output)1066 static int pp_get_display_power_level(void *handle,
1067 struct amd_pp_simple_clock_info *output)
1068 {
1069 struct pp_hwmgr *hwmgr = handle;
1070 int ret = 0;
1071
1072 if (!hwmgr || !hwmgr->pm_en ||!output)
1073 return -EINVAL;
1074
1075 mutex_lock(&hwmgr->smu_lock);
1076 ret = phm_get_dal_power_level(hwmgr, output);
1077 mutex_unlock(&hwmgr->smu_lock);
1078 return ret;
1079 }
1080
pp_get_current_clocks(void * handle,struct amd_pp_clock_info * clocks)1081 static int pp_get_current_clocks(void *handle,
1082 struct amd_pp_clock_info *clocks)
1083 {
1084 struct amd_pp_simple_clock_info simple_clocks = { 0 };
1085 struct pp_clock_info hw_clocks;
1086 struct pp_hwmgr *hwmgr = handle;
1087 int ret = 0;
1088
1089 if (!hwmgr || !hwmgr->pm_en)
1090 return -EINVAL;
1091
1092 mutex_lock(&hwmgr->smu_lock);
1093
1094 phm_get_dal_power_level(hwmgr, &simple_clocks);
1095
1096 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1097 PHM_PlatformCaps_PowerContainment))
1098 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1099 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1100 else
1101 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1102 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1103
1104 if (ret) {
1105 pr_debug("Error in phm_get_clock_info \n");
1106 mutex_unlock(&hwmgr->smu_lock);
1107 return -EINVAL;
1108 }
1109
1110 clocks->min_engine_clock = hw_clocks.min_eng_clk;
1111 clocks->max_engine_clock = hw_clocks.max_eng_clk;
1112 clocks->min_memory_clock = hw_clocks.min_mem_clk;
1113 clocks->max_memory_clock = hw_clocks.max_mem_clk;
1114 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1115 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1116
1117 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1118 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1119
1120 if (simple_clocks.level == 0)
1121 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1122 else
1123 clocks->max_clocks_state = simple_clocks.level;
1124
1125 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1126 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1127 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1128 }
1129 mutex_unlock(&hwmgr->smu_lock);
1130 return 0;
1131 }
1132
pp_get_clock_by_type(void * handle,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1133 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1134 {
1135 struct pp_hwmgr *hwmgr = handle;
1136 int ret = 0;
1137
1138 if (!hwmgr || !hwmgr->pm_en)
1139 return -EINVAL;
1140
1141 if (clocks == NULL)
1142 return -EINVAL;
1143
1144 mutex_lock(&hwmgr->smu_lock);
1145 ret = phm_get_clock_by_type(hwmgr, type, clocks);
1146 mutex_unlock(&hwmgr->smu_lock);
1147 return ret;
1148 }
1149
pp_get_clock_by_type_with_latency(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1150 static int pp_get_clock_by_type_with_latency(void *handle,
1151 enum amd_pp_clock_type type,
1152 struct pp_clock_levels_with_latency *clocks)
1153 {
1154 struct pp_hwmgr *hwmgr = handle;
1155 int ret = 0;
1156
1157 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1158 return -EINVAL;
1159
1160 mutex_lock(&hwmgr->smu_lock);
1161 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1162 mutex_unlock(&hwmgr->smu_lock);
1163 return ret;
1164 }
1165
pp_get_clock_by_type_with_voltage(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1166 static int pp_get_clock_by_type_with_voltage(void *handle,
1167 enum amd_pp_clock_type type,
1168 struct pp_clock_levels_with_voltage *clocks)
1169 {
1170 struct pp_hwmgr *hwmgr = handle;
1171 int ret = 0;
1172
1173 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1174 return -EINVAL;
1175
1176 mutex_lock(&hwmgr->smu_lock);
1177
1178 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1179
1180 mutex_unlock(&hwmgr->smu_lock);
1181 return ret;
1182 }
1183
pp_set_watermarks_for_clocks_ranges(void * handle,void * clock_ranges)1184 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1185 void *clock_ranges)
1186 {
1187 struct pp_hwmgr *hwmgr = handle;
1188 int ret = 0;
1189
1190 if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1191 return -EINVAL;
1192
1193 mutex_lock(&hwmgr->smu_lock);
1194 ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1195 clock_ranges);
1196 mutex_unlock(&hwmgr->smu_lock);
1197
1198 return ret;
1199 }
1200
pp_display_clock_voltage_request(void * handle,struct pp_display_clock_request * clock)1201 static int pp_display_clock_voltage_request(void *handle,
1202 struct pp_display_clock_request *clock)
1203 {
1204 struct pp_hwmgr *hwmgr = handle;
1205 int ret = 0;
1206
1207 if (!hwmgr || !hwmgr->pm_en ||!clock)
1208 return -EINVAL;
1209
1210 mutex_lock(&hwmgr->smu_lock);
1211 ret = phm_display_clock_voltage_request(hwmgr, clock);
1212 mutex_unlock(&hwmgr->smu_lock);
1213
1214 return ret;
1215 }
1216
pp_get_display_mode_validation_clocks(void * handle,struct amd_pp_simple_clock_info * clocks)1217 static int pp_get_display_mode_validation_clocks(void *handle,
1218 struct amd_pp_simple_clock_info *clocks)
1219 {
1220 struct pp_hwmgr *hwmgr = handle;
1221 int ret = 0;
1222
1223 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1224 return -EINVAL;
1225
1226 clocks->level = PP_DAL_POWERLEVEL_7;
1227
1228 mutex_lock(&hwmgr->smu_lock);
1229
1230 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1231 ret = phm_get_max_high_clocks(hwmgr, clocks);
1232
1233 mutex_unlock(&hwmgr->smu_lock);
1234 return ret;
1235 }
1236
pp_dpm_powergate_mmhub(void * handle)1237 static int pp_dpm_powergate_mmhub(void *handle)
1238 {
1239 struct pp_hwmgr *hwmgr = handle;
1240
1241 if (!hwmgr || !hwmgr->pm_en)
1242 return -EINVAL;
1243
1244 if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1245 pr_info_ratelimited("%s was not implemented.\n", __func__);
1246 return 0;
1247 }
1248
1249 return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1250 }
1251
/*
 * Power-gate (or ungate) the GFX block.  Note: unlike most siblings,
 * an unavailable powerplay instance here is a silent no-op (returns 0),
 * as is a missing callback.
 */
static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}
1266
/*
 * Power-gate (or ungate) the ACP audio block.  Silently does nothing
 * when powerplay is unavailable; logs when the callback is missing.
 */
static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}
1281
/*
 * Power-gate (or ungate) the SDMA block.  Note: unlike the ACP
 * variant, this one does not require pm_en — only a valid hwmgr.
 */
static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}
1296
/*
 * pp_set_powergating_by_smu - dispatch a powergating request for the
 * given IP block to the matching per-block handler.
 *
 * Only the GFX path propagates its handler's return value; the other
 * handlers' results (where they have one) are deliberately discarded
 * and 0 is returned.  Unknown block types are silently ignored.
 */
static int pp_set_powergating_by_smu(void *handle,
				uint32_t block_type, bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/* return value intentionally ignored */
		pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}
1327
pp_notify_smu_enable_pwe(void * handle)1328 static int pp_notify_smu_enable_pwe(void *handle)
1329 {
1330 struct pp_hwmgr *hwmgr = handle;
1331
1332 if (!hwmgr || !hwmgr->pm_en)
1333 return -EINVAL;
1334
1335 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1336 pr_info_ratelimited("%s was not implemented.\n", __func__);
1337 return -EINVAL;
1338 }
1339
1340 mutex_lock(&hwmgr->smu_lock);
1341 hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1342 mutex_unlock(&hwmgr->smu_lock);
1343
1344 return 0;
1345 }
1346
pp_enable_mgpu_fan_boost(void * handle)1347 static int pp_enable_mgpu_fan_boost(void *handle)
1348 {
1349 struct pp_hwmgr *hwmgr = handle;
1350
1351 if (!hwmgr)
1352 return -EINVAL;
1353
1354 if (!hwmgr->pm_en ||
1355 hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1356 return 0;
1357
1358 mutex_lock(&hwmgr->smu_lock);
1359 hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1360 mutex_unlock(&hwmgr->smu_lock);
1361
1362 return 0;
1363 }
1364
/*
 * Set the minimum deep-sleep DCEF clock.  Returns -EINVAL if
 * powerplay is unavailable or the backend lacks the callback.
 */
static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1383
/*
 * Set a hard minimum DCEF clock frequency.  Returns -EINVAL if
 * powerplay is unavailable or the backend lacks the callback.
 */
static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1402
/*
 * Set a hard minimum fabric clock frequency.  Returns -EINVAL if
 * powerplay is unavailable or the backend lacks the callback.
 */
static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1421
/*
 * Tell the firmware how many displays are currently active.
 * Returns the phm result, or -EINVAL if powerplay is unavailable.
 */
static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;
	int result;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	mutex_lock(&hwmgr->smu_lock);
	result = phm_set_active_display_count(hwmgr, count);
	mutex_unlock(&hwmgr->smu_lock);

	return result;
}
1436
/*
 * Report whether the ASIC supports BACO (bus-active, chip-off) in *cap.
 * *cap defaults to false; it is only set true by the backend callback.
 *
 * Fix: the original wrote *cap before validating any argument, so a
 * NULL cap pointer was dereferenced unconditionally.  Validate cap
 * first, matching the guard style used throughout this file.
 */
static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!cap)
		return -EINVAL;

	*cap = false;
	if (!hwmgr)
		return -EINVAL;

	/* No powerplay or no backend support: report "not capable". */
	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1454
pp_get_asic_baco_state(void * handle,int * state)1455 static int pp_get_asic_baco_state(void *handle, int *state)
1456 {
1457 struct pp_hwmgr *hwmgr = handle;
1458
1459 if (!hwmgr)
1460 return -EINVAL;
1461
1462 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1463 return 0;
1464
1465 mutex_lock(&hwmgr->smu_lock);
1466 hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1467 mutex_unlock(&hwmgr->smu_lock);
1468
1469 return 0;
1470 }
1471
pp_set_asic_baco_state(void * handle,int state)1472 static int pp_set_asic_baco_state(void *handle, int state)
1473 {
1474 struct pp_hwmgr *hwmgr = handle;
1475
1476 if (!hwmgr)
1477 return -EINVAL;
1478
1479 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1480 return 0;
1481
1482 mutex_lock(&hwmgr->smu_lock);
1483 hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1484 mutex_unlock(&hwmgr->smu_lock);
1485
1486 return 0;
1487 }
1488
pp_get_ppfeature_status(void * handle,char * buf)1489 static int pp_get_ppfeature_status(void *handle, char *buf)
1490 {
1491 struct pp_hwmgr *hwmgr = handle;
1492 int ret = 0;
1493
1494 if (!hwmgr || !hwmgr->pm_en || !buf)
1495 return -EINVAL;
1496
1497 if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1498 pr_info_ratelimited("%s was not implemented.\n", __func__);
1499 return -EINVAL;
1500 }
1501
1502 mutex_lock(&hwmgr->smu_lock);
1503 ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1504 mutex_unlock(&hwmgr->smu_lock);
1505
1506 return ret;
1507 }
1508
/*
 * Program the powerplay feature enable mask.  Returns the backend's
 * result, or -EINVAL if powerplay is unavailable or the callback is
 * missing.
 */
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;
	int result;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	result = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
	mutex_unlock(&hwmgr->smu_lock);

	return result;
}
1528
pp_asic_reset_mode_2(void * handle)1529 static int pp_asic_reset_mode_2(void *handle)
1530 {
1531 struct pp_hwmgr *hwmgr = handle;
1532 int ret = 0;
1533
1534 if (!hwmgr || !hwmgr->pm_en)
1535 return -EINVAL;
1536
1537 if (hwmgr->hwmgr_func->asic_reset == NULL) {
1538 pr_info_ratelimited("%s was not implemented.\n", __func__);
1539 return -EINVAL;
1540 }
1541
1542 mutex_lock(&hwmgr->smu_lock);
1543 ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1544 mutex_unlock(&hwmgr->smu_lock);
1545
1546 return ret;
1547 }
1548
/*
 * Acquire or release the SMU-arbitrated I2C bus.  Returns the
 * backend's result, or -EINVAL if powerplay is unavailable or the
 * callback is missing.
 */
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;
	int result;

	if (hwmgr == NULL || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	result = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
	mutex_unlock(&hwmgr->smu_lock);

	return result;
}
1568
pp_set_df_cstate(void * handle,enum pp_df_cstate state)1569 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1570 {
1571 struct pp_hwmgr *hwmgr = handle;
1572
1573 if (!hwmgr)
1574 return -EINVAL;
1575
1576 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1577 return 0;
1578
1579 mutex_lock(&hwmgr->smu_lock);
1580 hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1581 mutex_unlock(&hwmgr->smu_lock);
1582
1583 return 0;
1584 }
1585
/*
 * Set the XGMI link p-state.  Missing powerplay support or a missing
 * callback is a silent no-op; only a NULL handle is an error.
 */
static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (hwmgr == NULL)
		return -EINVAL;

	if (!hwmgr->pm_en || hwmgr->hwmgr_func->set_xgmi_pstate == NULL)
		return 0;

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
	mutex_unlock(&hwmgr->smu_lock);

	return 0;
}
1602
/*
 * amd_pm_funcs vtable exported to amdgpu: maps the generic power
 * management interface onto the pp_* wrappers defined in this file.
 * Forward-declared near the top of the file so amd_powerplay_create()
 * can install it before these definitions appear.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
};
1664