1 /* $NetBSD: amdgpu_smu8_smumgr.c,v 1.3 2021/12/19 12:21:30 riastradh Exp $ */
2
3 /*
4 * Copyright 2015 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_smu8_smumgr.c,v 1.3 2021/12/19 12:21:30 riastradh Exp $");
28
29 #include <linux/delay.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32 #include <linux/ktime.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35
36 #include "cgs_common.h"
37 #include "smu/smu_8_0_d.h"
38 #include "smu/smu_8_0_sh_mask.h"
39 #include "smu8.h"
40 #include "smu8_fusion.h"
41 #include "smu8_smumgr.h"
42 #include "cz_ppsmc.h"
43 #include "smu_ucode_xfer_cz.h"
44 #include "gca/gfx_8_0_d.h"
45 #include "gca/gfx_8_0_sh_mask.h"
46 #include "smumgr.h"
47
48 #include <linux/nbsd-namespace.h>
49
/* Round x up to the next multiple of 32 bytes (SMU scratch-buffer alignment). */
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)

/*
 * Firmwares the driver registers with the SMU loader.  Each entry is
 * translated to a UCODE_ID_* task argument and looked up through CGS in
 * smu8_smu_populate_firmware_entries().
 */
static const enum smu8_scratch_entry firmware_list[] = {
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0,
	SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
	SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
	SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
62
smu8_get_argument(struct pp_hwmgr * hwmgr)63 static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
64 {
65 if (hwmgr == NULL || hwmgr->device == NULL)
66 return 0;
67
68 return cgs_read_register(hwmgr->device,
69 mmSMU_MP1_SRBM2P_ARG_0);
70 }
71
72 /* Send a message to the SMC, and wait for its response.*/
/*
 * smu8_send_msg_to_smc_with_parameter - post one message+argument pair
 * through the SRBM2P mailbox and wait for the SMU to acknowledge it.
 *
 * @msg:       PPSMC_MSG_* message id
 * @parameter: 32-bit message argument
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the PHM wait status
 * if the SMU never responds.
 */
static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					uint16_t msg, uint32_t parameter)
{
	int result = 0;
	ktime_t t_start;
	s64 elapsed_us;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	/* The mailbox is free once RESP is non-zero (previous msg acked). */
	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
	if (result != 0) {
		/* Read the last message to SMU, to report actual cause */
		uint32_t val = cgs_read_register(hwmgr->device,
						 mmSMU_MP1_SRBM2P_MSG_0);
		pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
			__func__, msg, val);
		return result;
	}
	t_start = ktime_get();

	/* Handshake order: argument first, clear RESP, then write MSG last. */
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);

	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);

	/* Wait for the SMU to post a non-zero response for this message. */
	result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
					SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);

	elapsed_us = ktime_us_delta(ktime_get(), t_start);

	WARN(result, "%s(0x%04x, %#x) timed out after %"PRId64" us\n",
			__func__, msg, parameter, elapsed_us);

	return result;
}
110
/* Post a parameterless message to the SMU (argument register set to 0). */
static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	const uint32_t no_argument = 0;

	return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, no_argument);
}
115
/*
 * Program the indirect-access index register with a dword-aligned SMC
 * SRAM address.  The address (including its trailing 3 bytes) must lie
 * strictly below @limit.  Returns 0 on success, -EINVAL otherwise.
 */
static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
				uint32_t smc_address, uint32_t limit)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	if ((smc_address & 3) != 0) {
		pr_err("SMC address must be 4 byte aligned\n");
		return -EINVAL;
	}

	if (smc_address + 3 >= limit) {
		pr_err("SMC address beyond the SMC RAM area\n");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
				SMN_MP1_SRAM_START_ADDR + smc_address);

	return 0;
}
137
/*
 * Write one dword into SMC SRAM through the indirect index/data pair.
 * Returns 0 on success or the error from the address setup.
 */
static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
		uint32_t smc_address, uint32_t value, uint32_t limit)
{
	int err;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	err = smu8_set_smc_sram_address(hwmgr, smc_address, limit);
	if (err != 0)
		return err;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
	return 0;
}
152
/*
 * smu8_check_fw_load_finish - poll the firmware header's UcodeLoadStatus
 * word until every bit of @firmware is set, or hwmgr->usec_timeout
 * microseconds have passed.
 *
 * @firmware: mask of UCODE_ID_*_MASK bits to wait for
 *
 * Returns 0 once all requested bits are set, -EINVAL on bad arguments
 * or timeout.
 */
static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
				   uint32_t firmware)
{
	int i;
	/* SRAM address of UcodeLoadStatus inside the SMU8 firmware header. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	/* Select the SRAM word; subsequent DATA reads return its contents. */
	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		if (firmware ==
			(cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
			break;
		udelay(1);
	}

	if (i >= hwmgr->usec_timeout) {
		pr_err("SMU check loaded firmware failed.\n");
		return -EINVAL;
	}

	return 0;
}
180
/*
 * smu8_load_mec_firmware - point the CP compute instruction cache at the
 * MEC firmware image obtained from CGS.
 *
 * Returns 0 on success, -EINVAL on bad arguments or if the MEC firmware
 * info cannot be queried.
 */
static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
	uint32_t reg_data;
	uint32_t tmp;
	int ret = 0;
	struct cgs_firmware_info info = {0};

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	ret = cgs_get_firmware_info(hwmgr->device,
						CGS_UCODE_ID_CP_MEC, &info);

	if (ret)
		return -EINVAL;

	/* Disable MEC parsing/prefetching */
	tmp = cgs_read_register(hwmgr->device,
					mmCP_MEC_CNTL);
	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);

	/* Configure the instruction-cache base control before the address. */
	tmp = cgs_read_register(hwmgr->device,
					mmCP_CPC_IC_BASE_CNTL);

	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);

	/* Program the 64-bit MC address of the firmware image, masked to
	 * each register's valid field. */
	reg_data = lower_32_bits(info.mc_addr) &
			PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);

	reg_data = upper_32_bits(info.mc_addr) &
			PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
	cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);

	return 0;
}
223
smu8_translate_firmware_enum_to_arg(struct pp_hwmgr * hwmgr,enum smu8_scratch_entry firmware_enum)224 static uint8_t smu8_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
225 enum smu8_scratch_entry firmware_enum)
226 {
227 uint8_t ret = 0;
228
229 switch (firmware_enum) {
230 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0:
231 ret = UCODE_ID_SDMA0;
232 break;
233 case SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1:
234 if (hwmgr->chip_id == CHIP_STONEY)
235 ret = UCODE_ID_SDMA0;
236 else
237 ret = UCODE_ID_SDMA1;
238 break;
239 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE:
240 ret = UCODE_ID_CP_CE;
241 break;
242 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
243 ret = UCODE_ID_CP_PFP;
244 break;
245 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME:
246 ret = UCODE_ID_CP_ME;
247 break;
248 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
249 ret = UCODE_ID_CP_MEC_JT1;
250 break;
251 case SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
252 if (hwmgr->chip_id == CHIP_STONEY)
253 ret = UCODE_ID_CP_MEC_JT1;
254 else
255 ret = UCODE_ID_CP_MEC_JT2;
256 break;
257 case SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
258 ret = UCODE_ID_GMCON_RENG;
259 break;
260 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G:
261 ret = UCODE_ID_RLC_G;
262 break;
263 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
264 ret = UCODE_ID_RLC_SCRATCH;
265 break;
266 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
267 ret = UCODE_ID_RLC_SRM_ARAM;
268 break;
269 case SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
270 ret = UCODE_ID_RLC_SRM_DRAM;
271 break;
272 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
273 ret = UCODE_ID_DMCU_ERAM;
274 break;
275 case SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
276 ret = UCODE_ID_DMCU_IRAM;
277 break;
278 case SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
279 ret = TASK_ARG_INIT_MM_PWR_LOG;
280 break;
281 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
282 case SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
283 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
284 case SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
285 case SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START:
286 case SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
287 ret = TASK_ARG_REG_MMIO;
288 break;
289 case SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
290 ret = TASK_ARG_INIT_CLK_TABLE;
291 break;
292 }
293
294 return ret;
295 }
296
/*
 * Convert a UCODE_ID_* task-argument value to the matching CGS ucode id.
 * Unknown values map to CGS_UCODE_ID_MAXIMUM.
 */
static enum cgs_ucode_id smu8_convert_fw_type_to_cgs(uint32_t fw_type)
{
	switch (fw_type) {
	case UCODE_ID_SDMA0:
		return CGS_UCODE_ID_SDMA0;
	case UCODE_ID_SDMA1:
		return CGS_UCODE_ID_SDMA1;
	case UCODE_ID_CP_CE:
		return CGS_UCODE_ID_CP_CE;
	case UCODE_ID_CP_PFP:
		return CGS_UCODE_ID_CP_PFP;
	case UCODE_ID_CP_ME:
		return CGS_UCODE_ID_CP_ME;
	case UCODE_ID_CP_MEC_JT1:
		return CGS_UCODE_ID_CP_MEC_JT1;
	case UCODE_ID_CP_MEC_JT2:
		return CGS_UCODE_ID_CP_MEC_JT2;
	case UCODE_ID_RLC_G:
		return CGS_UCODE_ID_RLC_G;
	default:
		return CGS_UCODE_ID_MAXIMUM;
	}
}
332
/*
 * smu8_smu_populate_single_scratch_task - append one task operating on a
 * scratch buffer (save/load/initialize) to the TOC task list.
 *
 * @fw_enum: scratch entry the task targets
 * @type:    TASK_TYPE_* opcode
 * @is_last: terminate the task chain here instead of linking onward
 *
 * Returns 0 on success, -EINVAL if no scratch buffer matches @fw_enum
 * (the claimed task slot is then left partially filled).
 */
static int smu8_smu_populate_single_scratch_task(
		struct pp_hwmgr *hwmgr,
		enum smu8_scratch_entry fw_enum,
		uint8_t type, bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	/* Claim the next task slot; the counter now indexes the slot after. */
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = type;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	/* Chain to the following slot unless this task ends the list. */
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Locate the scratch buffer registered for this firmware entry. */
	for (i = 0; i < smu8_smu->scratch_buffer_length; i++)
		if (smu8_smu->scratch_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->scratch_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->scratch_buffer[i].data_size;

	/* The IH-registers scratch doubles as restore metadata: ask the SMU
	 * to replay it on load. */
	if (SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == fw_enum) {
		struct smu8_ih_meta_data *pIHReg_restore =
			(struct smu8_ih_meta_data *)smu8_smu->scratch_buffer[i].kaddr;
		pIHReg_restore->command =
			METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
	}

	return 0;
}
369
/*
 * smu8_smu_populate_single_ucode_load_task - append one UCODE_LOAD task
 * to the TOC, pointing at the driver-owned firmware image for @fw_enum.
 *
 * @is_last: terminate the task chain here instead of linking onward
 *
 * Returns 0 on success, -EINVAL if the firmware was never recorded in
 * driver_buffer[] (e.g. its CGS lookup failed earlier).
 */
static int smu8_smu_populate_single_ucode_load_task(
		struct pp_hwmgr *hwmgr,
		enum smu8_scratch_entry fw_enum,
		bool is_last)
{
	uint8_t i;
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
	/* Claim the next task slot; the counter now indexes the slot after. */
	struct SMU_Task *task = &toc->tasks[smu8_smu->toc_entry_used_count++];

	task->type = TASK_TYPE_UCODE_LOAD;
	task->arg = smu8_translate_firmware_enum_to_arg(hwmgr, fw_enum);
	task->next = is_last ? END_OF_TASK_LIST : smu8_smu->toc_entry_used_count;

	/* Locate the driver buffer entry recorded for this firmware. */
	for (i = 0; i < smu8_smu->driver_buffer_length; i++)
		if (smu8_smu->driver_buffer[i].firmware_ID == fw_enum)
			break;

	if (i >= smu8_smu->driver_buffer_length) {
		pr_err("Invalid Firmware Type\n");
		return -EINVAL;
	}

	task->addr.low = lower_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->addr.high = upper_32_bits(smu8_smu->driver_buffer[i].mc_addr);
	task->size_bytes = smu8_smu->driver_buffer[i].data_size;

	return 0;
}
399
smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr * hwmgr)400 static int smu8_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
401 {
402 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
403
404 smu8_smu->toc_entry_aram = smu8_smu->toc_entry_used_count;
405 smu8_smu_populate_single_scratch_task(hwmgr,
406 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
407 TASK_TYPE_UCODE_SAVE, true);
408
409 return 0;
410 }
411
smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr * hwmgr)412 static int smu8_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
413 {
414 int i;
415 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
416 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
417
418 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
419 toc->JobList[i] = (uint8_t)IGNORE_JOB;
420
421 return 0;
422 }
423
smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr * hwmgr)424 static int smu8_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
425 {
426 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
427 struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;
428
429 toc->JobList[JOB_GFX_SAVE] = (uint8_t)smu8_smu->toc_entry_used_count;
430 smu8_smu_populate_single_scratch_task(hwmgr,
431 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
432 TASK_TYPE_UCODE_SAVE, false);
433
434 smu8_smu_populate_single_scratch_task(hwmgr,
435 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
436 TASK_TYPE_UCODE_SAVE, true);
437
438 return 0;
439 }
440
441
/*
 * smu8_smu_construct_toc_for_vddgfx_exit - build the GFX-restore job:
 * reload the CP/RLC firmwares, then restore the RLC scratch and SRM
 * contents that the GFX-save job preserved.  On Stoney the second MEC
 * jump table is replaced by a second load of JT1.
 */
static int smu8_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	struct TOC *toc = (struct TOC *)smu8_smu->toc_buffer.kaddr;

	/* The GFX-restore job begins at the next free task slot. */
	toc->JobList[JOB_GFX_RESTORE] = (uint8_t)smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);

	if (hwmgr->chip_id == CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	else
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);

	/* populate scratch */
	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
				TASK_TYPE_UCODE_LOAD, false);

	smu8_smu_populate_single_scratch_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
				TASK_TYPE_UCODE_LOAD, true);

	return 0;
}
483
smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr * hwmgr)484 static int smu8_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
485 {
486 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
487
488 smu8_smu->toc_entry_power_profiling_index = smu8_smu->toc_entry_used_count;
489
490 smu8_smu_populate_single_scratch_task(hwmgr,
491 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
492 TASK_TYPE_INITIALIZE, true);
493 return 0;
494 }
495
/*
 * smu8_smu_construct_toc_for_bootup - build the initialize job that
 * loads every boot-time firmware.  Stoney skips SDMA1 and MEC JT2
 * because it has only one SDMA engine / jump table.
 */
static int smu8_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	/* Record where the bootup job starts for later ExecuteJob calls. */
	smu8_smu->toc_entry_initialize_index = smu8_smu->toc_entry_used_count;

	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
	if (hwmgr->chip_id != CHIP_STONEY)
		smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
	smu8_smu_populate_single_ucode_load_task(hwmgr,
				SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);

	return 0;
}
523
smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr * hwmgr)524 static int smu8_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
525 {
526 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
527
528 smu8_smu->toc_entry_clock_table = smu8_smu->toc_entry_used_count;
529
530 smu8_smu_populate_single_scratch_task(hwmgr,
531 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
532 TASK_TYPE_INITIALIZE, true);
533
534 return 0;
535 }
536
/*
 * smu8_smu_construct_toc - rebuild the entire TOC from scratch.
 *
 * The task counter is reset and the job list cleared, then each section
 * is appended in order.  Order matters: every builder latches the
 * current toc_entry_used_count as its job's starting index before
 * appending tasks.
 */
static int smu8_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;

	smu8_smu->toc_entry_used_count = 0;
	smu8_smu_initialize_toc_empty_job_list(hwmgr);
	smu8_smu_construct_toc_for_rlc_aram_save(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_enter(hwmgr);
	smu8_smu_construct_toc_for_vddgfx_exit(hwmgr);
	smu8_smu_construct_toc_for_power_profiling(hwmgr);
	smu8_smu_construct_toc_for_bootup(hwmgr);
	smu8_smu_construct_toc_for_clock_table(hwmgr);

	return 0;
}
552
smu8_smu_populate_firmware_entries(struct pp_hwmgr * hwmgr)553 static int smu8_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
554 {
555 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
556 uint32_t firmware_type;
557 uint32_t i;
558 int ret;
559 enum cgs_ucode_id ucode_id;
560 struct cgs_firmware_info info = {0};
561
562 smu8_smu->driver_buffer_length = 0;
563
564 for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
565
566 firmware_type = smu8_translate_firmware_enum_to_arg(hwmgr,
567 firmware_list[i]);
568
569 ucode_id = smu8_convert_fw_type_to_cgs(firmware_type);
570
571 ret = cgs_get_firmware_info(hwmgr->device,
572 ucode_id, &info);
573
574 if (ret == 0) {
575 smu8_smu->driver_buffer[i].mc_addr = info.mc_addr;
576
577 smu8_smu->driver_buffer[i].data_size = info.image_size;
578
579 smu8_smu->driver_buffer[i].firmware_ID = firmware_list[i];
580 smu8_smu->driver_buffer_length++;
581 }
582 }
583
584 return 0;
585 }
586
/*
 * Carve a sub-buffer for one scratch entry out of the shared SMU buffer
 * and record its kernel/GPU addresses.  Consumption is rounded up to a
 * 32-byte boundary so the following entry stays aligned.  Returns 0.
 */
static int smu8_smu_populate_single_scratch_entry(
				struct pp_hwmgr *hwmgr,
				enum smu8_scratch_entry scratch_type,
				uint32_t ulsize_byte,
				struct smu8_buffer_entry *entry)
{
	struct smu8_smumgr *smumgr = hwmgr->smu_backend;
	uint32_t offset = smumgr->smu_buffer_used_bytes;

	entry->data_size = ulsize_byte;
	entry->kaddr = (char *)smumgr->smu_buffer.kaddr + offset;
	entry->mc_addr = smumgr->smu_buffer.mc_addr + offset;
	entry->firmware_ID = scratch_type;

	smumgr->smu_buffer_used_bytes = offset + SIZE_ALIGN_32(ulsize_byte);

	return 0;
}
606
smu8_download_pptable_settings(struct pp_hwmgr * hwmgr,void ** table)607 static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
608 {
609 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
610 unsigned long i;
611
612 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
613 if (smu8_smu->scratch_buffer[i].firmware_ID
614 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
615 break;
616 }
617
618 *table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
619
620 smu8_send_msg_to_smc_with_parameter(hwmgr,
621 PPSMC_MSG_SetClkTableAddrHi,
622 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
623
624 smu8_send_msg_to_smc_with_parameter(hwmgr,
625 PPSMC_MSG_SetClkTableAddrLo,
626 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
627
628 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
629 smu8_smu->toc_entry_clock_table);
630
631 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
632
633 return 0;
634 }
635
smu8_upload_pptable_settings(struct pp_hwmgr * hwmgr)636 static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
637 {
638 struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
639 unsigned long i;
640
641 for (i = 0; i < smu8_smu->scratch_buffer_length; i++) {
642 if (smu8_smu->scratch_buffer[i].firmware_ID
643 == SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
644 break;
645 }
646
647 smu8_send_msg_to_smc_with_parameter(hwmgr,
648 PPSMC_MSG_SetClkTableAddrHi,
649 upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
650
651 smu8_send_msg_to_smc_with_parameter(hwmgr,
652 PPSMC_MSG_SetClkTableAddrLo,
653 lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
654
655 smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
656 smu8_smu->toc_entry_clock_table);
657
658 smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
659
660 return 0;
661 }
662
/*
 * smu8_request_smu_load_fw - build the TOC in GPU memory, hand its
 * address to the SMU, run the init jobs, and verify that all expected
 * firmwares report as loaded.
 *
 * Returns 0 on success, negative errno if the load-status poll or the
 * MEC firmware setup fails.
 */
static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu8_smumgr *smu8_smu = hwmgr->smu_backend;
	uint32_t smc_address;
	uint32_t fw_to_check = 0;
	int ret;

	amdgpu_ucode_init_bo(hwmgr->adev);

	smu8_smu_populate_firmware_entries(hwmgr);

	smu8_smu_construct_toc(hwmgr);

	/* Clear UcodeLoadStatus so stale bits cannot satisfy the poll in
	 * smu8_check_fw_load_finish below. */
	smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
		offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);

	smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);

	/* Tell the SMU where the TOC lives. */
	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrHi,
					upper_32_bits(smu8_smu->toc_buffer.mc_addr));

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DriverDramAddrLo,
					lower_32_bits(smu8_smu->toc_buffer.mc_addr));

	smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);

	/* Run the ARAM save, power-profiling and bootup jobs recorded in
	 * the TOC, by their starting task indices. */
	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_aram);
	smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
				smu8_smu->toc_entry_power_profiling_index);

	smu8_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_ExecuteJob,
					smu8_smu->toc_entry_initialize_index);

	fw_to_check = UCODE_ID_RLC_G_MASK |
			UCODE_ID_SDMA0_MASK |
			UCODE_ID_SDMA1_MASK |
			UCODE_ID_CP_CE_MASK |
			UCODE_ID_CP_ME_MASK |
			UCODE_ID_CP_PFP_MASK |
			UCODE_ID_CP_MEC_JT1_MASK |
			UCODE_ID_CP_MEC_JT2_MASK;

	/* Stoney has no SDMA1 / MEC JT2; don't wait for them. */
	if (hwmgr->chip_id == CHIP_STONEY)
		fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);

	ret = smu8_check_fw_load_finish(hwmgr, fw_to_check);
	if (ret) {
		pr_err("SMU firmware load failed\n");
		return ret;
	}

	ret = smu8_load_mec_firmware(hwmgr);
	if (ret) {
		pr_err("Mec Firmware load failed\n");
		return ret;
	}

	return 0;
}
727
/*
 * smu8_start_smu - read and cache the SMU firmware version, then kick
 * off firmware loading.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the status of
 * smu8_request_smu_load_fw().
 */
static int smu8_start_smu(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev;

	/* SRAM address of the Version field in the SMU8 firmware header. */
	uint32_t index = SMN_MP1_SRAM_START_ADDR +
			 SMU8_FIRMWARE_HEADER_LOCATION +
			 offsetof(struct SMU8_Firmware_Header, Version);

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	adev = hwmgr->adev;

	cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
	hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
	/* Print the three low-order bytes of the version word. */
	pr_info("smu version %02d.%02d.%02d\n",
		((hwmgr->smu_version >> 16) & 0xFF),
		((hwmgr->smu_version >> 8) & 0xFF),
		(hwmgr->smu_version & 0xFF));
	/* Cached firmware version drops the lowest byte. */
	adev->pm.fw_version = hwmgr->smu_version >> 8;

	return smu8_request_smu_load_fw(hwmgr);
}
751
smu8_smu_init(struct pp_hwmgr * hwmgr)752 static int smu8_smu_init(struct pp_hwmgr *hwmgr)
753 {
754 int ret = 0;
755 struct smu8_smumgr *smu8_smu;
756
757 smu8_smu = kzalloc(sizeof(struct smu8_smumgr), GFP_KERNEL);
758 if (smu8_smu == NULL)
759 return -ENOMEM;
760
761 hwmgr->smu_backend = smu8_smu;
762
763 smu8_smu->toc_buffer.data_size = 4096;
764 smu8_smu->smu_buffer.data_size =
765 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
766 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
767 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
768 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
769 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
770
771 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
772 smu8_smu->toc_buffer.data_size,
773 PAGE_SIZE,
774 AMDGPU_GEM_DOMAIN_VRAM,
775 &smu8_smu->toc_buffer.handle,
776 &smu8_smu->toc_buffer.mc_addr,
777 &smu8_smu->toc_buffer.kaddr);
778 if (ret)
779 goto err2;
780
781 ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
782 smu8_smu->smu_buffer.data_size,
783 PAGE_SIZE,
784 AMDGPU_GEM_DOMAIN_VRAM,
785 &smu8_smu->smu_buffer.handle,
786 &smu8_smu->smu_buffer.mc_addr,
787 &smu8_smu->smu_buffer.kaddr);
788 if (ret)
789 goto err1;
790
791 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
792 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
793 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
794 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
795 pr_err("Error when Populate Firmware Entry.\n");
796 goto err0;
797 }
798
799 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
800 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
801 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
802 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
803 pr_err("Error when Populate Firmware Entry.\n");
804 goto err0;
805 }
806 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
807 SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
808 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
809 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
810 pr_err("Error when Populate Firmware Entry.\n");
811 goto err0;
812 }
813
814 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
815 SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
816 sizeof(struct SMU8_MultimediaPowerLogData),
817 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
818 pr_err("Error when Populate Firmware Entry.\n");
819 goto err0;
820 }
821
822 if (0 != smu8_smu_populate_single_scratch_entry(hwmgr,
823 SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
824 sizeof(struct SMU8_Fusion_ClkTable),
825 &smu8_smu->scratch_buffer[smu8_smu->scratch_buffer_length++])) {
826 pr_err("Error when Populate Firmware Entry.\n");
827 goto err0;
828 }
829
830 return 0;
831
832 err0:
833 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
834 &smu8_smu->smu_buffer.mc_addr,
835 &smu8_smu->smu_buffer.kaddr);
836 err1:
837 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
838 &smu8_smu->toc_buffer.mc_addr,
839 &smu8_smu->toc_buffer.kaddr);
840 err2:
841 kfree(smu8_smu);
842 return -EINVAL;
843 }
844
smu8_smu_fini(struct pp_hwmgr * hwmgr)845 static int smu8_smu_fini(struct pp_hwmgr *hwmgr)
846 {
847 struct smu8_smumgr *smu8_smu;
848
849 if (hwmgr == NULL || hwmgr->device == NULL)
850 return -EINVAL;
851
852 smu8_smu = hwmgr->smu_backend;
853 if (smu8_smu) {
854 amdgpu_bo_free_kernel(&smu8_smu->toc_buffer.handle,
855 &smu8_smu->toc_buffer.mc_addr,
856 &smu8_smu->toc_buffer.kaddr);
857 amdgpu_bo_free_kernel(&smu8_smu->smu_buffer.handle,
858 &smu8_smu->smu_buffer.mc_addr,
859 &smu8_smu->smu_buffer.kaddr);
860 kfree(smu8_smu);
861 }
862
863 return 0;
864 }
865
smu8_dpm_check_smu_features(struct pp_hwmgr * hwmgr,unsigned long check_feature)866 static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
867 unsigned long check_feature)
868 {
869 int result;
870 unsigned long features;
871
872 result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
873 if (result == 0) {
874 features = smum_get_argument(hwmgr);
875 if (features & check_feature)
876 return true;
877 }
878
879 return false;
880 }
881
smu8_is_dpm_running(struct pp_hwmgr * hwmgr)882 static bool smu8_is_dpm_running(struct pp_hwmgr *hwmgr)
883 {
884 if (smu8_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
885 return true;
886 return false;
887 }
888
/* SMU-manager dispatch table for SMU8. */
const struct pp_smumgr_func smu8_smu_funcs = {
	.name = "smu8_smu",
	.smu_init = smu8_smu_init,
	.smu_fini = smu8_smu_fini,
	.start_smu = smu8_start_smu,
	.check_fw_load_finish = smu8_check_fw_load_finish,
	/* Firmware loading is driven from smu8_start_smu instead. */
	.request_smu_load_fw = NULL,
	.request_smu_load_specific_fw = NULL,
	.get_argument = smu8_get_argument,
	.send_msg_to_smc = smu8_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = smu8_send_msg_to_smc_with_parameter,
	.download_pptable_settings = smu8_download_pptable_settings,
	.upload_pptable_settings = smu8_upload_pptable_settings,
	.is_dpm_running = smu8_is_dpm_running,
};
904
905