/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 * @xcc_id: xcc accelerated compute core id
 *
 * Put the RLC into safe mode if it is enabled and not already in safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	if (adev->gfx.rlc.in_safe_mode[xcc_id])
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id);
		adev->gfx.rlc.in_safe_mode[xcc_id] = true;
	}
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 * @xcc_id: xcc accelerated compute core id
 *
 * Take the RLC out of safe mode if it is enabled and has previously entered
 * safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	if (!(adev->gfx.rlc.in_safe_mode[xcc_id]))
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
	     AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id);
		adev->gfx.rlc.in_safe_mode[xcc_id] = false;
	}
}
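
/*
 * Usage sketch (illustrative only, not part of the original file): IP-specific
 * GFX code typically brackets clockgating or other RLC-sensitive register
 * programming with the two helpers above, e.g.:
 *
 *	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
 *	... program CGCG/MGCG related registers ...
 *	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
 *
 * Both helpers are no-ops when the RLC is disabled or none of the relevant
 * clockgating flags are set in adev->cg_flags.
 */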

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: the size of save restore block
 *
 * Allocate the RLC save/restore block and fill it with the register list.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 i;
	int r;

	/* allocate save restore block */
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* write the sr buffer */
	src_ptr = adev->gfx.rlc.reg_list;
	dst_ptr = adev->gfx.rlc.sr_ptr;
	for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
		dst_ptr[i] = cpu_to_le32(src_ptr[i]);
	amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

	return 0;
}
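
/*
 * Caller sketch (illustrative only): the IP-specific rlc_init path is expected
 * to point adev->gfx.rlc.reg_list/reg_list_size at its save/restore register
 * list before calling amdgpu_gfx_rlc_init_sr(), e.g. with a hypothetical table:
 *
 *	adev->gfx.rlc.reg_list = my_sr_register_list;
 *	adev->gfx.rlc.reg_list_size = ARRAY_SIZE(my_sr_register_list);
 *	r = amdgpu_gfx_rlc_init_sr(adev, dws);
 */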

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC clear state block, sized via the IP's get_csb_size() callback.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	u32 dws;
	int r;

	/* allocate clear state block */
	adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->gfx.rlc.clear_state_obj,
				    &adev->gfx.rlc.clear_state_gpu_addr,
				    (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	return 0;
}
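
/*
 * Note (illustrative only): amdgpu_gfx_rlc_init_csb() only allocates the
 * buffer; the clear state contents are typically written later by IP-specific
 * code, for example during RLC resume:
 *
 *	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
 *
 * with adev->gfx.rlc.clear_state_gpu_addr then programmed into the CSB address
 * registers by that IP code.
 */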

/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the RLC CP table and fill it with the CP jump tables.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* set up the cp table */
	amdgpu_gfx_rlc_setup_cp_table(adev);
	amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

	return 0;
}
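
/*
 * Caller sketch (illustrative only, sizes are examples): adev->gfx.rlc.cp_table_size
 * must be set by the IP-specific code before amdgpu_gfx_rlc_init_cpt() is called, e.g.:
 *
 *	adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024);
 *	r = amdgpu_gfx_rlc_init_cpt(adev);
 */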

/**
 * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer
 *
 * @adev: amdgpu_device pointer
 *
 * Write the CP firmware jump tables into the CP table buffer.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

/**
 * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
 *
 * @adev: amdgpu_device pointer
 *
 * Free the three BOs used for the rlc_save_restore_block, rlc_clear_state_block
 * and rlc_jump_table_block.
 */
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
	/* save restore block */
	if (adev->gfx.rlc.save_restore_obj) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
				      &adev->gfx.rlc.save_restore_gpu_addr,
				      (void **)&adev->gfx.rlc.sr_ptr);
	}

	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
{
	const struct common_firmware_header *common_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	struct amdgpu_firmware_info *info;
	unsigned int *tmp;
	unsigned int i;

	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
		return -ENOMEM;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			common_hdr = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
		}
	}

	return 0;
}

static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
	adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
	adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
	adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);

	adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
	adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
	adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
	adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_4 *rlc_hdr;
	struct amdgpu_firmware_info *info;

	rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
			info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
		}
	}
}

int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
				  uint16_t version_major,
				  uint16_t version_minor)
{
	int err;

	if (version_major < 2) {
		/* only support rlc_hdr v2.x and onwards */
		dev_err(adev->dev, "unsupported rlc fw hdr\n");
		return -EINVAL;
	}

	/* is_rlc_v2_1 is still used in APU code path */
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	if (version_minor >= 0) {
		err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
		if (err) {
			dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
			return err;
		}
	}
	if (version_minor >= 1)
		amdgpu_gfx_rlc_init_microcode_v2_1(adev);
	if (version_minor >= 2)
		amdgpu_gfx_rlc_init_microcode_v2_2(adev);
	if (version_minor == 3)
		amdgpu_gfx_rlc_init_microcode_v2_3(adev);
	if (version_minor == 4)
		amdgpu_gfx_rlc_init_microcode_v2_4(adev);

	return 0;
}
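
/*
 * Caller sketch (illustrative only): GFX IP code typically requests the RLC
 * firmware, reads the header version, and then dispatches to
 * amdgpu_gfx_rlc_init_microcode(), e.g.:
 *
 *	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
 *	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
 *	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
 *	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
 */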