xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/amdgpu_si_smc.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1 /*	$NetBSD: amdgpu_si_smc.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2011 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Alex Deucher
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: amdgpu_si_smc.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");
29 
30 #include <linux/firmware.h>
31 
32 #include "amdgpu.h"
33 #include "sid.h"
34 #include "ppsmc.h"
35 #include "amdgpu_ucode.h"
36 #include "sislands_smc.h"
37 
/*
 * Point the SMC indirect-index register at smc_address and turn off
 * auto-increment so the next SMC_IND_DATA_0 access hits exactly that
 * dword.  Returns -EINVAL for an unaligned address or one whose dword
 * would extend past limit.  All callers in this file hold
 * adev->smc_idx_lock around this and the following data access.
 */
static int si_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	/* Accesses are dword-sized: need 4-byte alignment and the full
	 * four bytes within the limit. */
	if ((smc_address & 3) != 0 || smc_address + 3 > limit)
		return -EINVAL;

	WREG32(SMC_IND_INDEX_0, smc_address);
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);

	return 0;
}
51 
/*
 * Copy byte_count bytes from src into SMC SRAM starting at
 * smc_start_address.  Whole dwords are written directly (big-endian
 * byte order, as the SMC expects); a trailing partial dword is merged
 * with the existing SRAM contents via read-modify-write so bytes past
 * the end of src are preserved.  The start address must be dword
 * aligned and the whole range must fit below limit.
 * Returns 0 on success, -EINVAL on a bad address/range.
 */
int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,
				u32 smc_start_address,
				const u8 *src, u32 byte_count, u32 limit)
{
	unsigned long flags;
	int ret = 0;
	u32 data, original_data, addr, extra_shift;

	if (smc_start_address & 3)
		return -EINVAL;
	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	/* Serialize use of the shared SMC index/data register pair. */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		/* Fetch the dword we are about to partially overwrite. */
		original_data = RREG32(SMC_IND_DATA_0);
		/* Bits NOT covered by the remaining 1-3 source bytes. */
		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		/* Left-align the new bytes, keep the old low-order bytes. */
		data <<= extra_shift;
		data |= (original_data & ~((~0UL) << extra_shift));

		/* Re-set the index before writing.  NOTE(review): likely
		 * defensive -- the read above should not have moved the
		 * index since auto-increment is disabled; confirm against
		 * hardware docs before simplifying. */
		ret = si_set_smc_sram_address(adev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_IND_DATA_0, data);
	}

done:
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return ret;
}
115 
amdgpu_si_start_smc(struct amdgpu_device * adev)116 void amdgpu_si_start_smc(struct amdgpu_device *adev)
117 {
118 	u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
119 
120 	tmp &= ~RST_REG;
121 
122 	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
123 }
124 
amdgpu_si_reset_smc(struct amdgpu_device * adev)125 void amdgpu_si_reset_smc(struct amdgpu_device *adev)
126 {
127 	u32 tmp;
128 
129 	RREG32(CB_CGTT_SCLK_CTRL);
130 	RREG32(CB_CGTT_SCLK_CTRL);
131 	RREG32(CB_CGTT_SCLK_CTRL);
132 	RREG32(CB_CGTT_SCLK_CTRL);
133 
134 	tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |
135 	      RST_REG;
136 	WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
137 }
138 
/*
 * Write a fixed 4-byte pattern to SMC SRAM address 0.  Per the
 * function name this programs the jump taken when the SMC starts
 * executing (NOTE(review): byte meaning not derivable from this file
 * -- it is SMC firmware opcode data).  The limit sizeof(data)+1 == 5
 * just covers the 4-byte write.
 */
int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev)
{
	static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };

	return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
}
145 
/*
 * Gate or ungate the SMC clock: enable clears CK_DISABLE in
 * SMC_SYSCON_CLOCK_CNTL_0, disable sets it.
 */
void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable)
{
	u32 clk_cntl;

	clk_cntl = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
	clk_cntl = enable ? (clk_cntl & ~CK_DISABLE) : (clk_cntl | CK_DISABLE);
	WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, clk_cntl);
}
157 
amdgpu_si_is_smc_running(struct amdgpu_device * adev)158 bool amdgpu_si_is_smc_running(struct amdgpu_device *adev)
159 {
160 	u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
161 	u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
162 
163 	if (!(rst & RST_REG) && !(clk & CK_DISABLE))
164 		return true;
165 
166 	return false;
167 }
168 
amdgpu_si_send_msg_to_smc(struct amdgpu_device * adev,PPSMC_Msg msg)169 PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
170 				       PPSMC_Msg msg)
171 {
172 	u32 tmp;
173 	int i;
174 
175 	if (!amdgpu_si_is_smc_running(adev))
176 		return PPSMC_Result_Failed;
177 
178 	WREG32(SMC_MESSAGE_0, msg);
179 
180 	for (i = 0; i < adev->usec_timeout; i++) {
181 		tmp = RREG32(SMC_RESP_0);
182 		if (tmp != 0)
183 			break;
184 		udelay(1);
185 	}
186 
187 	return (PPSMC_Result)RREG32(SMC_RESP_0);
188 }
189 
amdgpu_si_wait_for_smc_inactive(struct amdgpu_device * adev)190 PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
191 {
192 	u32 tmp;
193 	int i;
194 
195 	if (!amdgpu_si_is_smc_running(adev))
196 		return PPSMC_Result_OK;
197 
198 	for (i = 0; i < adev->usec_timeout; i++) {
199 		tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
200 		if ((tmp & CKEN) == 0)
201 			break;
202 		udelay(1);
203 	}
204 
205 	return PPSMC_Result_OK;
206 }
207 
/*
 * Upload the SMC firmware image held in adev->pm.fw into SMC SRAM.
 * Parses the little-endian v1.0 SMC firmware header for the load
 * address and size, then streams whole dwords (big-endian, as the SMC
 * expects) through the auto-incrementing indirect data register.
 * Returns -EINVAL when no firmware is loaded or the image size is not
 * a multiple of 4.
 *
 * NOTE(review): the limit parameter is accepted but never checked
 * against ucode_start_address/ucode_size here -- confirm whether that
 * is intentional (matches upstream) or a missing bounds check.
 */
int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
{
	const struct smc_firmware_header_v1_0 *hdr;
	unsigned long flags;
	u32 ucode_start_address;
	u32 ucode_size;
	const u8 *src;
	u32 data;

	if (!adev->pm.fw)
		return -EINVAL;

	hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;

	amdgpu_ucode_print_smc_hdr(&hdr->header);

	/* Header fields are stored little-endian in the firmware blob. */
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
	ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	src = (const u8 *)
		(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	/* The copy loop below only handles whole dwords. */
	if (ucode_size & 3)
		return -EINVAL;

	/* Serialize use of the shared SMC index/data register pair. */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(SMC_IND_INDEX_0, ucode_start_address);
	/* Enable auto-increment so consecutive data writes advance the
	 * SRAM address without re-writing the index register. */
	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
	while (ucode_size >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		WREG32(SMC_IND_DATA_0, data);

		src += 4;
		ucode_size -= 4;
	}
	/* Restore non-incrementing mode for later single-dword accesses. */
	WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return 0;
}
249 
/*
 * Read one dword of SMC SRAM at smc_address into *value, under the
 * SMC index lock.  On failure (-EINVAL from the address check)
 * *value is left untouched.
 */
int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				  u32 *value, u32 limit)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	err = si_set_smc_sram_address(adev, smc_address, limit);
	if (!err)
		*value = RREG32(SMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return err;
}
264 
/*
 * Write one dword (value) to SMC SRAM at smc_address, under the SMC
 * index lock.  Returns -EINVAL from the address check without
 * touching SRAM, 0 on success.
 */
int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
				   u32 value, u32 limit)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	err = si_set_smc_sram_address(adev, smc_address, limit);
	if (!err)
		WREG32(SMC_IND_DATA_0, value);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return err;
}
279