/*	$NetBSD: amdgpu_kv_smc.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_kv_smc.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu.h"
#include "cikd.h"
#include "kv_dpm.h"

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

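/*
 * Post a message id to the SMC message register and busy-wait, for up
 * to adev->usec_timeout microseconds, for the firmware to respond.
 */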
int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id)
{
	u32 i;
	u32 tmp = 0;

	WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0)
			break;
		udelay(1);
	}
	tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK;

	/*
	 * 1 is the SMC success response; 0xFF and 0xFE are treated as
	 * failure codes.  Any other value, including a timeout that
	 * leaves the response at 0, falls through as success.
	 */
	if (tmp != 1) {
		if (tmp == 0xFF)
			return -EINVAL;
		else if (tmp == 0xFE)
			return -EINVAL;
	}

	return 0;
}

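/*
 * Query the SMC for the mask of currently enabled SCLK DPM levels; the
 * firmware leaves the result in its message-argument register.
 */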
int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask)
{
	int ret;

	ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask);

	if (ret == 0)
		*enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0);

	return ret;
}

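/* Write the message argument first, then post the message id. */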
int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
				      PPSMC_Msg msg, u32 parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);

	return amdgpu_kv_notify_message_to_smu(adev, msg);
}

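/*
 * Latch a dword-aligned SMC SRAM address, wholly below the given
 * limit, into the indirect-access index register, with auto-increment
 * disabled so each dword access needs a fresh address write.
 */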
static int kv_set_smc_sram_address(struct amdgpu_device *adev,
				   u32 smc_address, u32 limit)
{
	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	WREG32(mmSMC_IND_INDEX_0, smc_address);
	WREG32_P(mmSMC_IND_ACCESS_CNTL, 0,
			~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);

	return 0;
}

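/* Read a single dword back from SMC SRAM at smc_address. */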
int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
			   u32 *value, u32 limit)
{
	int ret;

	ret = kv_set_smc_sram_address(adev, smc_address, limit);
	if (ret)
		return ret;

	*value = RREG32(mmSMC_IND_DATA_0);
	return 0;
}

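/* Tell the SMC firmware to enable or disable dynamic power management. */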
int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable);
	else
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable);
}

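/*
 * Tell the SMC firmware to enable or disable BAPM (bidirectional
 * application power management).
 */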
int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM);
	else
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM);
}

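/*
 * Copy a byte buffer into SMC SRAM.  The SMC address space is
 * big-endian and dword-addressed, so misaligned head and tail bytes
 * are merged with the existing SRAM contents by read-modify-write.
 */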
int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev,
			 u32 smc_start_address,
			 const u8 *src, u32 byte_count, u32 limit)
{
	int ret;
	u32 data, original_data, addr, extra_shift, t_byte, count, mask;

	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;
	t_byte = addr & 3;

	/* RMW for the initial bytes */
	if (t_byte != 0) {
		addr -= t_byte;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(mmSMC_IND_DATA_0);

		/*
		 * Build a mask that preserves the t_byte leading bytes
		 * (and any trailing bytes beyond byte_count) of the
		 * dword already in SRAM, packing the new bytes between.
		 */
		data = 0;
		mask = 0;
		count = 4;
		while (count > 0) {
			if (t_byte > 0) {
				mask = (mask << 8) | 0xff;
				t_byte--;
			} else if (byte_count > 0) {
				data = (data << 8) + *src++;
				byte_count--;
				mask <<= 8;
			} else {
				data <<= 8;
				mask = (mask << 8) | 0xff;
			}
			count--;
		}

		data |= original_data & mask;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);

		addr += 4;
	}

	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for the final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		original_data = RREG32(mmSMC_IND_DATA_0);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		ret = kv_set_smc_sram_address(adev, addr, limit);
		if (ret)
			return ret;

		WREG32(mmSMC_IND_DATA_0, data);
	}
	return 0;
}