/*	$NetBSD: amdgpu_virt.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_virt.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now, all MMIO pages except the mailbox are blocked if
	 * blocking is enabled in the hypervisor.  Use SCRATCH_REG0 to
	 * test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

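/*
 * Illustrative usage sketch, not part of the original source: SR-IOV
 * init code could use this check to detect that the hypervisor has
 * blocked MMIO and fail gracefully.  The error handling shown is an
 * assumption, not the driver's actual init path.
 *
 *	if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev)) {
 *		dev_err(adev->dev, "VF MMIO blocked by hypervisor\n");
 *		return -EACCES;
 *	}
 */
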
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait any longer when called from IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_kiq:
	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}

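/*
 * Illustrative usage sketch, not part of the original source: a TLB
 * flush on an SR-IOV VF could use the KIQ to write an invalidate
 * request register and poll its acknowledgment register as one atomic
 * packet.  The register offsets and values below are placeholders.
 *
 *	uint32_t req_reg = 0x1234;	(hypothetical request register)
 *	uint32_t ack_reg = 0x1238;	(hypothetical ack register)
 *
 *	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
 *					   1 << vmid, 1 << vmid);
 */
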
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	whether this is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	whether this is driver init time.
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

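/*
 * Illustrative usage sketch, not part of the original source: driver
 * init on an SR-IOV VF brackets hardware setup with a request/release
 * pair so the hypervisor grants exclusive access only while needed.
 * The hw-init call in the middle is a placeholder.
 *
 *	r = amdgpu_virt_request_full_gpu(adev, true);
 *	if (r)
 *		return r;
 *	r = hypothetical_hw_init(adev);
 *	amdgpu_virt_release_full_gpu(adev, true);
 *	return r;
 */
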
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

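/*
 * Illustrative usage sketch, not part of the original source: a VF
 * recovery path could chain the two calls, asking the hypervisor for a
 * reset and then blocking until it completes.  The error handling is
 * an assumption.
 *
 *	r = amdgpu_virt_reset_gpu(adev);
 *	if (r)
 *		return r;
 *	return amdgpu_virt_wait_reset(adev);
 */
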
/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on successful allocation.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%"PRIx64", cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

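/*
 * Illustrative usage sketch, not part of the original source: an IP
 * block such as UVD or VCE would allocate the table in its hw_init
 * hook and free it again in hw_fini.  The hook names below are generic
 * placeholders, not the driver's actual callbacks.
 *
 *	static int hypothetical_hw_init(void *handle)
 *	{
 *		struct amdgpu_device *adev = handle;
 *
 *		return amdgpu_virt_alloc_mm_table(adev);
 *	}
 *
 *	static int hypothetical_hw_fini(void *handle)
 *	{
 *		struct amdgpu_device *adev = handle;
 *
 *		amdgpu_virt_free_mm_table(adev);
 *		return 0;
 *	}
 */
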
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate the checksum over the whole object */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* subtract the bytes of the chksum field itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

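/*
 * Illustrative usage sketch, not part of the original source: because
 * the function subtracts the chksum bytes back out, the same call
 * verifies a received message when passed the checksum the peer wrote.
 * The msg, msg_size, key, and chksum variables are placeholders.
 *
 *	checkval = amdgpu_virt_fw_reserve_get_checksum(msg, msg_size,
 *						       key, chksum);
 *	if (checkval != chksum)
 *		return -EINVAL;	(hypothetical rejection path)
 */
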
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* the pf2vf message must fit within 4KB */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifndef __NetBSD__		/* XXX ??? */
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}
297