/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

#define MAX_KIQ_REG_WAIT		5000	/* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5	/* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			20

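/**
 * amdgpu_csa_vaddr() - compute the virtual address of the static CSA
 * @adev: amdgpu device.
 * Places the CSA just below the reserved region at the top of the VM
 * address space, remapping the address above the VA hole when it would
 * otherwise land inside it.
 * Return: The CSA virtual address.
 */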
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

	addr -= AMDGPU_VA_RESERVED_SIZE;

	if (addr >= AMDGPU_VA_HOLE_START)
		addr |= AMDGPU_VA_HOLE_END;

	return addr;
}

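/**
 * amdgpu_virt_mmio_blocked() - check whether MMIO access is blocked
 * @adev: amdgpu device.
 * Return: True if the hypervisor currently blocks MMIO access.
 */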
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if
	 * blocking is enabled in the hypervisor.  Read SCRATCH_REG0
	 * to test: a blocked register reads back as all ones.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

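/**
 * amdgpu_allocate_static_csa() - allocate the static CSA buffer object
 * @adev: amdgpu device.
 * Allocates and zeroes a kernel BO in VRAM for the context save area.
 * Return: Zero on success, error code otherwise.
 */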
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    (u64 *)&adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

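/**
 * amdgpu_free_static_csa() - free the static CSA buffer object
 * @adev: amdgpu device.
 */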
void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
			      (u64 *)&adev->virt.csa_vmid0_addr,
			      NULL);
}

/**
 * amdgpu_map_static_csa() - map the static CSA into a VM
 * @adev: amdgpu device.
 * @vm: VM to map the CSA into.
 * @bo_va: on success, the bo_va created for the mapping.
 * Should be called during amdgpu_vm_init(); it maps the virtual address
 * amdgpu_csa_vaddr() into this VM.  Every GFX command submission should
 * use this virtual address in its META_DATA init package to support
 * SRIOV gfx preemption.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}

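/**
 * amdgpu_virt_init_setting() - apply SRIOV VF default settings
 * @adev: amdgpu device.
 * Enables virtual display with a single CRTC and clears the clock- and
 * power-gating flags.
 */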
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

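/**
 * amdgpu_virt_kiq_rreg() - read a register through the KIQ ring
 * @adev: amdgpu device.
 * @reg: register offset to read.
 * Emits a read-register packet on the KIQ ring, waits for the polling
 * fence, then returns the value from the writeback slot.
 * Return: The register value, or ~0 on failure.
 */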
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/*
	 * Don't wait any longer in the GPU-reset case, since that may
	 * block the gpu_recover() routine forever: e.g. when this read
	 * is triggered from TTM, ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting here, which causes
	 * gpu_recover() to hang.
	 *
	 * Also don't wait any longer in IRQ context.
	 */
#if 0
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_read;

	if (in_interrupt())
		might_sleep();
#endif
	kprintf("amdgpu_virt_kiq_rreg: implement in_interrupt() function\n");
	if (r < 1 && (adev->in_gpu_reset))
		goto failed_kiq_read;

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
	pr_err("failed to read reg:%x\n", reg);
	return ~0;
}

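/**
 * amdgpu_virt_kiq_wreg() - write a register through the KIQ ring
 * @adev: amdgpu device.
 * @reg: register offset to write.
 * @v: value to write.
 * Emits a write-register packet on the KIQ ring and polls the fence
 * until the write is confirmed.
 */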
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/*
	 * Don't wait any longer in the GPU-reset case, since that may
	 * block the gpu_recover() routine forever: e.g. when this write
	 * is triggered from TTM, ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting here, which causes
	 * gpu_recover() to hang.
	 *
	 * Also don't wait any longer in IRQ context.
	 */
#if 0
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_write;

	if (in_interrupt())
		might_sleep();
#endif
	kprintf("amdgpu_virt_kiq_wreg: implement in_interrupt() function\n");
	if (r < 1 && (adev->in_gpu_reset))
		goto failed_kiq_write;

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_kiq_write:
	pr_err("failed to write reg:%x\n", reg);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: whether this is driver init time.
 * At the start of driver init/fini, full gpu access must be requested
 * first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: whether this is driver init time.
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the
 * VM is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    (u64 *)&adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%lx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      (u64 *)&adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

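/**
 * amdgpu_virt_fw_reserve_get_checksum() - compute the pf2vf/vf2pf checksum
 * @obj: object to checksum.
 * @obj_size: size of the object in bytes.
 * @key: checksum seed key.
 * @chksum: value currently stored in the object's checksum field.
 * Sums all bytes of @obj starting from @key, then subtracts the bytes of
 * the stored checksum so the field's own contents do not affect the
 * result.
 * Return: The computed checksum.
 */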
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

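/**
 * amdgpu_virt_init_data_exchange() - set up the pf2vf/vf2pf data exchange
 * @adev: amdgpu device.
 * Locates the pf2vf info block in the reserved fw VRAM region, validates
 * its checksum, and, if valid, initializes the vf2pf block that follows
 * it with the driver version and a fresh checksum.
 */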
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* pf2vf message must be in 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}