/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpufw_bonaire_vce"
#define FIRMWARE_KABINI		"amdgpufw_kabini_vce"
#define FIRMWARE_KAVERI		"amdgpufw_kaveri_vce"
#define FIRMWARE_HAWAII		"amdgpufw_hawaii_vce"
#define FIRMWARE_MULLINS	"amdgpufw_mullins_vce"
#endif
#define FIRMWARE_TONGA		"amdgpufw_tonga_vce"
#define FIRMWARE_CARRIZO	"amdgpufw_carrizo_vce"
#define FIRMWARE_FIJI		"amdgpufw_fiji_vce"
#define FIRMWARE_STONEY		"amdgpufw_stoney_vce"
#define FIRMWARE_POLARIS10	"amdgpufw_polaris10_vce"
#define FIRMWARE_POLARIS11	"amdgpufw_polaris11_vce"
#define FIRMWARE_POLARIS12	"amdgpufw_polaris12_vce"
#define FIRMWARE_VEGAM		"amdgpufw_vegam_vce"

#define FIRMWARE_VEGA10		"amdgpufw_vega10_vce"
#define FIRMWARE_VEGA12		"amdgpufw_vega12_vce"
#define FIRMWARE_VEGA20		"amdgpufw_vega20_vce"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

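	/*
	 * The ucode_version word packs the firmware release as
	 * [31:20] major, [19:8] minor and [7:0] binary ID; it is re-packed
	 * below as (major << 24 | minor << 16 | binary << 8) so later checks
	 * like (fw_version >> 24) >= 52 can test the major version directly.
	 */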
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    (u64 *)&adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

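	/* start with every session slot free: no handle, no owning file */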
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	lockinit(&adev->vce.idle_mutex, "agavim", 0, LK_CANRECURSE);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, (u64 *)&adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

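	/* refuse to suspend while any encode session handle is still in use */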
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
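	/*
	 * Copy the firmware image, minus the common header, back into the
	 * VCPU BO so the VCE block sees a valid image again after resume.
	 */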
	memcpy_toio(cpu_addr, (adev->vce.fw->data) + offset,
		    (adev->vce.fw->datasize) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

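	/* count the fences that are emitted but not yet signaled on all rings */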
	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
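	/*
	 * If cancel_delayed_work_sync() returns false the idle work was not
	 * pending, which means it may already have powered VCE down, so the
	 * clocks and power state have to be brought back up here.
	 */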
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);

		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

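	/*
	 * The tail of the IB BO itself is reused as a dummy feedback buffer
	 * address; the message only has to point at valid GPU memory for
	 * this test, the driver never inspects the feedback data.
	 */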
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the IB directly instead of through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
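	/*
	 * Tighten the allowed placement window [fpfn, lpfn) so that the
	 * buffer range referenced by this command ends up below the 4GB
	 * boundary, as required by VCE.
	 */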
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}


/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

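	/*
	 * Translate the address from the VM mapping to the BO's real GPU
	 * offset, then subtract the per-index offset again since it was
	 * only added to locate and bounds-check the right mapping.
	 */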
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

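	/*
	 * First pass: only look at the commands that reference buffers and
	 * validate that every BO can be placed without crossing 4GB.
	 */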
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

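	/*
	 * Second pass: track session create/destroy, reject unknown or
	 * malformed commands and patch the buffer addresses with the real
	 * GPU offsets.
	 */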
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

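	/*
	 * In VM mode no relocation patching is done; only the session
	 * create/destroy bookkeeping and basic command framing are checked
	 * while walking the stream.
	 */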
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: GPU address to write the fence value to
 * @seq: sequence number of the fence
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	rptr = amdgpu_ring_get_rptr(ring);

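	/*
	 * Submit a single END command and then poll the read pointer: if the
	 * engine is alive it consumes the command and the rptr advances.
	 */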
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}