/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"


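/*
 * Local register and field definitions used by this file; treating them
 * as supplements to the generated headers is an assumption based on their
 * being defined here rather than included.
 */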
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
#define mmGRBM_GFX_INDEX_DEFAULT	0xE0000000

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

#define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))

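/*
 * GRBM_GFX_INDEX steers subsequent VCE register accesses to one hardware
 * instance: GET_VCE_INSTANCE(i) selects instance i with all of its pipes,
 * and writing mmGRBM_GFX_INDEX_DEFAULT restores the broadcast default.
 */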
#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 v;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return v;
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->grbm_idx_mutex);
	if (adev->vce.harvest_config == 0 ||
	    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);
}

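/*
 * The CGTT override bit keeps the VCE clocks forced on while the
 * clock-gating registers are reprogrammed; callers set it before touching
 * the gating setup and clear it afterwards (see
 * vce_v3_0_set_vce_sw_clock_gating below).
 */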
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}

static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	 * With the clocks in the gated state the core is still
	 * accessible but the firmware will throttle the clocks on the
	 * fly as necessary.
	 */
	if (!gated) {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(mmVCE_CLOCK_GATING_B, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		WREG32(mmVCE_UENC_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0x3ff;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}
	vce_v3_0_override_vce_clock_gating(adev, false);
}

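/*
 * Poll VCE_STATUS for the firmware-loaded flag: up to 100 polls of 10 ms
 * each (~1 s) per attempt, with an ECPU soft-reset pulse between the ten
 * attempts before giving up.
 */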
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		/* Program the ring registers through instance 0's register
		 * space when both instances (or only instance 0) are present;
		 * use instance 1's register space only when instance 1 is the
		 * sole one available.
		 */
		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
			ring = &adev->vce.ring[0];
			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

			ring = &adev->vce.ring[1];
			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

			ring = &adev->vce.ring[2];
			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
		}

		vce_v3_0_mc_resume(adev, idx);
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);

		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
		mdelay(100);

		r = vce_v3_0_firmware_loaded(adev);

		/* clear BUSY flag */
		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_stop(struct amdgpu_device *adev)
{
	int idx;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {
		if (adev->vce.harvest_config & (1 << idx))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
		else
			WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);

		/* hold on ECPU */
		WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);

		/* clear VCE STATUS */
		WREG32(mmVCE_STATUS, 0);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

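/*
 * Derive which VCE instances were fused off.  Fiji and Stoney always run
 * with a single instance; otherwise the harvest fuses are read through
 * different SMC registers on APUs and dGPUs.  The Polaris/VEGAM special
 * case in the default branch reflects that those parts expose only one
 * usable instance even when the fuse field reads zero (an inference from
 * the code, not a documented statement).
 */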
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY))
		return AMDGPU_VCE_HARVEST_VCE1;

	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		if ((adev->asic_type == CHIP_POLARIS10) ||
		    (adev->asic_type == CHIP_POLARIS11) ||
		    (adev->asic_type == CHIP_POLARIS12) ||
		    (adev->asic_type == CHIP_VEGAM))
			return AMDGPU_VCE_HARVEST_VCE1;

		return 0;
	}
}

static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	adev->vce.num_rings = 3;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	/* 52.8.3 required for 3 ring support */
	if (adev->vce.fw_version < FW_52_8_3)
		adev->vce.num_rings = 2;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		ksprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);

	return r;
}

static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v3_0_override_vce_clock_gating(adev, true);

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);

	for (i = 0; i < adev->vce.num_rings; i++)
		adev->vce.ring[i].ready = false;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v3_0_hw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_wait_for_idle(handle);
	if (r)
		return r;

	vce_v3_0_stop(adev);
	return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}

static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v3_0_hw_init(adev);
}

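/*
 * Program the VCPU cache windows for one instance.  The firmware BO is
 * laid out as the firmware image (VCE_V3_0_FW_SIZE) followed by a
 * stack + data region per instance, matching the FW + 2 * (stack + data)
 * allocation made in vce_v3_0_sw_init, so instance 1 skips past
 * instance 0's region.
 */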
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);

	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L   /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L   /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L   /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)

static bool vce_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|    bit6   |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3..6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}

static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int vce_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_suspend(adev);
}


static int vce_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v3_0_resume(adev);
}

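/*
 * Interrupt plumbing: the .set hook below toggles the trap-interrupt
 * enable bit, and the handler acks VCE_SYS_INT_STATUS and signals the
 * fence for the ring identified by src_data[0].
 */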
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

		if (!enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	if (state == AMD_PG_STATE_GATE) {
		ret = vce_v3_0_stop(adev);
		if (ret)
			goto out;
	} else {
		ret = vce_v3_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
		goto out;
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);

	/* AMD_CG_SUPPORT_VCE_MGCG */
	data = RREG32(mmVCE_CLOCK_GATING_A);
	if (data & (0x04 << 4))
		*flags |= AMD_CG_SUPPORT_VCE_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.check_soft_reset = vce_v3_0_check_soft_reset,
	.pre_soft_reset = vce_v3_0_pre_soft_reset,
	.soft_reset = vce_v3_0_soft_reset,
	.post_soft_reset = vce_v3_0_post_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
	.get_clockgating_state = vce_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size =
		4 + /* vce_v3_0_emit_pipeline_sync */
		6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		6 + /* vce_v3_0_emit_vm_flush */
		4 + /* vce_v3_0_emit_pipeline_sync */
		6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
	.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
	.emit_ib = vce_v3_0_ring_emit_ib,
	.emit_vm_flush = vce_v3_0_emit_vm_flush,
	.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

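/*
 * Stoney and newer submit through VM-mode rings (IBs referenced by GPU
 * virtual address, with an explicit page-table update and TLB flush);
 * older parts use physical-mode rings where the CS parser validates and
 * translates buffer addresses at submission time.
 */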
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
			adev->vce.ring[i].me = i;
		}
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
			adev->vce.ring[i].me = i;
		}
		DRM_INFO("VCE enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version vce_v3_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 3,
	.minor = 4,
	.rev = 0,
	.funcs = &vce_v3_0_ip_funcs,
};