xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/uvd_v5_0.c (revision 789731325bde747251c28a37e0a00ed4efb88c46)
1b843c749SSergey Zigachev /*
2b843c749SSergey Zigachev  * Copyright 2014 Advanced Micro Devices, Inc.
3b843c749SSergey Zigachev  *
4b843c749SSergey Zigachev  * Permission is hereby granted, free of charge, to any person obtaining a
5b843c749SSergey Zigachev  * copy of this software and associated documentation files (the "Software"),
6b843c749SSergey Zigachev  * to deal in the Software without restriction, including without limitation
7b843c749SSergey Zigachev  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8b843c749SSergey Zigachev  * and/or sell copies of the Software, and to permit persons to whom the
9b843c749SSergey Zigachev  * Software is furnished to do so, subject to the following conditions:
10b843c749SSergey Zigachev  *
11b843c749SSergey Zigachev  * The above copyright notice and this permission notice shall be included in
12b843c749SSergey Zigachev  * all copies or substantial portions of the Software.
13b843c749SSergey Zigachev  *
14b843c749SSergey Zigachev  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15b843c749SSergey Zigachev  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16b843c749SSergey Zigachev  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17b843c749SSergey Zigachev  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18b843c749SSergey Zigachev  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19b843c749SSergey Zigachev  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20b843c749SSergey Zigachev  * OTHER DEALINGS IN THE SOFTWARE.
21b843c749SSergey Zigachev  *
22b843c749SSergey Zigachev  * Authors: Christian König <christian.koenig@amd.com>
23b843c749SSergey Zigachev  */
24b843c749SSergey Zigachev 
25b843c749SSergey Zigachev #include <linux/firmware.h>
26b843c749SSergey Zigachev #include <drm/drmP.h>
27b843c749SSergey Zigachev #include "amdgpu.h"
28b843c749SSergey Zigachev #include "amdgpu_uvd.h"
29b843c749SSergey Zigachev #include "vid.h"
30b843c749SSergey Zigachev #include "uvd/uvd_5_0_d.h"
31b843c749SSergey Zigachev #include "uvd/uvd_5_0_sh_mask.h"
32b843c749SSergey Zigachev #include "oss/oss_2_0_d.h"
33b843c749SSergey Zigachev #include "oss/oss_2_0_sh_mask.h"
34b843c749SSergey Zigachev #include "bif/bif_5_0_d.h"
35b843c749SSergey Zigachev #include "vi.h"
36b843c749SSergey Zigachev #include "smu/smu_7_1_2_d.h"
37b843c749SSergey Zigachev #include "smu/smu_7_1_2_sh_mask.h"
38b843c749SSergey Zigachev #include "ivsrcid/ivsrcid_vislands30.h"
39b843c749SSergey Zigachev 
/* Forward declarations for handlers referenced before their definitions
 * later in this file. */
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);
48b843c749SSergey Zigachev /**
49b843c749SSergey Zigachev  * uvd_v5_0_ring_get_rptr - get read pointer
50b843c749SSergey Zigachev  *
51b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
52b843c749SSergey Zigachev  *
53b843c749SSergey Zigachev  * Returns the current hardware read pointer
54b843c749SSergey Zigachev  */
uvd_v5_0_ring_get_rptr(struct amdgpu_ring * ring)55b843c749SSergey Zigachev static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
56b843c749SSergey Zigachev {
57b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
58b843c749SSergey Zigachev 
59b843c749SSergey Zigachev 	return RREG32(mmUVD_RBC_RB_RPTR);
60b843c749SSergey Zigachev }
61b843c749SSergey Zigachev 
62b843c749SSergey Zigachev /**
63b843c749SSergey Zigachev  * uvd_v5_0_ring_get_wptr - get write pointer
64b843c749SSergey Zigachev  *
65b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
66b843c749SSergey Zigachev  *
67b843c749SSergey Zigachev  * Returns the current hardware write pointer
68b843c749SSergey Zigachev  */
uvd_v5_0_ring_get_wptr(struct amdgpu_ring * ring)69b843c749SSergey Zigachev static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
70b843c749SSergey Zigachev {
71b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
72b843c749SSergey Zigachev 
73b843c749SSergey Zigachev 	return RREG32(mmUVD_RBC_RB_WPTR);
74b843c749SSergey Zigachev }
75b843c749SSergey Zigachev 
76b843c749SSergey Zigachev /**
77b843c749SSergey Zigachev  * uvd_v5_0_ring_set_wptr - set write pointer
78b843c749SSergey Zigachev  *
79b843c749SSergey Zigachev  * @ring: amdgpu_ring pointer
80b843c749SSergey Zigachev  *
81b843c749SSergey Zigachev  * Commits the write pointer to the hardware
82b843c749SSergey Zigachev  */
uvd_v5_0_ring_set_wptr(struct amdgpu_ring * ring)83b843c749SSergey Zigachev static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
84b843c749SSergey Zigachev {
85b843c749SSergey Zigachev 	struct amdgpu_device *adev = ring->adev;
86b843c749SSergey Zigachev 
87b843c749SSergey Zigachev 	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
88b843c749SSergey Zigachev }
89b843c749SSergey Zigachev 
uvd_v5_0_early_init(void * handle)90b843c749SSergey Zigachev static int uvd_v5_0_early_init(void *handle)
91b843c749SSergey Zigachev {
92b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
93b843c749SSergey Zigachev 	adev->uvd.num_uvd_inst = 1;
94b843c749SSergey Zigachev 
95b843c749SSergey Zigachev 	uvd_v5_0_set_ring_funcs(adev);
96b843c749SSergey Zigachev 	uvd_v5_0_set_irq_funcs(adev);
97b843c749SSergey Zigachev 
98b843c749SSergey Zigachev 	return 0;
99b843c749SSergey Zigachev }
100b843c749SSergey Zigachev 
/*
 * uvd_v5_0_sw_init - software init for the UVD 5.0 IP block
 *
 * Registers the UVD trap interrupt source, runs the common UVD software
 * init, names and initializes the single UVD ring, then resumes UVD and
 * sets up its scheduler entity. Returns 0 on success or the first
 * negative error code encountered.
 */
static int uvd_v5_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	ring = &adev->uvd.inst->ring;
	/* NOTE(review): ksprintf() is the DragonFly kernel sprintf; assumes
	 * ring->name has room for "uvd" — confirm against amdgpu_ring. */
	ksprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	r = amdgpu_uvd_entity_init(adev);

	return r;
}
130b843c749SSergey Zigachev 
/* Software teardown: suspend UVD first, then release the common UVD
 * software state. Returns the first error encountered. */
static int uvd_v5_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	int r = amdgpu_uvd_suspend(adev);

	if (r != 0)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}
142b843c749SSergey Zigachev 
/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Initialize the hardware, boot up the VCPU and do some testing.
 * Returns 0 on success; on ring-test failure the ring is marked
 * not ready and the error is returned.
 */
static int uvd_v5_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int r;

	/* raise UVD clocks and ungate the block before touching the ring */
	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v5_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	/* 10 dwords: five register/value pairs emitted below */
	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	/* program the semaphore timeout registers through the ring itself */
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;

}
202b843c749SSergey Zigachev 
203b843c749SSergey Zigachev /**
204b843c749SSergey Zigachev  * uvd_v5_0_hw_fini - stop the hardware block
205b843c749SSergey Zigachev  *
206b843c749SSergey Zigachev  * @adev: amdgpu_device pointer
207b843c749SSergey Zigachev  *
208b843c749SSergey Zigachev  * Stop the UVD block, mark ring as not ready any more
209b843c749SSergey Zigachev  */
uvd_v5_0_hw_fini(void * handle)210b843c749SSergey Zigachev static int uvd_v5_0_hw_fini(void *handle)
211b843c749SSergey Zigachev {
212b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
213b843c749SSergey Zigachev 	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
214b843c749SSergey Zigachev 
215b843c749SSergey Zigachev 	if (RREG32(mmUVD_STATUS) != 0)
216b843c749SSergey Zigachev 		uvd_v5_0_stop(adev);
217b843c749SSergey Zigachev 
218b843c749SSergey Zigachev 	ring->ready = false;
219b843c749SSergey Zigachev 
220b843c749SSergey Zigachev 	return 0;
221b843c749SSergey Zigachev }
222b843c749SSergey Zigachev 
uvd_v5_0_suspend(void * handle)223b843c749SSergey Zigachev static int uvd_v5_0_suspend(void *handle)
224b843c749SSergey Zigachev {
225b843c749SSergey Zigachev 	int r;
226b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
227b843c749SSergey Zigachev 
228b843c749SSergey Zigachev 	r = uvd_v5_0_hw_fini(adev);
229b843c749SSergey Zigachev 	if (r)
230b843c749SSergey Zigachev 		return r;
231b843c749SSergey Zigachev 	uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
232b843c749SSergey Zigachev 
233b843c749SSergey Zigachev 	return amdgpu_uvd_suspend(adev);
234b843c749SSergey Zigachev }
235b843c749SSergey Zigachev 
/* Resume: restore the UVD software state, then re-run hardware init. */
static int uvd_v5_0_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	int r = amdgpu_uvd_resume(adev);

	if (r != 0)
		return r;

	return uvd_v5_0_hw_init(adev);
}
247b843c749SSergey Zigachev 
/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets: the VCPU cache base
 * address plus three consecutive cache windows (firmware, heap,
 * stack/session data), then mirror the GFX address config registers.
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* programm memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	/* cache window 0: the firmware image */
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: the heap, directly after the firmware */
	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	/* cache window 2: stack plus per-handle session data */
	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	/* mirror the GFX tiling/address configuration into the decoder */
	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
286b843c749SSergey Zigachev 
/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block: program the memory controller, pulse
 * the soft resets, boot the VCPU (retrying with VCPU resets if it does
 * not come up), then program and enable the RBC ring buffer.
 * Returns 0 on success, -1 if the VCPU never reports ready.
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/*disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v5_0_mc_resume(adev);

	/* disable interupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	/* MPC mux programming; magic values carried over from upstream */
	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL,  1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	/* up to 10 attempts: poll UVD_STATUS for the ready bit (bit 1);
	 * on each failed attempt pulse the VCPU soft reset and retry */
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	/* NOTE(review): mask (2 << 1) is bit 2, not bit 4; comment kept from
	 * upstream — verify against the register spec */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	/* build the RB control word, ring fetch disabled for now */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* programm the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	/* re-enable ring fetches now that the ring is fully programmed */
	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}
427b843c749SSergey Zigachev 
/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block: idle the RBC, stall the buses, hold the VCPU in
 * reset, cut its clock, and finally clear UVD_STATUS.
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* zero the status so hw_fini's RREG32(mmUVD_STATUS) check sees idle */
	WREG32(mmUVD_STATUS, 0);
}
456b843c749SSergey Zigachev 
/**
 * uvd_v5_0_ring_emit_fence - emit an fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to (only 40 bits used)
 * @seq: sequence number to write at @addr
 * @flags: AMDGPU_FENCE_FLAG_* bits; 64-bit fences are not supported
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* first VCPU command (cmd 0): presumably the fence write of seq to
	 * addr — NOTE(review): confirm command encoding against UVD docs */
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	/* second VCPU command (cmd 2): presumably the trap interrupt */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}
486b843c749SSergey Zigachev 
/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register: seed
 * UVD_CONTEXT_ID, have the ring overwrite it, and poll until the new
 * value lands. Returns 0 on success, -EINVAL on timeout.
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	/* poll up to usec_timeout iterations for the ring write to land */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
528b843c749SSergey Zigachev 
/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vmid: unused by this implementation
 * @ctx_switch: unused by this implementation
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vmid, bool ctx_switch)
{
	/* IB base address (64-bit, split low/high) followed by its size */
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
548b843c749SSergey Zigachev 
/* Pad the ring with @count NOP dwords. NOPs are emitted as
 * register/value pairs, so both the current wptr and @count must be
 * even (warned on otherwise). */
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	uint32_t pairs;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (pairs = count / 2; pairs != 0; pairs--) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}
560b843c749SSergey Zigachev 
uvd_v5_0_is_idle(void * handle)561b843c749SSergey Zigachev static bool uvd_v5_0_is_idle(void *handle)
562b843c749SSergey Zigachev {
563b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
564b843c749SSergey Zigachev 
565b843c749SSergey Zigachev 	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
566b843c749SSergey Zigachev }
567b843c749SSergey Zigachev 
uvd_v5_0_wait_for_idle(void * handle)568b843c749SSergey Zigachev static int uvd_v5_0_wait_for_idle(void *handle)
569b843c749SSergey Zigachev {
570b843c749SSergey Zigachev 	unsigned i;
571b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
572b843c749SSergey Zigachev 
573b843c749SSergey Zigachev 	for (i = 0; i < adev->usec_timeout; i++) {
574b843c749SSergey Zigachev 		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
575b843c749SSergey Zigachev 			return 0;
576b843c749SSergey Zigachev 	}
577b843c749SSergey Zigachev 	return -ETIMEDOUT;
578b843c749SSergey Zigachev }
579b843c749SSergey Zigachev 
/**
 * uvd_v5_0_soft_reset - soft reset the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stops the engine, asserts the SRBM soft-reset bit for UVD, waits
 * briefly, then restarts the engine (uvd_v5_0_start() is expected to
 * bring the block back up; not visible here — confirm it deasserts the
 * reset).  Returns the result of uvd_v5_0_start().
 */
static int uvd_v5_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v5_0_stop(adev);

	/* set only the UVD reset bit; all other SRBM reset bits are preserved */
	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	/* give the hardware time to settle after asserting reset */
	mdelay(5);

	return uvd_v5_0_start(adev);
}
592b843c749SSergey Zigachev 
/**
 * uvd_v5_0_set_interrupt_state - enable/disable a UVD interrupt source
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source descriptor (unused)
 * @type: interrupt type (unused)
 * @state: requested interrupt state (unused)
 *
 * Stub — state switching is not implemented for UVD 5.0; always
 * reports success.
 */
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}
601b843c749SSergey Zigachev 
/**
 * uvd_v5_0_process_interrupt - handle a UVD trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source descriptor (unused)
 * @entry: decoded interrupt vector entry (unused)
 *
 * Every UVD trap is handled by processing fences on the single UVD ring.
 * Always returns 0.
 */
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
	return 0;
}
610b843c749SSergey Zigachev 
/**
 * uvd_v5_0_enable_clock_gating - program coarse clock gating for UVD
 *
 * @adev: amdgpu_device pointer
 * @enable: true to gate clocks to the listed sub-blocks, false to clear
 *          both gate registers entirely
 *
 * When enabling, gate bits are OR-ed on top of the current register
 * contents; the VCPU gate is only added when UVD powergating is
 * supported, and the REGS gate is always cleared.
 */
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t suvd_gate, cgc_gate;
	const uint32_t suvd_mask = UVD_SUVD_CGC_GATE__SRE_MASK |
				   UVD_SUVD_CGC_GATE__SIT_MASK |
				   UVD_SUVD_CGC_GATE__SMP_MASK |
				   UVD_SUVD_CGC_GATE__SCM_MASK |
				   UVD_SUVD_CGC_GATE__SDB_MASK;

	suvd_gate = RREG32(mmUVD_SUVD_CGC_GATE);
	cgc_gate = RREG32(mmUVD_CGC_GATE);

	if (enable) {
		cgc_gate |= (UVD_CGC_GATE__SYS_MASK      |
			     UVD_CGC_GATE__UDEC_MASK     |
			     UVD_CGC_GATE__MPEG2_MASK    |
			     UVD_CGC_GATE__RBC_MASK      |
			     UVD_CGC_GATE__LMI_MC_MASK   |
			     UVD_CGC_GATE__IDCT_MASK     |
			     UVD_CGC_GATE__MPRD_MASK     |
			     UVD_CGC_GATE__MPC_MASK      |
			     UVD_CGC_GATE__LBSI_MASK     |
			     UVD_CGC_GATE__LRBBM_MASK    |
			     UVD_CGC_GATE__UDEC_RE_MASK  |
			     UVD_CGC_GATE__UDEC_CM_MASK  |
			     UVD_CGC_GATE__UDEC_IT_MASK  |
			     UVD_CGC_GATE__UDEC_DB_MASK  |
			     UVD_CGC_GATE__UDEC_MP_MASK  |
			     UVD_CGC_GATE__WCB_MASK      |
			     UVD_CGC_GATE__JPEG_MASK     |
			     UVD_CGC_GATE__SCPU_MASK);
		/* only in pg enabled, we can gate clock to vcpu */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			cgc_gate |= UVD_CGC_GATE__VCPU_MASK;
		cgc_gate &= ~UVD_CGC_GATE__REGS_MASK;
		suvd_gate |= suvd_mask;
	} else {
		/* ungate everything */
		cgc_gate = 0;
		suvd_gate = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, suvd_gate);
	WREG32(mmUVD_CGC_GATE, cgc_gate);
}
656b843c749SSergey Zigachev 
/**
 * uvd_v5_0_set_sw_clock_gating - program software clock-gating control
 *
 * @adev: amdgpu_device pointer
 *
 * Enables dynamic clock mode with a gate-delay-timer field of 1 and a
 * clock-off-delay field of 4, and clears the per-sub-block MODE bits in
 * both UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL.
 */
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);


	/* clear the delay fields before programming new values */
	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);


	/* dynamic clock mode, gate delay timer = 1, clock off delay = 4 */
	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	/* take every engine sub-block out of its individual gating mode */
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK);
	/* likewise for the SUVD sub-blocks */
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
703b843c749SSergey Zigachev 
#if 0
/*
 * uvd_v5_0_set_hw_clock_gating - enable hardware-controlled clock gating
 *
 * Compiled out: its only call site in uvd_v5_0_set_clockgating_state()
 * is also commented out.  Sets all CGC and SUVD gate bits on top of the
 * current register contents.  Kept for reference.
 */
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
744b843c749SSergey Zigachev 
/**
 * uvd_v5_0_enable_mgcg - toggle UVD medium-grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: request to enable gating (honored only when the device
 *          advertises AMD_CG_SUPPORT_UVD_MGCG)
 *
 * Sets or clears the low 12 bits of the indirect UVD_CGC_MEM_CTRL
 * register and the dynamic-clock-mode bit in UVD_CGC_CTRL.
 */
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 mem_ctrl, ctrl, ctrl_old;
	bool gate = enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG);

	/* memory clock gating lives in the low 12 bits of the indirect reg */
	mem_ctrl = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
	if (gate)
		mem_ctrl |= 0xfff;
	else
		mem_ctrl &= ~0xfff;
	WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, mem_ctrl);

	/* flip the dynamic clock mode bit; skip the write if unchanged */
	ctrl_old = ctrl = RREG32(mmUVD_CGC_CTRL);
	if (gate)
		ctrl |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	else
		ctrl &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	if (ctrl != ctrl_old)
		WREG32(mmUVD_CGC_CTRL, ctrl);
}
770b843c749SSergey Zigachev 
uvd_v5_0_set_clockgating_state(void * handle,enum amd_clockgating_state state)771b843c749SSergey Zigachev static int uvd_v5_0_set_clockgating_state(void *handle,
772b843c749SSergey Zigachev 					  enum amd_clockgating_state state)
773b843c749SSergey Zigachev {
774b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
775b843c749SSergey Zigachev 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
776b843c749SSergey Zigachev 
777b843c749SSergey Zigachev 	if (enable) {
778b843c749SSergey Zigachev 		/* wait for STATUS to clear */
779b843c749SSergey Zigachev 		if (uvd_v5_0_wait_for_idle(handle))
780b843c749SSergey Zigachev 			return -EBUSY;
781b843c749SSergey Zigachev 		uvd_v5_0_enable_clock_gating(adev, true);
782b843c749SSergey Zigachev 
783b843c749SSergey Zigachev 		/* enable HW gates because UVD is idle */
784b843c749SSergey Zigachev /*		uvd_v5_0_set_hw_clock_gating(adev); */
785b843c749SSergey Zigachev 	} else {
786b843c749SSergey Zigachev 		uvd_v5_0_enable_clock_gating(adev, false);
787b843c749SSergey Zigachev 	}
788b843c749SSergey Zigachev 
789b843c749SSergey Zigachev 	uvd_v5_0_set_sw_clock_gating(adev);
790b843c749SSergey Zigachev 	return 0;
791b843c749SSergey Zigachev }
792b843c749SSergey Zigachev 
uvd_v5_0_set_powergating_state(void * handle,enum amd_powergating_state state)793b843c749SSergey Zigachev static int uvd_v5_0_set_powergating_state(void *handle,
794b843c749SSergey Zigachev 					  enum amd_powergating_state state)
795b843c749SSergey Zigachev {
796b843c749SSergey Zigachev 	/* This doesn't actually powergate the UVD block.
797b843c749SSergey Zigachev 	 * That's done in the dpm code via the SMC.  This
798b843c749SSergey Zigachev 	 * just re-inits the block as necessary.  The actual
799b843c749SSergey Zigachev 	 * gating still happens in the dpm code.  We should
800b843c749SSergey Zigachev 	 * revisit this when there is a cleaner line between
801b843c749SSergey Zigachev 	 * the smc and the hw blocks
802b843c749SSergey Zigachev 	 */
803b843c749SSergey Zigachev 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
804b843c749SSergey Zigachev 	int ret = 0;
805b843c749SSergey Zigachev 
806b843c749SSergey Zigachev 	if (state == AMD_PG_STATE_GATE) {
807b843c749SSergey Zigachev 		uvd_v5_0_stop(adev);
808b843c749SSergey Zigachev 	} else {
809b843c749SSergey Zigachev 		ret = uvd_v5_0_start(adev);
810b843c749SSergey Zigachev 		if (ret)
811b843c749SSergey Zigachev 			goto out;
812b843c749SSergey Zigachev 	}
813b843c749SSergey Zigachev 
814b843c749SSergey Zigachev out:
815b843c749SSergey Zigachev 	return ret;
816b843c749SSergey Zigachev }
817b843c749SSergey Zigachev 
/**
 * uvd_v5_0_get_clockgating_state - report active clock-gating features
 *
 * @handle: amdgpu_device pointer
 * @flags: AMD_CG_SUPPORT_* bits are OR-ed in for each active feature;
 *         existing bits are never cleared
 *
 * Bails out without touching @flags when the SMC reports UVD as
 * powergated (the state cannot be queried then, per the message below).
 */
static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* serialize against concurrent power-management changes */
	mutex_lock(&adev->pm.mutex);

	if (RREG32_SMC(ixCURRENT_PG_STATUS) &
				CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}
839b843c749SSergey Zigachev 
/* IP-block callback table for UVD 5.0; late_init is intentionally unset. */
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
	.name = "uvd_v5_0",
	.early_init = uvd_v5_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v5_0_sw_init,
	.sw_fini = uvd_v5_0_sw_fini,
	.hw_init = uvd_v5_0_hw_init,
	.hw_fini = uvd_v5_0_hw_fini,
	.suspend = uvd_v5_0_suspend,
	.resume = uvd_v5_0_resume,
	.is_idle = uvd_v5_0_is_idle,
	.wait_for_idle = uvd_v5_0_wait_for_idle,
	.soft_reset = uvd_v5_0_soft_reset,
	.set_clockgating_state = uvd_v5_0_set_clockgating_state,
	.set_powergating_state = uvd_v5_0_set_powergating_state,
	.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};
857b843c749SSergey Zigachev 
/* Ring callback table for the single UVD 5.0 decode ring (32-bit wptr). */
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v5_0_ring_get_rptr,
	.get_wptr = uvd_v5_0_ring_get_wptr,
	.set_wptr = uvd_v5_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		14, /* uvd_v5_0_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
	.emit_ib = uvd_v5_0_ring_emit_ib,
	.emit_fence = uvd_v5_0_ring_emit_fence,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v5_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};
878b843c749SSergey Zigachev 
/**
 * uvd_v5_0_set_ring_funcs - attach the ring callback table
 *
 * @adev: amdgpu_device pointer
 */
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}
883b843c749SSergey Zigachev 
/* Interrupt source callbacks for UVD 5.0. */
static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
	.set = uvd_v5_0_set_interrupt_state,
	.process = uvd_v5_0_process_interrupt,
};
888b843c749SSergey Zigachev 
/**
 * uvd_v5_0_set_irq_funcs - attach the interrupt callback table
 *
 * @adev: amdgpu_device pointer
 *
 * UVD 5.0 exposes a single interrupt type.
 */
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.inst->irq.num_types = 1;
	adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}
894b843c749SSergey Zigachev 
/* IP block descriptor registered with the amdgpu core for UVD 5.0.0. */
const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_UVD,
		.major = 5,
		.minor = 0,
		.rev = 0,
		.funcs = &uvd_v5_0_ip_funcs,
};
903