1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/firmware.h>
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_uvd.h"
28 #include "soc15.h"
29 #include "soc15d.h"
30 #include "soc15_common.h"
31 #include "mmsch_v1_0.h"
32 
33 #include "uvd/uvd_7_0_offset.h"
34 #include "uvd/uvd_7_0_sh_mask.h"
35 #include "vce/vce_4_0_offset.h"
36 #include "vce/vce_4_0_default.h"
37 #include "vce/vce_4_0_sh_mask.h"
38 #include "nbif/nbif_6_1_offset.h"
39 #include "hdp/hdp_4_0_offset.h"
40 #include "mmhub/mmhub_1_0_offset.h"
41 #include "mmhub/mmhub_1_0_sh_mask.h"
42 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
43 
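/*
 * Local mirrors of the UVD harvesting fuse register bits; presumably defined
 * here because the imported register headers do not carry them.
 */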
44 #define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
45 #define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
46 //UVD_PG0_CC_UVD_HARVESTING
47 #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
48 #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
49 
50 #define UVD7_MAX_HW_INSTANCES_VEGA20			2
51 
52 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
53 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
54 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
55 static int uvd_v7_0_start(struct amdgpu_device *adev);
56 static void uvd_v7_0_stop(struct amdgpu_device *adev);
57 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
58 
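/* IH client ID for each UVD instance; Vega20 exposes a second instance */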
59 static int amdgpu_ih_clientid_uvds[] = {
60 	SOC15_IH_CLIENTID_UVD,
61 	SOC15_IH_CLIENTID_UVD1
62 };
63 
64 /**
65  * uvd_v7_0_ring_get_rptr - get read pointer
66  *
67  * @ring: amdgpu_ring pointer
68  *
69  * Returns the current hardware read pointer
70  */
71 static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
72 {
73 	struct amdgpu_device *adev = ring->adev;
74 
75 	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
76 }
77 
78 /**
79  * uvd_v7_0_enc_ring_get_rptr - get enc read pointer
80  *
81  * @ring: amdgpu_ring pointer
82  *
83  * Returns the current hardware enc read pointer
84  */
85 static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
86 {
87 	struct amdgpu_device *adev = ring->adev;
88 
89 	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
90 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
91 	else
92 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
93 }
94 
95 /**
96  * uvd_v7_0_ring_get_wptr - get write pointer
97  *
98  * @ring: amdgpu_ring pointer
99  *
100  * Returns the current hardware write pointer
101  */
102 static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
103 {
104 	struct amdgpu_device *adev = ring->adev;
105 
106 	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
107 }
108 
109 /**
110  * uvd_v7_0_enc_ring_get_wptr - get enc write pointer
111  *
112  * @ring: amdgpu_ring pointer
113  *
114  * Returns the current hardware enc write pointer
115  */
116 static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
117 {
118 	struct amdgpu_device *adev = ring->adev;
119 
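	/* doorbell rings shadow their write pointer in the writeback buffer */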
120 	if (ring->use_doorbell)
121 		return adev->wb.wb[ring->wptr_offs];
122 
123 	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
124 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
125 	else
126 		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
127 }
128 
129 /**
130  * uvd_v7_0_ring_set_wptr - set write pointer
131  *
132  * @ring: amdgpu_ring pointer
133  *
134  * Commits the write pointer to the hardware
135  */
136 static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
137 {
138 	struct amdgpu_device *adev = ring->adev;
139 
140 	WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
141 }
142 
143 /**
144  * uvd_v7_0_enc_ring_set_wptr - set enc write pointer
145  *
146  * @ring: amdgpu_ring pointer
147  *
148  * Commits the enc write pointer to the hardware
149  */
150 static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
151 {
152 	struct amdgpu_device *adev = ring->adev;
153 
154 	if (ring->use_doorbell) {
155 		/* XXX check if swapping is necessary on BE */
156 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
157 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
158 		return;
159 	}
160 
161 	if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
162 		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
163 			lower_32_bits(ring->wptr));
164 	else
165 		WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
166 			lower_32_bits(ring->wptr));
167 }
168 
169 /**
170  * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
171  *
172  * @ring: the engine to test on
173  *
174  */
175 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
176 {
177 	struct amdgpu_device *adev = ring->adev;
178 	uint32_t rptr;
179 	unsigned i;
180 	int r;
181 
182 	if (amdgpu_sriov_vf(adev))
183 		return 0;
184 
185 	r = amdgpu_ring_alloc(ring, 16);
186 	if (r) {
187 		DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
188 			  ring->me, ring->idx, r);
189 		return r;
190 	}
191 
192 	rptr = amdgpu_ring_get_rptr(ring);
193 
194 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
195 	amdgpu_ring_commit(ring);
196 
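	/* wait for the engine to consume the END command and advance the rptr */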
197 	for (i = 0; i < adev->usec_timeout; i++) {
198 		if (amdgpu_ring_get_rptr(ring) != rptr)
199 			break;
200 		DRM_UDELAY(1);
201 	}
202 
203 	if (i < adev->usec_timeout) {
204 		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
205 			 ring->me, ring->idx, i);
206 	} else {
207 		DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
208 			  ring->me, ring->idx);
209 		r = -ETIMEDOUT;
210 	}
211 
212 	return r;
213 }
214 
215 /**
216  * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
217  *
219  * @ring: ring we should submit the msg to
220  * @handle: session handle to use
221  * @fence: optional fence to return
222  *
223  * Open up a stream for HW test
224  */
225 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
226 				       struct dma_fence **fence)
227 {
228 	const unsigned ib_size_dw = 16;
229 	struct amdgpu_job *job;
230 	struct amdgpu_ib *ib;
231 	struct dma_fence *f = NULL;
232 	uint64_t dummy;
233 	int i, r;
234 
235 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
236 	if (r)
237 		return r;
238 
239 	ib = &job->ibs[0];
240 	dummy = ib->gpu_addr + 1024;
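	/* scratch offset inside the IB BO, handed to the firmware as a dummy buffer address */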
241 
242 	ib->length_dw = 0;
243 	ib->ptr[ib->length_dw++] = 0x00000018;
244 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
245 	ib->ptr[ib->length_dw++] = handle;
246 	ib->ptr[ib->length_dw++] = 0x00000000;
247 	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
248 	ib->ptr[ib->length_dw++] = dummy;
249 
250 	ib->ptr[ib->length_dw++] = 0x00000014;
251 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
252 	ib->ptr[ib->length_dw++] = 0x0000001c;
253 	ib->ptr[ib->length_dw++] = 0x00000000;
254 	ib->ptr[ib->length_dw++] = 0x00000000;
255 
256 	ib->ptr[ib->length_dw++] = 0x00000008;
257 	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
258 
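	/* zero-pad the remainder of the IB up to its allocated size */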
259 	for (i = ib->length_dw; i < ib_size_dw; ++i)
260 		ib->ptr[i] = 0x0;
261 
262 	r = amdgpu_job_submit_direct(job, ring, &f);
263 	if (r)
264 		goto err;
265 
266 	if (fence)
267 		*fence = dma_fence_get(f);
268 	dma_fence_put(f);
269 	return 0;
270 
271 err:
272 	amdgpu_job_free(job);
273 	return r;
274 }
275 
276 /**
277  * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
278  *
279  * @ring: ring we should submit the msg to
280  * @handle: session handle to use
281  * @direct: whether to submit the msg directly or through the scheduler
282  * @fence: optional fence to return
283  *
284  * Close up a stream for HW test or if userspace failed to do so
285  */
286 int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
287 				 bool direct, struct dma_fence **fence)
288 {
289 	const unsigned ib_size_dw = 16;
290 	struct amdgpu_job *job;
291 	struct amdgpu_ib *ib;
292 	struct dma_fence *f = NULL;
293 	uint64_t dummy;
294 	int i, r;
295 
296 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
297 	if (r)
298 		return r;
299 
300 	ib = &job->ibs[0];
301 	dummy = ib->gpu_addr + 1024;
302 
303 	ib->length_dw = 0;
304 	ib->ptr[ib->length_dw++] = 0x00000018;
305 	ib->ptr[ib->length_dw++] = 0x00000001;
306 	ib->ptr[ib->length_dw++] = handle;
307 	ib->ptr[ib->length_dw++] = 0x00000000;
308 	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
309 	ib->ptr[ib->length_dw++] = dummy;
310 
311 	ib->ptr[ib->length_dw++] = 0x00000014;
312 	ib->ptr[ib->length_dw++] = 0x00000002;
313 	ib->ptr[ib->length_dw++] = 0x0000001c;
314 	ib->ptr[ib->length_dw++] = 0x00000000;
315 	ib->ptr[ib->length_dw++] = 0x00000000;
316 
317 	ib->ptr[ib->length_dw++] = 0x00000008;
318 	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
319 
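	/* zero-pad the remainder of the IB up to its allocated size */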
320 	for (i = ib->length_dw; i < ib_size_dw; ++i)
321 		ib->ptr[i] = 0x0;
322 
323 	if (direct)
324 		r = amdgpu_job_submit_direct(job, ring, &f);
325 	else
326 		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
327 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
328 	if (r)
329 		goto err;
330 
331 	if (fence)
332 		*fence = dma_fence_get(f);
333 	dma_fence_put(f);
334 	return 0;
335 
336 err:
337 	amdgpu_job_free(job);
338 	return r;
339 }
340 
341 /**
342  * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
343  *
344  * @ring: the engine to test on
345  *
346  */
347 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
348 {
349 	struct dma_fence *fence = NULL;
350 	long r;
351 
352 	r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
353 	if (r) {
354 		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
355 		goto error;
356 	}
357 
358 	r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence);
359 	if (r) {
360 		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
361 		goto error;
362 	}
363 
364 	r = dma_fence_wait_timeout(fence, false, timeout);
365 	if (r == 0) {
366 		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
367 		r = -ETIMEDOUT;
368 	} else if (r < 0) {
369 		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
370 	} else {
371 		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
372 		r = 0;
373 	}
374 error:
375 	dma_fence_put(fence);
376 	return r;
377 }
378 
379 static int uvd_v7_0_early_init(void *handle)
380 {
381 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
382 
383 	if (adev->asic_type == CHIP_VEGA20) {
384 		u32 harvest;
385 		int i;
386 
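		/* read the harvest fuse on each instance and mask off disabled ones */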
387 		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
388 		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
389 			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
390 			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
391 				adev->uvd.harvest_config |= 1 << i;
392 			}
393 		}
394 		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
395 						 AMDGPU_UVD_HARVEST_UVD1))
396 			/* both instances are harvested, disable the block */
397 			return -ENOENT;
398 	} else {
399 		adev->uvd.num_uvd_inst = 1;
400 	}
401 
402 	if (amdgpu_sriov_vf(adev))
403 		adev->uvd.num_enc_rings = 1;
404 	else
405 		adev->uvd.num_enc_rings = 2;
406 	uvd_v7_0_set_ring_funcs(adev);
407 	uvd_v7_0_set_enc_ring_funcs(adev);
408 	uvd_v7_0_set_irq_funcs(adev);
409 
410 	return 0;
411 }
412 
413 static int uvd_v7_0_sw_init(void *handle)
414 {
415 	struct amdgpu_ring *ring;
416 
417 	int i, j, r;
418 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
419 
420 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
421 		if (adev->uvd.harvest_config & (1 << j))
422 			continue;
423 		/* UVD TRAP */
424 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
425 		if (r)
426 			return r;
427 
428 		/* UVD ENC TRAP */
429 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
430 			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
431 			if (r)
432 				return r;
433 		}
434 	}
435 
436 	r = amdgpu_uvd_sw_init(adev);
437 	if (r)
438 		return r;
439 
440 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
441 		const struct common_firmware_header *hdr;
442 		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
443 		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
444 		adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
445 		adev->firmware.fw_size +=
446 			roundup2(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
447 		DRM_INFO("PSP loading UVD firmware\n");
448 	}
449 
450 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
451 		if (adev->uvd.harvest_config & (1 << j))
452 			continue;
453 		if (!amdgpu_sriov_vf(adev)) {
454 			ring = &adev->uvd.inst[j].ring;
455 			snprintf(ring->name, sizeof(ring->name), "uvd<%d>", j);
456 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
457 			if (r)
458 				return r;
459 		}
460 
461 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
462 			ring = &adev->uvd.inst[j].ring_enc[i];
463 			snprintf(ring->name, sizeof(ring->name), "uvd_enc%d<%d>", i, j);
464 			if (amdgpu_sriov_vf(adev)) {
465 				ring->use_doorbell = true;
466 
467 				/* currently only the first encoding ring is used under
468 				 * sriov, so park the other rings at an unused doorbell location.
469 				 */
470 				if (i == 0)
471 					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
472 				else
473 					ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
474 			}
475 			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
476 			if (r)
477 				return r;
478 		}
479 	}
480 
481 	r = amdgpu_uvd_resume(adev);
482 	if (r)
483 		return r;
484 
485 	r = amdgpu_uvd_entity_init(adev);
486 	if (r)
487 		return r;
488 
489 	r = amdgpu_virt_alloc_mm_table(adev);
490 	if (r)
491 		return r;
492 
493 	return r;
494 }
495 
496 static int uvd_v7_0_sw_fini(void *handle)
497 {
498 	int i, j, r;
499 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
500 
501 	amdgpu_virt_free_mm_table(adev);
502 
503 	r = amdgpu_uvd_suspend(adev);
504 	if (r)
505 		return r;
506 
507 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
508 		if (adev->uvd.harvest_config & (1 << j))
509 			continue;
510 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
511 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
512 	}
513 	return amdgpu_uvd_sw_fini(adev);
514 }
515 
516 /**
517  * uvd_v7_0_hw_init - start and test UVD block
518  *
519  * @handle: amdgpu_device pointer
520  *
521  * Initialize the hardware, boot up the VCPU and do some testing
522  */
523 static int uvd_v7_0_hw_init(void *handle)
524 {
525 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
526 	struct amdgpu_ring *ring;
527 	uint32_t tmp;
528 	int i, j, r;
529 
530 	if (amdgpu_sriov_vf(adev))
531 		r = uvd_v7_0_sriov_start(adev);
532 	else
533 		r = uvd_v7_0_start(adev);
534 	if (r)
535 		goto done;
536 
537 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
538 		if (adev->uvd.harvest_config & (1 << j))
539 			continue;
540 		ring = &adev->uvd.inst[j].ring;
541 
542 		if (!amdgpu_sriov_vf(adev)) {
543 			ring->ready = true;
544 			r = amdgpu_ring_test_ring(ring);
545 			if (r) {
546 				ring->ready = false;
547 				goto done;
548 			}
549 
550 			r = amdgpu_ring_alloc(ring, 10);
551 			if (r) {
552 				DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
553 				goto done;
554 			}
555 
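			/* program the semaphore wait/signal timeout controls */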
556 			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
557 				mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
558 			amdgpu_ring_write(ring, tmp);
559 			amdgpu_ring_write(ring, 0xFFFFF);
560 
561 			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
562 				mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
563 			amdgpu_ring_write(ring, tmp);
564 			amdgpu_ring_write(ring, 0xFFFFF);
565 
566 			tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
567 				mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
568 			amdgpu_ring_write(ring, tmp);
569 			amdgpu_ring_write(ring, 0xFFFFF);
570 
571 			/* Clear timeout status bits */
572 			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
573 				mmUVD_SEMA_TIMEOUT_STATUS), 0));
574 			amdgpu_ring_write(ring, 0x8);
575 
576 			amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
577 				mmUVD_SEMA_CNTL), 0));
578 			amdgpu_ring_write(ring, 3);
579 
580 			amdgpu_ring_commit(ring);
581 		}
582 
583 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
584 			ring = &adev->uvd.inst[j].ring_enc[i];
585 			ring->ready = true;
586 			r = amdgpu_ring_test_ring(ring);
587 			if (r) {
588 				ring->ready = false;
589 				goto done;
590 			}
591 		}
592 	}
593 done:
594 	if (!r)
595 		DRM_INFO("UVD and UVD ENC initialized successfully.\n");
596 
597 	return r;
598 }
599 
600 /**
601  * uvd_v7_0_hw_fini - stop the hardware block
602  *
603  * @handle: amdgpu_device pointer
604  *
605  * Stop the UVD block, mark ring as not ready any more
606  */
607 static int uvd_v7_0_hw_fini(void *handle)
608 {
609 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
610 	int i;
611 
612 	if (!amdgpu_sriov_vf(adev))
613 		uvd_v7_0_stop(adev);
614 	else {
615 		/* full access mode, so don't touch any UVD register */
616 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
617 	}
618 
619 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
620 		if (adev->uvd.harvest_config & (1 << i))
621 			continue;
622 		adev->uvd.inst[i].ring.ready = false;
623 	}
624 
625 	return 0;
626 }
627 
628 static int uvd_v7_0_suspend(void *handle)
629 {
630 	int r;
631 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
632 
633 	r = uvd_v7_0_hw_fini(adev);
634 	if (r)
635 		return r;
636 
637 	return amdgpu_uvd_suspend(adev);
638 }
639 
640 static int uvd_v7_0_resume(void *handle)
641 {
642 	int r;
643 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
644 
645 	r = amdgpu_uvd_resume(adev);
646 	if (r)
647 		return r;
648 
649 	return uvd_v7_0_hw_init(adev);
650 }
651 
652 /**
653  * uvd_v7_0_mc_resume - memory controller programming
654  *
655  * @adev: amdgpu_device pointer
656  *
657  * Let the UVD memory controller know its offsets
658  */
659 static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
660 {
661 	uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
662 	uint32_t offset;
663 	int i;
664 
665 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
666 		if (adev->uvd.harvest_config & (1 << i))
667 			continue;
668 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
669 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
670 				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
671 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
672 				upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
673 			offset = 0;
674 		} else {
675 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
676 				lower_32_bits(adev->uvd.inst[i].gpu_addr));
677 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
678 				upper_32_bits(adev->uvd.inst[i].gpu_addr));
679 			offset = size;
680 		}
681 
682 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
683 					AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
684 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
685 
686 		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
687 				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
688 		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
689 				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
690 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
691 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
692 
693 		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
694 				lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
695 		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
696 				upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
697 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
698 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
699 				AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
700 
701 		WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
702 				adev->gfx.config.gb_addr_config);
703 		WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
704 				adev->gfx.config.gb_addr_config);
705 		WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
706 				adev->gfx.config.gb_addr_config);
707 
708 		WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
709 	}
710 }
711 
712 static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
713 				struct amdgpu_mm_table *table)
714 {
715 	uint32_t data = 0, loop;
716 	uint64_t addr = table->gpu_addr;
717 	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
718 	uint32_t size;
719 	int i;
720 
721 	size = header->header_size + header->vce_table_size + header->uvd_table_size;
722 
723 	/* 1. write the GPU MC address of the memory descriptor to VCE_MMSCH_VF_CTX_ADDR_LO/HI */
724 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
725 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
726 
727 	/* 2. set the VMID of the descriptor */
728 	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
729 	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
730 	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
731 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
732 
733 	/* 3. notify mmsch about the size of this descriptor */
734 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
735 
736 	/* 4. clear the mailbox response */
737 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
738 
739 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
740 		if (adev->uvd.harvest_config & (1 << i))
741 			continue;
742 		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
743 		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
744 		adev->uvd.inst[i].ring_enc[0].wptr = 0;
745 		adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
746 	}
747 	/* 5. kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP reads back 0x10000002 */
748 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
749 
750 	data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
751 	loop = 1000;
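	/* poll for up to ~10ms (1000 iterations x 10us) for the MMSCH to respond */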
752 	while ((data & 0x10000002) != 0x10000002) {
753 		udelay(10);
754 		data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
755 		loop--;
756 		if (!loop)
757 			break;
758 	}
759 
760 	if (!loop) {
761 		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
762 		return -EBUSY;
763 	}
764 
765 	return 0;
766 }
767 
768 static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
769 {
770 	struct amdgpu_ring *ring;
771 	uint32_t offset, size, tmp;
772 	uint32_t table_size = 0;
773 	struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
774 	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
775 	struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
776 	struct mmsch_v1_0_cmd_end end = { {0} };
777 	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
778 	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
779 	uint8_t i = 0;
780 
781 	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
782 	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
783 	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
784 	end.cmd_header.command_type = MMSCH_COMMAND__END;
785 
786 	if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
787 		header->version = MMSCH_VERSION;
788 		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
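		/* offsets and sizes in the header are counted in dwords, hence the >> 2 */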
789 
790 		if (header->vce_table_offset == 0 && header->vce_table_size == 0)
791 			header->uvd_table_offset = header->header_size;
792 		else
793 			header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
794 
795 		init_table += header->uvd_table_offset;
796 
797 		for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
798 			if (adev->uvd.harvest_config & (1 << i))
799 				continue;
800 			ring = &adev->uvd.inst[i].ring;
801 			ring->wptr = 0;
802 			size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
803 
804 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
805 							   0xFFFFFFFF, 0x00000004);
806 			/* mc resume */
807 			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
808 				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
809 							    lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
810 				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
811 							    upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
812 				offset = 0;
813 			} else {
814 				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
815 							    lower_32_bits(adev->uvd.inst[i].gpu_addr));
816 				MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
817 							    upper_32_bits(adev->uvd.inst[i].gpu_addr));
818 				offset = size;
819 			}
820 
821 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
822 						    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
823 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
824 
825 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
826 						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
827 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
828 						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
829 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
830 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
831 
832 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
833 						    lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
834 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
835 						    upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
836 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
837 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
838 						    AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
839 
840 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
841 			/* mc resume end */
842 
843 			/* disable clock gating */
844 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
845 							   ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
846 
847 			/* disable interrupt */
848 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
849 							   ~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
850 
851 			/* stall UMC and register bus before resetting VCPU */
852 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
853 							   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
854 							   UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
855 
856 			/* put LMI, VCPU, RBC etc... into reset */
857 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
858 						    (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
859 							       UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
860 							       UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
861 							       UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
862 							       UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
863 							       UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
864 							       UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
865 							       UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
866 
867 			/* initialize UVD memory controller */
868 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
869 						    (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
870 							       UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
871 							       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
872 							       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
873 							       UVD_LMI_CTRL__REQ_MODE_MASK |
874 							       0x00100000L));
875 
876 			/* take all subblocks out of reset, except VCPU */
877 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
878 						    UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
879 
880 			/* enable VCPU clock */
881 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
882 						    UVD_VCPU_CNTL__CLK_EN_MASK);
883 
884 			/* enable master interrupt */
885 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
886 							   ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
887 							   (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
888 
889 			/* clear bit 4 of UVD_STATUS */
890 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
891 							   ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
892 
893 			/* force RBC into idle state */
894 			size = order_base_2(ring->ring_size);
895 			tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
896 			tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
897 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
898 
899 			ring = &adev->uvd.inst[i].ring_enc[0];
900 			ring->wptr = 0;
901 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
902 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
903 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
904 
905 			/* boot up the VCPU */
906 			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
907 
908 			/* enable UMC */
909 			MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
910 											   ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
911 
912 			MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
913 		}
914 		/* add end packet */
915 		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
916 		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
917 		header->uvd_table_size = table_size;
918 
919 	}
920 	return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
921 }
922 
923 /**
924  * uvd_v7_0_start - start UVD block
925  *
926  * @adev: amdgpu_device pointer
927  *
928  * Setup and start the UVD block
929  */
930 static int uvd_v7_0_start(struct amdgpu_device *adev)
931 {
932 	struct amdgpu_ring *ring;
933 	uint32_t rb_bufsz, tmp;
934 	uint32_t lmi_swap_cntl;
935 	uint32_t mp_swap_cntl;
936 	int i, j, k, r;
937 
938 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
939 		if (adev->uvd.harvest_config & (1 << k))
940 			continue;
941 		/* disable DPG */
942 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
943 				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
944 	}
945 
946 	/* disable byte swapping */
947 	lmi_swap_cntl = 0;
948 	mp_swap_cntl = 0;
949 
950 	uvd_v7_0_mc_resume(adev);
951 
952 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
953 		if (adev->uvd.harvest_config & (1 << k))
954 			continue;
955 		ring = &adev->uvd.inst[k].ring;
956 		/* disable clock gating */
957 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
958 				~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
959 
960 		/* disable interrupt */
961 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
962 				~UVD_MASTINT_EN__VCPU_EN_MASK);
963 
964 		/* stall UMC and register bus before resetting VCPU */
965 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
966 				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
967 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
968 		mdelay(1);
969 
970 		/* put LMI, VCPU, RBC etc... into reset */
971 		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
972 			UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
973 			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
974 			UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
975 			UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
976 			UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
977 			UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
978 			UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
979 			UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
980 		mdelay(5);
981 
982 		/* initialize UVD memory controller */
983 		WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
984 			(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
985 			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
986 			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
987 			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
988 			UVD_LMI_CTRL__REQ_MODE_MASK |
989 			0x00100000L);
990 
991 #ifdef __BIG_ENDIAN
992 		/* swap (8 in 32) RB and IB */
993 		lmi_swap_cntl = 0xa;
994 		mp_swap_cntl = 0;
995 #endif
996 		WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
997 		WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
998 
999 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
1000 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
1001 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
1002 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
1003 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
1004 		WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
1005 
1006 		/* take all subblocks out of reset, except VCPU */
1007 		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
1008 				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1009 		mdelay(5);
1010 
1011 		/* enable VCPU clock */
1012 		WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
1013 				UVD_VCPU_CNTL__CLK_EN_MASK);
1014 
1015 		/* enable UMC */
1016 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
1017 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1018 
1019 		/* boot up the VCPU */
1020 		WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
1021 		mdelay(10);
1022 
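		/* up to 10 VCPU reset attempts, each polling UVD_STATUS for ~1s (100 x 10ms) */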
1023 		for (i = 0; i < 10; ++i) {
1024 			uint32_t status;
1025 
1026 			for (j = 0; j < 100; ++j) {
1027 				status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
1028 				if (status & 2)
1029 					break;
1030 				mdelay(10);
1031 			}
1032 			r = 0;
1033 			if (status & 2)
1034 				break;
1035 
1036 			DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1037 			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
1038 					UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
1039 					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1040 			mdelay(10);
1041 			WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
1042 					~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1043 			mdelay(10);
1044 			r = -1;
1045 		}
1046 
1047 		if (r) {
1048 			DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1049 			return r;
1050 		}
1051 		/* enable master interrupt */
1052 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
1053 			(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
1054 			~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
1055 
1056 		/* clear bit 4 of UVD_STATUS */
1057 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
1058 				~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1059 
1060 		/* force RBC into idle state */
1061 		rb_bufsz = order_base_2(ring->ring_size);
1062 		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1063 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1064 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1065 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
1066 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1067 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1068 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
1069 
1070 		/* set the write pointer delay */
1071 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
1072 
1073 		/* set the wb address */
1074 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
1075 				(upper_32_bits(ring->gpu_addr) >> 2));
1076 
1077 		/* program the RB_BASE for the ring buffer */
1078 		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1079 				lower_32_bits(ring->gpu_addr));
1080 		WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1081 				upper_32_bits(ring->gpu_addr));
1082 
1083 		/* Initialize the ring buffer's read and write pointers */
1084 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
1085 
1086 		ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
1087 		WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
1088 				lower_32_bits(ring->wptr));
1089 
1090 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
1091 				~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
1092 
1093 		ring = &adev->uvd.inst[k].ring_enc[0];
1094 		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1095 		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1096 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
1097 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1098 		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
1099 
1100 		ring = &adev->uvd.inst[k].ring_enc[1];
1101 		WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1102 		WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1103 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1104 		WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1105 		WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
1106 	}
1107 	return 0;
1108 }
1109 
1110 /**
1111  * uvd_v7_0_stop - stop UVD block
1112  *
1113  * @adev: amdgpu_device pointer
1114  *
1115  * stop the UVD block
1116  */
1117 static void uvd_v7_0_stop(struct amdgpu_device *adev)
1118 {
1119 	uint8_t i = 0;
1120 
1121 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
1122 		if (adev->uvd.harvest_config & (1 << i))
1123 			continue;
1124 		/* force RBC into idle state */
1125 		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
1126 
1127 		/* Stall UMC and register bus before resetting VCPU */
1128 		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
1129 				UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
1130 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1131 		mdelay(1);
1132 
1133 		/* put VCPU into reset */
1134 		WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
1135 				UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
1136 		mdelay(5);
1137 
1138 		/* disable VCPU clock */
1139 		WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
1140 
1141 		/* Unstall UMC and register bus */
1142 		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
1143 				~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1144 	}
1145 }
1146 
1147 /**
1148  * uvd_v7_0_ring_emit_fence - emit a fence & trap command
1149  *
1150  * @ring: amdgpu_ring pointer
1151  * @addr: GPU address to write the fence value to
1152  * @seq: sequence number to write
1153  * @flags: fence flags; 64-bit fences are not supported
1154  *
1155  * Write a fence and a trap command to the ring.
1156  */
1155 static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1156 				     unsigned flags)
1157 {
1158 	struct amdgpu_device *adev = ring->adev;
1159 
1160 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1161 
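	/* write seq and the fence address, then VCPU cmd 0 (fence write) followed by cmd 2 (trap) */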
1162 	amdgpu_ring_write(ring,
1163 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1164 	amdgpu_ring_write(ring, seq);
1165 	amdgpu_ring_write(ring,
1166 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1167 	amdgpu_ring_write(ring, addr & 0xffffffff);
1168 	amdgpu_ring_write(ring,
1169 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1170 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1171 	amdgpu_ring_write(ring,
1172 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1173 	amdgpu_ring_write(ring, 0);
1174 
1175 	amdgpu_ring_write(ring,
1176 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1177 	amdgpu_ring_write(ring, 0);
1178 	amdgpu_ring_write(ring,
1179 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1180 	amdgpu_ring_write(ring, 0);
1181 	amdgpu_ring_write(ring,
1182 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1183 	amdgpu_ring_write(ring, 2);
1184 }
1185 
1186 /**
1187  * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
1188  *
1189  * @ring: amdgpu_ring pointer
1190  * @addr: GPU address to write the fence value to
1191  * @seq: sequence number to write
1192  * @flags: fence flags; 64-bit fences are not supported
1193  *
1194  * Write an enc fence and a trap command to the ring.
1193  */
1194 static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
1195 			u64 seq, unsigned flags)
1196 {
1197 
1198 	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1199 
1200 	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
1201 	amdgpu_ring_write(ring, addr);
1202 	amdgpu_ring_write(ring, upper_32_bits(addr));
1203 	amdgpu_ring_write(ring, seq);
1204 	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
1205 }
1206 
1207 /**
1208  * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
1209  *
1210  * @ring: amdgpu_ring pointer
1211  */
1212 static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
1213 {
1214 	/* The firmware doesn't seem to like touching registers at this point. */
1215 }
1216 
1217 /**
1218  * uvd_v7_0_ring_test_ring - register write test
1219  *
1220  * @ring: amdgpu_ring pointer
1221  *
1222  * Test if we can successfully write to the context register
1223  */
1224 static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
1225 {
1226 	struct amdgpu_device *adev = ring->adev;
1227 	uint32_t tmp = 0;
1228 	unsigned i;
1229 	int r;
1230 
1231 	WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
1232 	r = amdgpu_ring_alloc(ring, 3);
1233 	if (r) {
1234 		DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
1235 			  ring->me, ring->idx, r);
1236 		return r;
1237 	}
1238 	amdgpu_ring_write(ring,
1239 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
1240 	amdgpu_ring_write(ring, 0xDEADBEEF);
1241 	amdgpu_ring_commit(ring);
1242 	for (i = 0; i < adev->usec_timeout; i++) {
1243 		tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
1244 		if (tmp == 0xDEADBEEF)
1245 			break;
1246 		DRM_UDELAY(1);
1247 	}
1248 
1249 	if (i < adev->usec_timeout) {
1250 		DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
1251 			 ring->me, ring->idx, i);
1252 	} else {
1253 		DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
1254 			  ring->me, ring->idx, tmp);
1255 		r = -EINVAL;
1256 	}
1257 	return r;
1258 }
1259 
1260 /**
1261  * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
1262  *
1263  * @p: the CS parser with the IBs
1264  * @ib_idx: which IB to patch
1265  *
1266  */
1267 static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1268 					   uint32_t ib_idx)
1269 {
1270 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
1271 	unsigned i;
1272 
1273 	/* No patching necessary for the first instance */
1274 	if (!p->ring->me)
1275 		return 0;
1276 
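	/* rewrite instance-0 register offsets in the IB into the second instance's register range */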
1277 	for (i = 0; i < ib->length_dw; i += 2) {
1278 		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
1279 
1280 		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
1281 		reg += p->adev->reg_offset[UVD_HWIP][1][1];
1282 
1283 		amdgpu_set_ib_value(p, ib_idx, i, reg);
1284 	}
1285 	return 0;
1286 }
1287 
1288 /**
1289  * uvd_v7_0_ring_emit_ib - execute indirect buffer
1290  *
1291  * @ring: amdgpu_ring pointer
1292  * @ib: indirect buffer to execute
1293  * @vmid: VM ID to run the IB under
1294  * @ctx_switch: context switch flag (unused by this engine)
1293  *
1294  * Write ring commands to execute the indirect buffer
1295  */
1296 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
1297 				  struct amdgpu_ib *ib,
1298 				  unsigned vmid, bool ctx_switch)
1299 {
1300 	struct amdgpu_device *adev = ring->adev;
1301 
1302 	amdgpu_ring_write(ring,
1303 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
1304 	amdgpu_ring_write(ring, vmid);
1305 
1306 	amdgpu_ring_write(ring,
1307 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
1308 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1309 	amdgpu_ring_write(ring,
1310 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
1311 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1312 	amdgpu_ring_write(ring,
1313 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
1314 	amdgpu_ring_write(ring, ib->length_dw);
1315 }
1316 
1317 /**
1318  * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
1319  *
1320  * @ring: amdgpu_ring pointer
1321  * @ib: indirect buffer to execute
1322  *
1323  * Write enc ring commands to execute the indirect buffer
1324  */
1325 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
1326 		struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
1327 {
1328 	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
1329 	amdgpu_ring_write(ring, vmid);
1330 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
1331 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
1332 	amdgpu_ring_write(ring, ib->length_dw);
1333 }
1334 
1335 static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1336 				    uint32_t reg, uint32_t val)
1337 {
1338 	struct amdgpu_device *adev = ring->adev;
1339 
1340 	amdgpu_ring_write(ring,
1341 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1342 	amdgpu_ring_write(ring, reg << 2);
1343 	amdgpu_ring_write(ring,
1344 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1345 	amdgpu_ring_write(ring, val);
1346 	amdgpu_ring_write(ring,
1347 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1348 	amdgpu_ring_write(ring, 8);
1349 }
1350 
1351 static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1352 					uint32_t val, uint32_t mask)
1353 {
1354 	struct amdgpu_device *adev = ring->adev;
1355 
1356 	amdgpu_ring_write(ring,
1357 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
1358 	amdgpu_ring_write(ring, reg << 2);
1359 	amdgpu_ring_write(ring,
1360 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
1361 	amdgpu_ring_write(ring, val);
1362 	amdgpu_ring_write(ring,
1363 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
1364 	amdgpu_ring_write(ring, mask);
1365 	amdgpu_ring_write(ring,
1366 		PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
1367 	amdgpu_ring_write(ring, 12);
1368 }
1369 
1370 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1371 					unsigned vmid, uint64_t pd_addr)
1372 {
1373 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1374 	uint32_t data0, data1, mask;
1375 
1376 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1377 
1378 	/* wait for reg writes */
1379 	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
1380 	data1 = lower_32_bits(pd_addr);
1381 	mask = 0xffffffff;
1382 	uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1383 }
1384 
1385 static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1386 {
1387 	struct amdgpu_device *adev = ring->adev;
1388 	int i;
1389 
1390 	WARN_ON(ring->wptr % 2 || count % 2);
1391 
1392 	for (i = 0; i < count / 2; i++) {
1393 		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
1394 		amdgpu_ring_write(ring, 0);
1395 	}
1396 }
1397 
1398 static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
1399 {
1400 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
1401 }
1402 
1403 static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
1404 					    uint32_t reg, uint32_t val,
1405 					    uint32_t mask)
1406 {
1407 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
1408 	amdgpu_ring_write(ring,	reg << 2);
1409 	amdgpu_ring_write(ring, mask);
1410 	amdgpu_ring_write(ring, val);
1411 }
1412 
1413 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
1414 					    unsigned int vmid, uint64_t pd_addr)
1415 {
1416 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1417 
1418 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1419 
1420 	/* wait for reg writes */
1421 	uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
1422 					lower_32_bits(pd_addr), 0xffffffff);
1423 }
1424 
1425 static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
1426 					uint32_t reg, uint32_t val)
1427 {
1428 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
1429 	amdgpu_ring_write(ring,	reg << 2);
1430 	amdgpu_ring_write(ring, val);
1431 }
1432 
1433 #if 0
1434 static bool uvd_v7_0_is_idle(void *handle)
1435 {
1436 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1437 
1438 	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
1439 }
1440 
1441 static int uvd_v7_0_wait_for_idle(void *handle)
1442 {
1443 	unsigned i;
1444 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1445 
1446 	for (i = 0; i < adev->usec_timeout; i++) {
1447 		if (uvd_v7_0_is_idle(handle))
1448 			return 0;
1449 	}
1450 	return -ETIMEDOUT;
1451 }
1452 
1453 #define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
1454 static bool uvd_v7_0_check_soft_reset(void *handle)
1455 {
1456 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1457 	u32 srbm_soft_reset = 0;
1458 	u32 tmp = RREG32(mmSRBM_STATUS);
1459 
1460 	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
1461 	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
1462 	    (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
1463 		    AMDGPU_UVD_STATUS_BUSY_MASK))
1464 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
1465 				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
1466 
1467 	if (srbm_soft_reset) {
1468 		adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1469 		return true;
1470 	} else {
1471 		adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1472 		return false;
1473 	}
1474 }
1475 
1476 static int uvd_v7_0_pre_soft_reset(void *handle)
1477 {
1478 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1479 
1480 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1481 		return 0;
1482 
1483 	uvd_v7_0_stop(adev);
1484 	return 0;
1485 }
1486 
1487 static int uvd_v7_0_soft_reset(void *handle)
1488 {
1489 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1490 	u32 srbm_soft_reset;
1491 
1492 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1493 		return 0;
1494 	srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1495 
1496 	if (srbm_soft_reset) {
1497 		u32 tmp;
1498 
1499 		tmp = RREG32(mmSRBM_SOFT_RESET);
1500 		tmp |= srbm_soft_reset;
1501 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1502 		WREG32(mmSRBM_SOFT_RESET, tmp);
1503 		tmp = RREG32(mmSRBM_SOFT_RESET);
1504 
1505 		udelay(50);
1506 
1507 		tmp &= ~srbm_soft_reset;
1508 		WREG32(mmSRBM_SOFT_RESET, tmp);
1509 		tmp = RREG32(mmSRBM_SOFT_RESET);
1510 
1511 		/* Wait a little for things to settle down */
1512 		udelay(50);
1513 	}
1514 
1515 	return 0;
1516 }
1517 
1518 static int uvd_v7_0_post_soft_reset(void *handle)
1519 {
1520 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1521 
1522 	if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1523 		return 0;
1524 
1525 	mdelay(5);
1526 
1527 	return uvd_v7_0_start(adev);
1528 }
1529 #endif
1530 
1531 static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
1532 					struct amdgpu_irq_src *source,
1533 					unsigned type,
1534 					enum amdgpu_interrupt_state state)
1535 {
1536 	// TODO
1537 	return 0;
1538 }
1539 
1540 static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
1541 				      struct amdgpu_irq_src *source,
1542 				      struct amdgpu_iv_entry *entry)
1543 {
1544 	uint32_t ip_instance;
1545 
1546 	switch (entry->client_id) {
1547 	case SOC15_IH_CLIENTID_UVD:
1548 		ip_instance = 0;
1549 		break;
1550 	case SOC15_IH_CLIENTID_UVD1:
1551 		ip_instance = 1;
1552 		break;
1553 	default:
1554 		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1555 		return 0;
1556 	}
1557 
1558 	DRM_DEBUG("IH: UVD TRAP\n");
1559 
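	/* the src_ids match the sources registered in sw_init: system message (dec ring) and the ENC rings */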
1560 	switch (entry->src_id) {
1561 	case 124:
1562 		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
1563 		break;
1564 	case 119:
1565 		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
1566 		break;
1567 	case 120:
1568 		if (!amdgpu_sriov_vf(adev))
1569 			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
1570 		break;
1571 	default:
1572 		DRM_ERROR("Unhandled interrupt: %d %d\n",
1573 			  entry->src_id, entry->src_data[0]);
1574 		break;
1575 	}
1576 
1577 	return 0;
1578 }
1579 
1580 #if 0
1581 static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
1582 {
1583 	uint32_t data, data1, data2, suvd_flags;
1584 
1585 	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
1586 	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1587 	data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
1588 
1589 	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
1590 		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
1591 
1592 	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1593 		     UVD_SUVD_CGC_GATE__SIT_MASK |
1594 		     UVD_SUVD_CGC_GATE__SMP_MASK |
1595 		     UVD_SUVD_CGC_GATE__SCM_MASK |
1596 		     UVD_SUVD_CGC_GATE__SDB_MASK;
1597 
1598 	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1599 		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
1600 		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
1601 
1602 	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
1603 			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
1604 			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
1605 			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
1606 			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
1607 			UVD_CGC_CTRL__SYS_MODE_MASK |
1608 			UVD_CGC_CTRL__UDEC_MODE_MASK |
1609 			UVD_CGC_CTRL__MPEG2_MODE_MASK |
1610 			UVD_CGC_CTRL__REGS_MODE_MASK |
1611 			UVD_CGC_CTRL__RBC_MODE_MASK |
1612 			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
1613 			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
1614 			UVD_CGC_CTRL__IDCT_MODE_MASK |
1615 			UVD_CGC_CTRL__MPRD_MODE_MASK |
1616 			UVD_CGC_CTRL__MPC_MODE_MASK |
1617 			UVD_CGC_CTRL__LBSI_MODE_MASK |
1618 			UVD_CGC_CTRL__LRBBM_MODE_MASK |
1619 			UVD_CGC_CTRL__WCB_MODE_MASK |
1620 			UVD_CGC_CTRL__VCPU_MODE_MASK |
1621 			UVD_CGC_CTRL__JPEG_MODE_MASK |
1622 			UVD_CGC_CTRL__JPEG2_MODE_MASK |
1623 			UVD_CGC_CTRL__SCPU_MODE_MASK);
1624 	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1625 			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1626 			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1627 			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1628 			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
1629 	data1 |= suvd_flags;
1630 
1631 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
1632 	WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
1633 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
1634 	WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
1635 }
1636 
1637 static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
1638 {
1639 	uint32_t data, data1, cgc_flags, suvd_flags;
1640 
1641 	data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
1642 	data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
1643 
1644 	cgc_flags = UVD_CGC_GATE__SYS_MASK |
1645 		UVD_CGC_GATE__UDEC_MASK |
1646 		UVD_CGC_GATE__MPEG2_MASK |
1647 		UVD_CGC_GATE__RBC_MASK |
1648 		UVD_CGC_GATE__LMI_MC_MASK |
1649 		UVD_CGC_GATE__IDCT_MASK |
1650 		UVD_CGC_GATE__MPRD_MASK |
1651 		UVD_CGC_GATE__MPC_MASK |
1652 		UVD_CGC_GATE__LBSI_MASK |
1653 		UVD_CGC_GATE__LRBBM_MASK |
1654 		UVD_CGC_GATE__UDEC_RE_MASK |
1655 		UVD_CGC_GATE__UDEC_CM_MASK |
1656 		UVD_CGC_GATE__UDEC_IT_MASK |
1657 		UVD_CGC_GATE__UDEC_DB_MASK |
1658 		UVD_CGC_GATE__UDEC_MP_MASK |
1659 		UVD_CGC_GATE__WCB_MASK |
1660 		UVD_CGC_GATE__VCPU_MASK |
1661 		UVD_CGC_GATE__SCPU_MASK |
1662 		UVD_CGC_GATE__JPEG_MASK |
1663 		UVD_CGC_GATE__JPEG2_MASK;
1664 
1665 	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
1666 				UVD_SUVD_CGC_GATE__SIT_MASK |
1667 				UVD_SUVD_CGC_GATE__SMP_MASK |
1668 				UVD_SUVD_CGC_GATE__SCM_MASK |
1669 				UVD_SUVD_CGC_GATE__SDB_MASK;
1670 
1671 	data |= cgc_flags;
1672 	data1 |= suvd_flags;
1673 
1674 	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
1675 	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
1676 }
1677 
1678 static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
1679 {
1680 	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
1681 
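	/* when enabled, DCLK and VCLK bypass the DFS */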
1682 	if (enable)
1683 		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1684 			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1685 	else
1686 		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
1687 			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
1688 
1689 	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
1690 }
1691 
1692 
1693 static int uvd_v7_0_set_clockgating_state(void *handle,
1694 					  enum amd_clockgating_state state)
1695 {
1696 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1697 	bool enable = (state == AMD_CG_STATE_GATE);
1698 
1699 	uvd_v7_0_set_bypass_mode(adev, enable);
1700 
1701 	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1702 		return 0;
1703 
1704 	if (enable) {
1705 		/* disable HW gating and enable SW gating */
1706 		uvd_v7_0_set_sw_clock_gating(adev);
1707 	} else {
1708 		/* wait for STATUS to clear */
1709 		if (uvd_v7_0_wait_for_idle(handle))
1710 			return -EBUSY;
1711 
1712 		/* enable HW gates because UVD is idle */
1713 		/* uvd_v7_0_set_hw_clock_gating(adev); */
1714 	}
1715 
1716 	return 0;
1717 }
1718 
1719 static int uvd_v7_0_set_powergating_state(void *handle,
1720 					  enum amd_powergating_state state)
1721 {
1722 	/* This doesn't actually powergate the UVD block.
1723 	 * That's done in the DPM code via the SMC.  This
1724 	 * just re-inits the block as necessary.  The actual
1725 	 * gating still happens in the DPM code.  We should
1726 	 * revisit this when there is a cleaner line between
1727 	 * the SMC and the HW blocks.
1728 	 */
1729 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1730 
1731 	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1732 		return 0;
1733 
	/* no ring in scope; assume instance 0 (this block is compiled out) */
1734 	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
1735 
1736 	if (state == AMD_PG_STATE_GATE) {
1737 		uvd_v7_0_stop(adev);
1738 		return 0;
1739 	} else {
1740 		return uvd_v7_0_start(adev);
1741 	}
1742 }
1743 #endif
1744 
1745 static int uvd_v7_0_set_clockgating_state(void *handle,
1746 					  enum amd_clockgating_state state)
1747 {
1748 	/* needed for driver unload */
1749 	return 0;
1750 }
1751 
1752 const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
1753 	.name = "uvd_v7_0",
1754 	.early_init = uvd_v7_0_early_init,
1755 	.late_init = NULL,
1756 	.sw_init = uvd_v7_0_sw_init,
1757 	.sw_fini = uvd_v7_0_sw_fini,
1758 	.hw_init = uvd_v7_0_hw_init,
1759 	.hw_fini = uvd_v7_0_hw_fini,
1760 	.suspend = uvd_v7_0_suspend,
1761 	.resume = uvd_v7_0_resume,
1762 	.is_idle = NULL /* uvd_v7_0_is_idle */,
1763 	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
1764 	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
1765 	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
1766 	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
1767 	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
1768 	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
1769 	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
1770 };
1771 
1772 static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
1773 	.type = AMDGPU_RING_TYPE_UVD,
1774 	.align_mask = 0xf,
1775 	.support_64bit_ptrs = false,
1776 	.vmhub = AMDGPU_MMHUB,
1777 	.get_rptr = uvd_v7_0_ring_get_rptr,
1778 	.get_wptr = uvd_v7_0_ring_get_wptr,
1779 	.set_wptr = uvd_v7_0_ring_set_wptr,
1780 	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
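	/* worst-case dword counts reserved on the ring per submission, itemized below */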
1781 	.emit_frame_size =
1782 		6 + /* hdp invalidate */
1783 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1784 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1785 		8 + /* uvd_v7_0_ring_emit_vm_flush */
1786 		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
1787 	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
1788 	.emit_ib = uvd_v7_0_ring_emit_ib,
1789 	.emit_fence = uvd_v7_0_ring_emit_fence,
1790 	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
1791 	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
1792 	.test_ring = uvd_v7_0_ring_test_ring,
1793 	.test_ib = amdgpu_uvd_ring_test_ib,
1794 	.insert_nop = uvd_v7_0_ring_insert_nop,
1795 	.pad_ib = amdgpu_ring_generic_pad_ib,
1796 	.begin_use = amdgpu_uvd_ring_begin_use,
1797 	.end_use = amdgpu_uvd_ring_end_use,
1798 	.emit_wreg = uvd_v7_0_ring_emit_wreg,
1799 	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
1800 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1801 };
1802 
1803 static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
1804 	.type = AMDGPU_RING_TYPE_UVD_ENC,
1805 	.align_mask = 0x3f,
1806 	.nop = HEVC_ENC_CMD_NO_OP,
1807 	.support_64bit_ptrs = false,
1808 	.vmhub = AMDGPU_MMHUB,
1809 	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
1810 	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
1811 	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
1812 	.emit_frame_size =
1813 		3 + 3 + /* hdp flush / invalidate */
1814 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1815 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1816 		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
1817 		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1818 		1, /* uvd_v7_0_enc_ring_insert_end */
1819 	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
1820 	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
1821 	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
1822 	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
1823 	.test_ring = uvd_v7_0_enc_ring_test_ring,
1824 	.test_ib = uvd_v7_0_enc_ring_test_ib,
1825 	.insert_nop = amdgpu_ring_insert_nop,
1826 	.insert_end = uvd_v7_0_enc_ring_insert_end,
1827 	.pad_ib = amdgpu_ring_generic_pad_ib,
1828 	.begin_use = amdgpu_uvd_ring_begin_use,
1829 	.end_use = amdgpu_uvd_ring_end_use,
1830 	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
1831 	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
1832 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1833 };
1834 
1835 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1836 {
1837 	int i;
1838 
1839 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
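		/* skip instances fused off by harvesting */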
1840 		if (adev->uvd.harvest_config & (1 << i))
1841 			continue;
1842 		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
1843 		adev->uvd.inst[i].ring.me = i;
1844 		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
1845 	}
1846 }
1847 
1848 static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
1849 {
1850 	int i, j;
1851 
1852 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
1853 		if (adev->uvd.harvest_config & (1 << j))
1854 			continue;
1855 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
1856 			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
1857 			adev->uvd.inst[j].ring_enc[i].me = j;
1858 		}
1859 
1860 		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
1861 	}
1862 }
1863 
1864 static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
1865 	.set = uvd_v7_0_set_interrupt_state,
1866 	.process = uvd_v7_0_process_interrupt,
1867 };
1868 
1869 static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1870 {
1871 	int i;
1872 
1873 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
1874 		if (adev->uvd.harvest_config & (1 << i))
1875 			continue;
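		/* one interrupt type per enc ring plus one for the decode ring */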
1876 		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
1877 		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
1878 	}
1879 }
1880 
1881 const struct amdgpu_ip_block_version uvd_v7_0_ip_block =
1882 {
1883 		.type = AMD_IP_BLOCK_TYPE_UVD,
1884 		.major = 7,
1885 		.minor = 0,
1886 		.rev = 0,
1887 		.funcs = &uvd_v7_0_ip_funcs,
1888 };
1889