1 /* $NetBSD: amdgpu_vcn_v2_5.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $ */
2
3 /*
4 * Copyright 2019 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: amdgpu_vcn_v2_5.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");
28
29 #include <linux/firmware.h>
30
31 #include "amdgpu.h"
32 #include "amdgpu_vcn.h"
33 #include "amdgpu_pm.h"
34 #include "soc15.h"
35 #include "soc15d.h"
36 #include "vcn_v2_0.h"
37 #include "mmsch_v1_0.h"
38
39 #include "vcn/vcn_2_5_offset.h"
40 #include "vcn/vcn_2_5_sh_mask.h"
41 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
42
43 #include <linux/nbsd-namespace.h>
44
45 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
46 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
47 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
48 #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
49 #define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
50 #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
51 #define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d
52
53 #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
54 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
55 #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
56 #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c
57
58 #define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
59
60 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
61 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
62 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
63 static int vcn_v2_5_set_powergating_state(void *handle,
64 enum amd_powergating_state state);
65 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
66 int inst_idx, struct dpg_pause_state *new_state);
67 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
68
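/* IH client IDs, indexed by VCN instance (a second instance exists on Arcturus). */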
69 static int amdgpu_ih_clientid_vcns[] = {
70 SOC15_IH_CLIENTID_VCN,
71 SOC15_IH_CLIENTID_VCN1
72 };
73
74 /**
75 * vcn_v2_5_early_init - set function pointers
76 *
77 * @handle: amdgpu_device pointer
78 *
79 * Set ring and irq function pointers
80 */
81 static int vcn_v2_5_early_init(void *handle)
82 {
83 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
84 if (adev->asic_type == CHIP_ARCTURUS) {
85 u32 harvest;
86 int i;
87
88 adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
89 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
90 harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
91 if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
92 adev->vcn.harvest_config |= 1 << i;
93 }
94
95 if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
96 AMDGPU_VCN_HARVEST_VCN1))
97 /* both instances are harvested, disable the block */
98 return -ENOENT;
99 } else
100 adev->vcn.num_vcn_inst = 1;
101
102 if (amdgpu_sriov_vf(adev)) {
103 adev->vcn.num_vcn_inst = 2;
104 adev->vcn.harvest_config = 0;
105 adev->vcn.num_enc_rings = 1;
106 } else {
107 adev->vcn.num_enc_rings = 2;
108 }
109
110 vcn_v2_5_set_dec_ring_funcs(adev);
111 vcn_v2_5_set_enc_ring_funcs(adev);
112 vcn_v2_5_set_irq_funcs(adev);
113
114 return 0;
115 }
116
117 /**
118 * vcn_v2_5_sw_init - sw init for VCN block
119 *
120 * @handle: amdgpu_device pointer
121 *
122 * Load firmware and sw initialization
123 */
124 static int vcn_v2_5_sw_init(void *handle)
125 {
126 struct amdgpu_ring *ring;
127 int i, j, r;
128 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
129
130 for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
131 if (adev->vcn.harvest_config & (1 << j))
132 continue;
133 /* VCN DEC TRAP */
134 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
135 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
136 if (r)
137 return r;
138
139 /* VCN ENC TRAP */
140 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
141 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
142 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
143 if (r)
144 return r;
145 }
146 }
147
148 r = amdgpu_vcn_sw_init(adev);
149 if (r)
150 return r;
151
152 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
153 const struct common_firmware_header *hdr;
154 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
155 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
156 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
157 adev->firmware.fw_size +=
158 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
159
160 if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
161 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
162 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
163 adev->firmware.fw_size +=
164 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
165 }
166 DRM_INFO("PSP loading VCN firmware\n");
167 }
168
169 r = amdgpu_vcn_resume(adev);
170 if (r)
171 return r;
172
173 for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
174 if (adev->vcn.harvest_config & (1 << j))
175 continue;
176 adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
177 adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
178 adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
179 adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
180 adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
181 adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
182
183 adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
184 adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
185 adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
186 adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
187 adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
188 adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
189 adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
190 adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
191 adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
192 adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
193
194 ring = &adev->vcn.inst[j].ring_dec;
195 ring->use_doorbell = true;
196
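/*
 * Doorbell layout, as encoded below: under SR-IOV each instance packs
 * into two doorbells (dec at 2*j, enc0 at 2*j + 1); on bare metal each
 * instance gets a stride of 8 (dec at 8*j, enc rings from 8*j + 2).
 */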
197 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
198 (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
199 snprintf(ring->name, sizeof(ring->name), "vcn_dec_%d", j);
200 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
201 if (r)
202 return r;
203
204 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
205 ring = &adev->vcn.inst[j].ring_enc[i];
206 ring->use_doorbell = true;
207
208 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
209 (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
210
211 snprintf(ring->name, sizeof(ring->name), "vcn_enc_%d.%d", j, i);
212 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
213 if (r)
214 return r;
215 }
216 }
217
218 if (amdgpu_sriov_vf(adev)) {
219 r = amdgpu_virt_alloc_mm_table(adev);
220 if (r)
221 return r;
222 }
223
224 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
225 adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
226
227 return 0;
228 }
229
230 /**
231 * vcn_v2_5_sw_fini - sw fini for VCN block
232 *
233 * @handle: amdgpu_device pointer
234 *
235 * VCN suspend and free up sw allocation
236 */
237 static int vcn_v2_5_sw_fini(void *handle)
238 {
239 int r;
240 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
241
242 if (amdgpu_sriov_vf(adev))
243 amdgpu_virt_free_mm_table(adev);
244
245 r = amdgpu_vcn_suspend(adev);
246 if (r)
247 return r;
248
249 r = amdgpu_vcn_sw_fini(adev);
250
251 return r;
252 }
253
254 /**
255 * vcn_v2_5_hw_init - start and test VCN block
256 *
257 * @handle: amdgpu_device pointer
258 *
259 * Initialize the hardware, boot up the VCPU and do some testing
260 */
261 static int vcn_v2_5_hw_init(void *handle)
262 {
263 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
264 struct amdgpu_ring *ring;
265 int i, j, r = 0;
266
267 if (amdgpu_sriov_vf(adev))
268 r = vcn_v2_5_sriov_start(adev);
269
270 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
271 if (adev->vcn.harvest_config & (1 << j))
272 continue;
273
274 if (amdgpu_sriov_vf(adev)) {
275 adev->vcn.inst[j].ring_enc[0].sched.ready = true;
276 adev->vcn.inst[j].ring_enc[1].sched.ready = false;
277 adev->vcn.inst[j].ring_enc[2].sched.ready = false;
278 adev->vcn.inst[j].ring_dec.sched.ready = true;
279 } else {
280
281 ring = &adev->vcn.inst[j].ring_dec;
282
283 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
284 ring->doorbell_index, j);
285
286 r = amdgpu_ring_test_helper(ring);
287 if (r)
288 goto done;
289
290 for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
291 ring = &adev->vcn.inst[j].ring_enc[i];
292 r = amdgpu_ring_test_helper(ring);
293 if (r)
294 goto done;
295 }
296 }
297 }
298
299 done:
300 if (!r)
301 DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
302 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
303
304 return r;
305 }
306
307 /**
308 * vcn_v2_5_hw_fini - stop the hardware block
309 *
310 * @handle: amdgpu_device pointer
311 *
312 * Stop the VCN block, mark ring as not ready any more
313 */
314 static int vcn_v2_5_hw_fini(void *handle)
315 {
316 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
317 struct amdgpu_ring *ring;
318 int i, j;
319
320 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
321 if (adev->vcn.harvest_config & (1 << i))
322 continue;
323 ring = &adev->vcn.inst[i].ring_dec;
324
325 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
326 (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
327 RREG32_SOC15(VCN, i, mmUVD_STATUS)))
328 vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
329
330 ring->sched.ready = false;
331
332 for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
333 ring = &adev->vcn.inst[i].ring_enc[j];
334 ring->sched.ready = false;
335 }
336 }
337
338 return 0;
339 }
340
341 /**
342 * vcn_v2_5_suspend - suspend VCN block
343 *
344 * @handle: amdgpu_device pointer
345 *
346 * HW fini and suspend VCN block
347 */
348 static int vcn_v2_5_suspend(void *handle)
349 {
350 int r;
351 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
352
353 r = vcn_v2_5_hw_fini(adev);
354 if (r)
355 return r;
356
357 r = amdgpu_vcn_suspend(adev);
358
359 return r;
360 }
361
362 /**
363 * vcn_v2_5_resume - resume VCN block
364 *
365 * @handle: amdgpu_device pointer
366 *
367 * Resume firmware and hw init VCN block
368 */
369 static int vcn_v2_5_resume(void *handle)
370 {
371 int r;
372 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
373
374 r = amdgpu_vcn_resume(adev);
375 if (r)
376 return r;
377
378 r = vcn_v2_5_hw_init(adev);
379
380 return r;
381 }
382
383 /**
384 * vcn_v2_5_mc_resume - memory controller programming
385 *
386 * @adev: amdgpu_device pointer
387 *
388 * Let the VCN memory controller know its offsets
389 */
390 static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
391 {
392 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
393 uint32_t offset;
394 int i;
395
396 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
397 if (adev->vcn.harvest_config & (1 << i))
398 continue;
399 /* cache window 0: fw */
400 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
401 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
402 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
403 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
404 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
405 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
406 offset = 0;
407 } else {
408 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
409 lower_32_bits(adev->vcn.inst[i].gpu_addr));
410 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
411 upper_32_bits(adev->vcn.inst[i].gpu_addr));
412 offset = size;
413 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
414 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
415 }
416 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
417
418 /* cache window 1: stack */
419 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
420 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
421 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
422 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
423 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
424 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
425
426 /* cache window 2: context */
427 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
428 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
429 WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
430 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
431 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
432 WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
433 }
434 }
435
436 static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
437 {
438 uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
439 uint32_t offset;
440
441 /* cache window 0: fw */
442 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
443 if (!indirect) {
444 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
445 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
446 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
447 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
448 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
449 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
450 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
451 UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
452 } else {
453 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
454 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
455 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
456 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
457 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
458 UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
459 }
460 offset = 0;
461 } else {
462 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
463 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
464 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
465 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
466 UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
467 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
468 offset = size;
469 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
470 UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
471 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
472 }
473
474 if (!indirect)
475 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
476 UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
477 else
478 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
479 UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
480
481 /* cache window 1: stack */
482 if (!indirect) {
483 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
484 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
485 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
486 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
487 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
488 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
489 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
490 UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
491 } else {
492 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
493 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
494 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
495 UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
496 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
497 UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
498 }
499 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
500 UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
501
502 /* cache window 2: context */
503 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
504 UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
505 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
506 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
507 UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
508 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
509 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
510 UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
511 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
512 UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
513
514 /* non-cache window */
515 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
516 UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
517 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
518 UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
519 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
520 UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
521 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
522 UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
523
524 /* VCN global tiling registers */
525 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
526 UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
527 }
528
529 /**
530 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
531 *
532 * @adev: amdgpu_device pointer
533 *
534 * Disable clock gating for VCN block
535 */
536 static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
537 {
538 uint32_t data;
539 int ret __unused = 0;
540 int i;
541
542 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
543 if (adev->vcn.harvest_config & (1 << i))
544 continue;
545 /* UVD disable CGC */
546 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
547 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
548 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
549 else
550 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
551 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
552 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
553 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
554
555 data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
556 data &= ~(UVD_CGC_GATE__SYS_MASK
557 | UVD_CGC_GATE__UDEC_MASK
558 | UVD_CGC_GATE__MPEG2_MASK
559 | UVD_CGC_GATE__REGS_MASK
560 | UVD_CGC_GATE__RBC_MASK
561 | UVD_CGC_GATE__LMI_MC_MASK
562 | UVD_CGC_GATE__LMI_UMC_MASK
563 | UVD_CGC_GATE__IDCT_MASK
564 | UVD_CGC_GATE__MPRD_MASK
565 | UVD_CGC_GATE__MPC_MASK
566 | UVD_CGC_GATE__LBSI_MASK
567 | UVD_CGC_GATE__LRBBM_MASK
568 | UVD_CGC_GATE__UDEC_RE_MASK
569 | UVD_CGC_GATE__UDEC_CM_MASK
570 | UVD_CGC_GATE__UDEC_IT_MASK
571 | UVD_CGC_GATE__UDEC_DB_MASK
572 | UVD_CGC_GATE__UDEC_MP_MASK
573 | UVD_CGC_GATE__WCB_MASK
574 | UVD_CGC_GATE__VCPU_MASK
575 | UVD_CGC_GATE__MMSCH_MASK);
576
577 WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
578
579 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret);
580
581 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
582 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
583 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
584 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
585 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
586 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
587 | UVD_CGC_CTRL__SYS_MODE_MASK
588 | UVD_CGC_CTRL__UDEC_MODE_MASK
589 | UVD_CGC_CTRL__MPEG2_MODE_MASK
590 | UVD_CGC_CTRL__REGS_MODE_MASK
591 | UVD_CGC_CTRL__RBC_MODE_MASK
592 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
593 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
594 | UVD_CGC_CTRL__IDCT_MODE_MASK
595 | UVD_CGC_CTRL__MPRD_MODE_MASK
596 | UVD_CGC_CTRL__MPC_MODE_MASK
597 | UVD_CGC_CTRL__LBSI_MODE_MASK
598 | UVD_CGC_CTRL__LRBBM_MODE_MASK
599 | UVD_CGC_CTRL__WCB_MODE_MASK
600 | UVD_CGC_CTRL__VCPU_MODE_MASK
601 | UVD_CGC_CTRL__MMSCH_MODE_MASK);
602 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
603
604 /* turn on */
605 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
606 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
607 | UVD_SUVD_CGC_GATE__SIT_MASK
608 | UVD_SUVD_CGC_GATE__SMP_MASK
609 | UVD_SUVD_CGC_GATE__SCM_MASK
610 | UVD_SUVD_CGC_GATE__SDB_MASK
611 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
612 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
613 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
614 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
615 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
616 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
617 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
618 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
619 | UVD_SUVD_CGC_GATE__SCLR_MASK
620 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
621 | UVD_SUVD_CGC_GATE__ENT_MASK
622 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
623 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
624 | UVD_SUVD_CGC_GATE__SITE_MASK
625 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
626 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
627 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
628 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
629 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
630 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
631
632 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
633 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
634 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
635 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
636 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
637 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
638 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
639 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
640 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
641 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
642 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
643 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
644 }
645 }
646
647 static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
648 uint8_t sram_sel, int inst_idx, uint8_t indirect)
649 {
650 uint32_t reg_data = 0;
651
652 /* enable sw clock gating control */
653 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
654 reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
655 else
656 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
657 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
658 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
659 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
660 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
661 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
662 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
663 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
664 UVD_CGC_CTRL__SYS_MODE_MASK |
665 UVD_CGC_CTRL__UDEC_MODE_MASK |
666 UVD_CGC_CTRL__MPEG2_MODE_MASK |
667 UVD_CGC_CTRL__REGS_MODE_MASK |
668 UVD_CGC_CTRL__RBC_MODE_MASK |
669 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
670 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
671 UVD_CGC_CTRL__IDCT_MODE_MASK |
672 UVD_CGC_CTRL__MPRD_MODE_MASK |
673 UVD_CGC_CTRL__MPC_MODE_MASK |
674 UVD_CGC_CTRL__LBSI_MODE_MASK |
675 UVD_CGC_CTRL__LRBBM_MODE_MASK |
676 UVD_CGC_CTRL__WCB_MODE_MASK |
677 UVD_CGC_CTRL__VCPU_MODE_MASK |
678 UVD_CGC_CTRL__MMSCH_MODE_MASK);
679 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
680 UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
681
682 /* turn off clock gating */
683 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
684 UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
685
686 /* turn on SUVD clock gating */
687 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
688 UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
689
690 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
691 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
692 UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
693 }
694
695 /**
696 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
697 *
698 * @adev: amdgpu_device pointer
699 *
700 * Enable clock gating for VCN block
701 */
702 static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
703 {
704 uint32_t data = 0;
705 int i;
706
707 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
708 if (adev->vcn.harvest_config & (1 << i))
709 continue;
710 /* enable UVD CGC */
711 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
712 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
713 data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
714 else
715 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
716 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
717 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
718 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
719
720 data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
721 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
722 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
723 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
724 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
725 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
726 | UVD_CGC_CTRL__SYS_MODE_MASK
727 | UVD_CGC_CTRL__UDEC_MODE_MASK
728 | UVD_CGC_CTRL__MPEG2_MODE_MASK
729 | UVD_CGC_CTRL__REGS_MODE_MASK
730 | UVD_CGC_CTRL__RBC_MODE_MASK
731 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
732 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
733 | UVD_CGC_CTRL__IDCT_MODE_MASK
734 | UVD_CGC_CTRL__MPRD_MODE_MASK
735 | UVD_CGC_CTRL__MPC_MODE_MASK
736 | UVD_CGC_CTRL__LBSI_MODE_MASK
737 | UVD_CGC_CTRL__LRBBM_MODE_MASK
738 | UVD_CGC_CTRL__WCB_MODE_MASK
739 | UVD_CGC_CTRL__VCPU_MODE_MASK);
740 WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
741
742 data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
743 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
744 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
745 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
746 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
747 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
748 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
749 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
750 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
751 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
752 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
753 WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
754 }
755 }
756
757 static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
758 {
759 struct amdgpu_ring *ring;
760 uint32_t rb_bufsz, tmp;
761
762 /* disable register anti-hang mechanism */
763 WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
764 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
765 /* enable dynamic power gating mode */
766 tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
767 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
768 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
769 WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
770
771 if (indirect)
772 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
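/*
 * With indirect programming, the WREG32_SOC15_DPG_MODE_2_0 writes below
 * are staged into this DPG SRAM image instead of touching the registers
 * directly; the accumulated image is handed to the PSP via
 * psp_update_vcn_sram() near the end of this function.
 */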
773
774 /* enable clock gating */
775 vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
776
777 /* enable VCPU clock */
778 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
779 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
780 tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
781 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
782 UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
783
784 /* disable master interrupt */
785 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
786 UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
787
788 /* setup mmUVD_LMI_CTRL */
789 tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
790 UVD_LMI_CTRL__REQ_MODE_MASK |
791 UVD_LMI_CTRL__CRC_RESET_MASK |
792 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
793 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
794 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
795 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
796 0x00100000L);
797 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
798 UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
799
800 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
801 UVD, 0, mmUVD_MPC_CNTL),
802 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
803
804 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
805 UVD, 0, mmUVD_MPC_SET_MUXA0),
806 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
807 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
808 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
809 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
810
811 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
812 UVD, 0, mmUVD_MPC_SET_MUXB0),
813 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
814 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
815 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
816 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
817
818 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
819 UVD, 0, mmUVD_MPC_SET_MUX),
820 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
821 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
822 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
823
824 vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
825
826 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
827 UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
828 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
829 UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
830
831 /* enable LMI MC and UMC channels */
832 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
833 UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
834
835 /* unblock VCPU register access */
836 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
837 UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
838
839 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
840 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
841 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
842 UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
843
844 /* enable master interrupt */
845 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
846 UVD, 0, mmUVD_MASTINT_EN),
847 UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
848
849 if (indirect)
850 psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
851 (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
852 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
853
854 ring = &adev->vcn.inst[inst_idx].ring_dec;
855 /* force RBC into idle state */
856 rb_bufsz = order_base_2(ring->ring_size);
857 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
858 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
859 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
860 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
861 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
862 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
863
864 /* set the write pointer delay */
865 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
866
867 /* set the wb address */
868 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
869 (upper_32_bits(ring->gpu_addr) >> 2));
870
871 /* program the RB_BASE for ring buffer */
872 WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
873 lower_32_bits(ring->gpu_addr));
874 WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
875 upper_32_bits(ring->gpu_addr));
876
877 /* Initialize the ring buffer's read and write pointers */
878 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
879
880 WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
881
882 ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
883 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
884 lower_32_bits(ring->wptr));
885
886 return 0;
887 }
888
889 static int vcn_v2_5_start(struct amdgpu_device *adev)
890 {
891 struct amdgpu_ring *ring;
892 uint32_t rb_bufsz, tmp;
893 int i, j, k, r;
894
895 if (adev->pm.dpm_enabled)
896 amdgpu_dpm_enable_uvd(adev, true);
897
898 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
899 if (adev->vcn.harvest_config & (1 << i))
900 continue;
901 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
902 r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
903 continue;
904 }
905
906 /* disable register anti-hang mechanism */
907 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
908 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
909
910 /* set uvd status busy */
911 tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
912 WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
913 }
914
915 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
916 return 0;
917
918 /* SW clock gating */
919 vcn_v2_5_disable_clock_gating(adev);
920
921 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
922 if (adev->vcn.harvest_config & (1 << i))
923 continue;
924 /* enable VCPU clock */
925 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
926 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
927
928 /* disable master interrupt */
929 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
930 ~UVD_MASTINT_EN__VCPU_EN_MASK);
931
932 /* setup mmUVD_LMI_CTRL */
933 tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
934 tmp &= ~0xff;
935 WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
936 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
937 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
938 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
939 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
940
941 /* setup mmUVD_MPC_CNTL */
942 tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
943 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
944 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
945 WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
946
947 /* setup UVD_MPC_SET_MUXA0 */
948 WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
949 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
950 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
951 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
952 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
953
954 /* setup UVD_MPC_SET_MUXB0 */
955 WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
956 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
957 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
958 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
959 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
960
961 /* setup mmUVD_MPC_SET_MUX */
962 WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
963 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
964 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
965 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
966 }
967
968 vcn_v2_5_mc_resume(adev);
969
970 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
971 if (adev->vcn.harvest_config & (1 << i))
972 continue;
973 /* VCN global tiling registers */
974 WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
975 adev->gfx.config.gb_addr_config);
976 WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
977 adev->gfx.config.gb_addr_config);
978
979 /* enable LMI MC and UMC channels */
980 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
981 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
982
983 /* unblock VCPU register access */
984 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
985 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
986
987 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
988 ~UVD_VCPU_CNTL__BLK_RST_MASK);
989
990 for (k = 0; k < 10; ++k) {
991 uint32_t status;
992
993 for (j = 0; j < 100; ++j) {
994 status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
995 if (status & 2)
996 break;
997 if (amdgpu_emu_mode == 1)
998 msleep(500);
999 else
1000 mdelay(10);
1001 }
1002 r = 0;
1003 if (status & 2)
1004 break;
1005
1006 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
1007 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
1008 UVD_VCPU_CNTL__BLK_RST_MASK,
1009 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1010 mdelay(10);
1011 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
1012 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1013
1014 mdelay(10);
1015 r = -1;
1016 }
1017
1018 if (r) {
1019 DRM_ERROR("VCN decode not responding, giving up!!!\n");
1020 return r;
1021 }
1022
1023 /* enable master interrupt */
1024 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
1025 UVD_MASTINT_EN__VCPU_EN_MASK,
1026 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1027
1028 /* clear the busy bit of VCN_STATUS */
1029 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
1030 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1031
1032 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
1033
1034 ring = &adev->vcn.inst[i].ring_dec;
1035 /* force RBC into idle state */
1036 rb_bufsz = order_base_2(ring->ring_size);
1037 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1038 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1039 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1040 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1041 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1042 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
1043
1044 /* program the RB_BASE for ring buffer */
1045 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
1046 lower_32_bits(ring->gpu_addr));
1047 WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
1048 upper_32_bits(ring->gpu_addr));
1049
1050 /* Initialize the ring buffer's read and write pointers */
1051 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
1052
1053 ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
1054 WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
1055 lower_32_bits(ring->wptr));
1056 ring = &adev->vcn.inst[i].ring_enc[0];
1057 WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1058 WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1059 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
1060 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1061 WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
1062
1063 ring = &adev->vcn.inst[i].ring_enc[1];
1064 WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1065 WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1066 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1067 WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1068 WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
1069 }
1070
1071 return 0;
1072 }
1073
1074 static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
1075 struct amdgpu_mm_table *table)
1076 {
1077 uint32_t data = 0, loop = 0, size = 0;
1078 uint64_t addr = table->gpu_addr;
1079 struct mmsch_v1_1_init_header *header = NULL;
1080
1081 header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
1082 size = header->total_size;
1083
1084 /*
1085 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
1086 * memory descriptor location
1087 */
1088 WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
1089 WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
1090
1091 /* 2, update vmid of descriptor */
1092 data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
1093 data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1094 /* use domain0 for MM scheduler */
1095 data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1096 WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
1097
1098 /* 3, notify mmsch about the size of this descriptor */
1099 WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
1100
1101 /* 4, set resp to zero */
1102 WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1103
1104 /*
1105 * 5, kick off the initialization and wait until
1106 * MMSCH_VF_MAILBOX_RESP becomes non-zero
1107 */
1108 WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
1109
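/* poll the mailbox response; give the MMSCH up to 10 polls of 100us each */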
1110 data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
1111 loop = 10;
1112 while ((data & 0x10000002) != 0x10000002) {
1113 udelay(100);
1114 data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
1115 loop--;
1116 if (!loop)
1117 break;
1118 }
1119
1120 if (!loop) {
1121 dev_err(adev->dev,
1122 "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
1123 data);
1124 return -EBUSY;
1125 }
1126
1127 return 0;
1128 }
1129
1130 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
1131 {
1132 struct amdgpu_ring *ring;
1133 uint32_t offset, size, tmp, i, rb_bufsz;
1134 uint32_t table_size = 0;
1135 struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
1136 struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
1137 struct mmsch_v1_0_cmd_direct_polling direct_poll __unused = { { 0 } };
1138 struct mmsch_v1_0_cmd_end end = { { 0 } };
1139 uint32_t *init_table = adev->virt.mm_table.cpu_addr;
1140 struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
1141
1142 direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
1143 direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
1144 direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
1145 end.cmd_header.command_type = MMSCH_COMMAND__END;
1146
1147 header->version = MMSCH_VERSION;
1148 header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
1149 init_table += header->total_size;
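/* header->total_size and each table_size below are counted in dwords, matching the uint32_t stride of init_table */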
1150
1151 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1152 header->eng[i].table_offset = header->total_size;
1153 header->eng[i].init_status = 0;
1154 header->eng[i].table_size = 0;
1155
1156 table_size = 0;
1157
1158 MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
1159 SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
1160 ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1161
1162 size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
1163 /* mc resume*/
1164 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1165 MMSCH_V1_0_INSERT_DIRECT_WT(
1166 SOC15_REG_OFFSET(UVD, i,
1167 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1168 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1169 MMSCH_V1_0_INSERT_DIRECT_WT(
1170 SOC15_REG_OFFSET(UVD, i,
1171 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1172 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1173 offset = 0;
1174 MMSCH_V1_0_INSERT_DIRECT_WT(
1175 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
1176 } else {
1177 MMSCH_V1_0_INSERT_DIRECT_WT(
1178 SOC15_REG_OFFSET(UVD, i,
1179 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1180 lower_32_bits(adev->vcn.inst[i].gpu_addr));
1181 MMSCH_V1_0_INSERT_DIRECT_WT(
1182 SOC15_REG_OFFSET(UVD, i,
1183 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1184 upper_32_bits(adev->vcn.inst[i].gpu_addr));
1185 offset = size;
1186 MMSCH_V1_0_INSERT_DIRECT_WT(
1187 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
1188 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1189 }
1190
1191 MMSCH_V1_0_INSERT_DIRECT_WT(
1192 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
1193 size);
1194 MMSCH_V1_0_INSERT_DIRECT_WT(
1195 SOC15_REG_OFFSET(UVD, i,
1196 mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1197 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1198 MMSCH_V1_0_INSERT_DIRECT_WT(
1199 SOC15_REG_OFFSET(UVD, i,
1200 mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1201 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1202 MMSCH_V1_0_INSERT_DIRECT_WT(
1203 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
1204 0);
1205 MMSCH_V1_0_INSERT_DIRECT_WT(
1206 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
1207 AMDGPU_VCN_STACK_SIZE);
1208 MMSCH_V1_0_INSERT_DIRECT_WT(
1209 SOC15_REG_OFFSET(UVD, i,
1210 mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1211 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1212 AMDGPU_VCN_STACK_SIZE));
1213 MMSCH_V1_0_INSERT_DIRECT_WT(
1214 SOC15_REG_OFFSET(UVD, i,
1215 mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1216 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1217 AMDGPU_VCN_STACK_SIZE));
1218 MMSCH_V1_0_INSERT_DIRECT_WT(
1219 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
1220 0);
1221 MMSCH_V1_0_INSERT_DIRECT_WT(
1222 SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
1223 AMDGPU_VCN_CONTEXT_SIZE);
1224
1225 ring = &adev->vcn.inst[i].ring_enc[0];
1226 ring->wptr = 0;
1227
1228 MMSCH_V1_0_INSERT_DIRECT_WT(
1229 SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
1230 lower_32_bits(ring->gpu_addr));
1231 MMSCH_V1_0_INSERT_DIRECT_WT(
1232 SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
1233 upper_32_bits(ring->gpu_addr));
1234 MMSCH_V1_0_INSERT_DIRECT_WT(
1235 SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
1236 ring->ring_size / 4);
1237
1238 ring = &adev->vcn.inst[i].ring_dec;
1239 ring->wptr = 0;
1240 MMSCH_V1_0_INSERT_DIRECT_WT(
1241 SOC15_REG_OFFSET(UVD, i,
1242 mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1243 lower_32_bits(ring->gpu_addr));
1244 MMSCH_V1_0_INSERT_DIRECT_WT(
1245 SOC15_REG_OFFSET(UVD, i,
1246 mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1247 upper_32_bits(ring->gpu_addr));
1248
1249 /* force RBC into idle state */
1250 rb_bufsz = order_base_2(ring->ring_size);
1251 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
1252 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1253 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1254 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1255 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1256 MMSCH_V1_0_INSERT_DIRECT_WT(
1257 SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
1258
1259 /* add end packet */
1260 memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
1261 table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1262 init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
1263
1264 /* refine header */
1265 header->eng[i].table_size = table_size;
1266 header->total_size += table_size;
1267 }
1268
1269 return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
1270 }
1271
1272 static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
1273 {
1274 int ret_code __unused = 0;
1275 uint32_t tmp;
1276
1277 /* Wait for power status to be 1 */
1278 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
1279 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1280
1281 /* wait for read ptr to be equal to write ptr */
1282 tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
1283 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
1284
1285 tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
1286 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
1287
1288 tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1289 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
1290
1291 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
1292 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1293
1294 /* disable dynamic power gating mode */
1295 WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
1296 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1297
1298 return 0;
1299 }
1300
1301 static int vcn_v2_5_stop(struct amdgpu_device *adev)
1302 {
1303 uint32_t tmp;
1304 int i, r = 0;
1305
1306 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1307 if (adev->vcn.harvest_config & (1 << i))
1308 continue;
1309 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1310 r = vcn_v2_5_stop_dpg_mode(adev, i);
1311 continue;
1312 }
1313
1314 /* wait for vcn idle */
1315 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
1316 if (r)
1317 return r;
1318
1319 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1320 UVD_LMI_STATUS__READ_CLEAN_MASK |
1321 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1322 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1323 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
1324 if (r)
1325 return r;
1326
1327 /* block LMI UMC channel */
1328 tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1329 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1330 WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1331
1332 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
1333 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1334 SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
1335 if (r)
1336 return r;
1337
1338 /* block VCPU register access */
1339 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
1340 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1341 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1342
1343 /* reset VCPU */
1344 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
1345 UVD_VCPU_CNTL__BLK_RST_MASK,
1346 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1347
1348 /* disable VCPU clock */
1349 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
1350 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1351
1352 /* clear status */
1353 WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1354
1355 vcn_v2_5_enable_clock_gating(adev);
1356
1357 /* enable register anti-hang mechanism */
1358 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
1359 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
1360 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1361 }
1362
1363 if (adev->pm.dpm_enabled)
1364 amdgpu_dpm_enable_uvd(adev, false);
1365
1366 return 0;
1367 }
1368
1369 static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
1370 int inst_idx, struct dpg_pause_state *new_state)
1371 {
1372 struct amdgpu_ring *ring;
1373 uint32_t reg_data = 0;
1374 int ret_code;
1375
1376 /* pause/unpause if state is changed */
1377 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1378 DRM_DEBUG("dpg pause state changed %d -> %d",
1379 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1380 reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
1381 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1382
1383 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1384 ret_code = 0;
1385 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
1386 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1387
1388 if (!ret_code) {
1389 /* pause DPG */
1390 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1391 WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1392
1393 /* wait for ACK */
1394 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
1395 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1396 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
1397
1398 /* Restore */
1399 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1400 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1401 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1402 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1403 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1404 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1405
1406 ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1407 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1408 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1409 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1410 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1411 WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1412
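/*
 * UVD_SCRATCH2 shadows the decode write pointer while in DPG mode
 * (vcn_v2_5_dec_ring_set_wptr sets bit 31 as a valid flag), so restore
 * RBC_RB_WPTR from it with that flag masked off.
 */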
1413 WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
1414 RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
1415
1416 SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
1417 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
1418 }
1419 } else {
1420 /* unpause dpg, no need to wait */
1421 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1422 WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1423 }
1424 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1425 }
1426
1427 return 0;
1428 }
1429
1430 /**
1431 * vcn_v2_5_dec_ring_get_rptr - get read pointer
1432 *
1433 * @ring: amdgpu_ring pointer
1434 *
1435 * Returns the current hardware read pointer
1436 */
1437 static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
1438 {
1439 struct amdgpu_device *adev = ring->adev;
1440
1441 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
1442 }
1443
1444 /**
1445 * vcn_v2_5_dec_ring_get_wptr - get write pointer
1446 *
1447 * @ring: amdgpu_ring pointer
1448 *
1449 * Returns the current hardware write pointer
1450 */
1451 static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
1452 {
1453 struct amdgpu_device *adev = ring->adev;
1454
1455 if (ring->use_doorbell)
1456 return adev->wb.wb[ring->wptr_offs];
1457 else
1458 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
1459 }
1460
1461 /**
1462 * vcn_v2_5_dec_ring_set_wptr - set write pointer
1463 *
1464 * @ring: amdgpu_ring pointer
1465 *
1466 * Commits the write pointer to the hardware
1467 */
1468 static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
1469 {
1470 struct amdgpu_device *adev = ring->adev;
1471
1472 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1473 WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
1474 lower_32_bits(ring->wptr) | 0x80000000);
1475
1476 if (ring->use_doorbell) {
1477 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1478 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1479 } else {
1480 WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1481 }
1482 }
1483
1484 static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
1485 .type = AMDGPU_RING_TYPE_VCN_DEC,
1486 .align_mask = 0xf,
1487 .vmhub = AMDGPU_MMHUB_1,
1488 .get_rptr = vcn_v2_5_dec_ring_get_rptr,
1489 .get_wptr = vcn_v2_5_dec_ring_get_wptr,
1490 .set_wptr = vcn_v2_5_dec_ring_set_wptr,
1491 .emit_frame_size =
1492 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1493 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1494 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1495 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1496 6,
1497 .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1498 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
1499 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
1500 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1501 .test_ring = vcn_v2_0_dec_ring_test_ring,
1502 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1503 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
1504 .insert_start = vcn_v2_0_dec_ring_insert_start,
1505 .insert_end = vcn_v2_0_dec_ring_insert_end,
1506 .pad_ib = amdgpu_ring_generic_pad_ib,
1507 .begin_use = amdgpu_vcn_ring_begin_use,
1508 .end_use = amdgpu_vcn_ring_end_use,
1509 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1510 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1511 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1512 };
1513
1514 /**
1515 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
1516 *
1517 * @ring: amdgpu_ring pointer
1518 *
1519 * Returns the current hardware enc read pointer
1520 */
1521 static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
1522 {
1523 struct amdgpu_device *adev = ring->adev;
1524
1525 if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1526 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
1527 else
1528 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
1529 }
1530
1531 /**
1532 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
1533 *
1534 * @ring: amdgpu_ring pointer
1535 *
1536 * Returns the current hardware enc write pointer
1537 */
1538 static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
1539 {
1540 struct amdgpu_device *adev = ring->adev;
1541
1542 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1543 if (ring->use_doorbell)
1544 return adev->wb.wb[ring->wptr_offs];
1545 else
1546 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
1547 } else {
1548 if (ring->use_doorbell)
1549 return adev->wb.wb[ring->wptr_offs];
1550 else
1551 return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
1552 }
1553 }
1554
1555 /**
1556 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
1557 *
1558 * @ring: amdgpu_ring pointer
1559 *
1560 * Commits the enc write pointer to the hardware
1561 */
1562 static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
1563 {
1564 struct amdgpu_device *adev = ring->adev;
1565
1566 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1567 if (ring->use_doorbell) {
1568 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1569 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1570 } else {
1571 WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1572 }
1573 } else {
1574 if (ring->use_doorbell) {
1575 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
1576 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1577 } else {
1578 WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1579 }
1580 }
1581 }
1582
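/*
 * Encode rings likewise reuse the VCN 2.0 encode packet emitters; the
 * frame-size budget below accounts for the VM flush, two fences and the
 * trailing insert_end packet.
 */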
static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_1,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

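	/* Idle only if every unharvested instance reports UVD_STATUS__IDLE. */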
1646 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1647 if (adev->vcn.harvest_config & (1 << i))
1648 continue;
1649 ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
1650 }
1651
1652 return ret;
1653 }

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

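	/* Clock gating may only be engaged while the block is idle. */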
	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

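	/*
	 * Power gating maps onto a full stop/start of the block; the current
	 * state is cached so redundant transitions are skipped.
	 */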
	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
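	/* No per-source enable/disable is required; accept the state change as-is. */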
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

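	/*
	 * The IH client id selects the VCN instance; the source id selects
	 * the ring whose fences should be processed.
	 */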
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

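	/* One interrupt type per encode ring plus one for the decode ring. */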
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};