xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_vcn.c (revision 8dfe214903ce3625c937d5fad2469e8a0d1d4d71)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sub license, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
20  *
21  * The above copyright notice and this permission notice (including the
22  * next paragraph) shall be included in all copies or substantial portions
23  * of the Software.
24  *
25  */
26 
27 #include <linux/firmware.h>
28 #include <linux/module.h>
29 #include <linux/dmi.h>
30 #include <linux/pci.h>
31 #include <linux/debugfs.h>
32 #include <drm/drm_drv.h>
33 
34 #include "amdgpu.h"
35 #include "amdgpu_pm.h"
36 #include "amdgpu_vcn.h"
37 #include "soc15d.h"
38 
39 /* Firmware Names */
40 #define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
41 #define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
42 #define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
43 #define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
44 #define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
45 #define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
46 #define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
47 #define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
48 #define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
49 #define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
50 #define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
51 #define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
52 #define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
53 #define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
54 #define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
55 #define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
56 #define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
57 #define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
58 #define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
59 #define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
60 
61 MODULE_FIRMWARE(FIRMWARE_RAVEN);
62 MODULE_FIRMWARE(FIRMWARE_PICASSO);
63 MODULE_FIRMWARE(FIRMWARE_RAVEN2);
64 MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
65 MODULE_FIRMWARE(FIRMWARE_RENOIR);
66 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
67 MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
68 MODULE_FIRMWARE(FIRMWARE_NAVI10);
69 MODULE_FIRMWARE(FIRMWARE_NAVI14);
70 MODULE_FIRMWARE(FIRMWARE_NAVI12);
71 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
72 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
73 MODULE_FIRMWARE(FIRMWARE_VANGOGH);
74 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
75 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
76 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
77 MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
78 MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
79 MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
80 MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
81 
82 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
83 
84 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
85 {
86 	unsigned long bo_size;
87 	const char *fw_name;
88 	const struct common_firmware_header *hdr;
89 	unsigned char fw_check;
90 	unsigned int fw_shared_size, log_offset;
91 	int i, r;
92 
93 	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
94 	rw_init(&adev->vcn.vcn_pg_lock, "vcnpg");
95 	rw_init(&adev->vcn.vcn1_jpeg1_workaround, "vcnwa");
96 	atomic_set(&adev->vcn.total_submission_cnt, 0);
97 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
98 		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
99 
100 	switch (adev->ip_versions[UVD_HWIP][0]) {
101 	case IP_VERSION(1, 0, 0):
102 	case IP_VERSION(1, 0, 1):
103 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
104 			fw_name = FIRMWARE_RAVEN2;
105 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
106 			fw_name = FIRMWARE_PICASSO;
107 		else
108 			fw_name = FIRMWARE_RAVEN;
109 		break;
110 	case IP_VERSION(2, 5, 0):
111 		fw_name = FIRMWARE_ARCTURUS;
112 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
113 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
114 			adev->vcn.indirect_sram = true;
115 		break;
116 	case IP_VERSION(2, 2, 0):
117 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
118 			fw_name = FIRMWARE_RENOIR;
119 		else
120 			fw_name = FIRMWARE_GREEN_SARDINE;
121 
122 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
123 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
124 			adev->vcn.indirect_sram = true;
125 		break;
126 	case IP_VERSION(2, 6, 0):
127 		fw_name = FIRMWARE_ALDEBARAN;
128 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
129 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
130 			adev->vcn.indirect_sram = true;
131 		break;
132 	case IP_VERSION(2, 0, 0):
133 		fw_name = FIRMWARE_NAVI10;
134 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
135 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
136 			adev->vcn.indirect_sram = true;
137 		break;
138 	case IP_VERSION(2, 0, 2):
139 		if (adev->asic_type == CHIP_NAVI12)
140 			fw_name = FIRMWARE_NAVI12;
141 		else
142 			fw_name = FIRMWARE_NAVI14;
143 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
144 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
145 			adev->vcn.indirect_sram = true;
146 		break;
147 	case IP_VERSION(3, 0, 0):
148 	case IP_VERSION(3, 0, 64):
149 	case IP_VERSION(3, 0, 192):
150 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
151 			fw_name = FIRMWARE_SIENNA_CICHLID;
152 		else
153 			fw_name = FIRMWARE_NAVY_FLOUNDER;
154 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
155 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
156 			adev->vcn.indirect_sram = true;
157 		break;
158 	case IP_VERSION(3, 0, 2):
159 		fw_name = FIRMWARE_VANGOGH;
160 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
161 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
162 			adev->vcn.indirect_sram = true;
163 		break;
164 	case IP_VERSION(3, 0, 16):
165 		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
166 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
167 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
168 			adev->vcn.indirect_sram = true;
169 		break;
170 	case IP_VERSION(3, 0, 33):
171 		fw_name = FIRMWARE_BEIGE_GOBY;
172 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
173 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
174 			adev->vcn.indirect_sram = true;
175 		break;
176 	case IP_VERSION(3, 1, 1):
177 		fw_name = FIRMWARE_YELLOW_CARP;
178 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
179 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
180 			adev->vcn.indirect_sram = true;
181 		break;
182 	case IP_VERSION(3, 1, 2):
183 		fw_name = FIRMWARE_VCN_3_1_2;
184 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
185 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
186 			adev->vcn.indirect_sram = true;
187 		break;
188 	case IP_VERSION(4, 0, 0):
189 		fw_name = FIRMWARE_VCN4_0_0;
190 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
191 			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
192 			adev->vcn.indirect_sram = true;
193 		break;
194 	case IP_VERSION(4, 0, 2):
195 		fw_name = FIRMWARE_VCN4_0_2;
196 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
197 			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
198 			adev->vcn.indirect_sram = true;
199 		break;
200 	case IP_VERSION(4, 0, 4):
201 		fw_name = FIRMWARE_VCN4_0_4;
202 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
203 			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
204 			adev->vcn.indirect_sram = true;
205 		break;
206 	default:
207 		return -EINVAL;
208 	}
209 
210 	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
211 	if (r) {
212 		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
213 			fw_name);
214 		return r;
215 	}
216 
217 	r = amdgpu_ucode_validate(adev->vcn.fw);
218 	if (r) {
219 		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
220 			fw_name);
221 		release_firmware(adev->vcn.fw);
222 		adev->vcn.fw = NULL;
223 		return r;
224 	}
225 
226 	/*
227 	 * Some Steam Deck BIOS versions are incompatible with the
228 	 * indirect SRAM mode, leading to amdgpu being unable to get
229 	 * properly probed (and even potentially crashing the kernel).
230 	 * Hence, check for these versions here - notice this is
231 	 * restricted to Vangogh (Deck's APU).
232 	 */
233 	if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
234 		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
235 
236 		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
237 		     !strncmp("F7A0114", bios_ver, 7))) {
238 			adev->vcn.indirect_sram = false;
239 			dev_info(adev->dev,
240 				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
241 		}
242 	}
243 
244 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
245 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
246 
247 	/* Bits 20-23 are the encode major version and are non-zero for the new naming
248 	 * convention. This field is part of version minor and DRM_DISABLED_FLAG in the
249 	 * old naming convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
250 	 * is zero in old naming convention, this field is always zero so far.
251 	 * These four bits are used to tell which naming convention is present.
252 	 */
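	/*
	 * Worked example (hypothetical value, for illustration only): with
	 * ucode_version = 0x0511f00b, bits 20-23 are 0x1, so the new naming
	 * convention applies and the fields decode as
	 *   fw_rev    =  0x0511f00b        & 0xfff = 11
	 *   enc_minor = (0x0511f00b >> 12) & 0xff  = 31
	 *   enc_major = (0x0511f00b >> 20) & 0xf   = 1
	 *   dec_ver   = (0x0511f00b >> 24) & 0xf   = 5
	 *   vep       = (0x0511f00b >> 28) & 0xf   = 0
	 * i.e. "ENC: 1.31 DEC: 5 VEP: 0 Revision: 11".
	 */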
253 	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
254 	if (fw_check) {
255 		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
256 
257 		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
258 		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
259 		enc_major = fw_check;
260 		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
261 		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
262 		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
263 			enc_major, enc_minor, dec_ver, vep, fw_rev);
264 	} else {
265 		unsigned int version_major, version_minor, family_id;
266 
267 		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
268 		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
269 		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
270 		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
271 			version_major, version_minor, family_id);
272 	}
273 
274 	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
275 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
276 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
277 
278 	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
279 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
280 		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
281 	} else {
282 		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
283 		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
284 	}
285 
286 	bo_size += fw_shared_size;
287 
288 	if (amdgpu_vcnfw_log)
289 		bo_size += AMDGPU_VCNFW_LOG_SIZE;
290 
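	/*
	 * Resulting layout of each instance's VCPU BO (a sketch; the firmware
	 * image part only exists for non-PSP loads and the log tail only when
	 * amdgpu_vcnfw_log is set):
	 *
	 *   [ ucode image ][ VCN stack + context ][ fw_shared ][ fw log ]
	 *
	 * The fw_shared pointers computed below therefore sit fw_shared_size
	 * (plus AMDGPU_VCNFW_LOG_SIZE when logging) bytes before the end.
	 */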
291 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
292 		if (adev->vcn.harvest_config & (1 << i))
293 			continue;
294 
295 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
296 						AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
297 						&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
298 		if (r) {
299 			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
300 			return r;
301 		}
302 
303 		adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
304 				bo_size - fw_shared_size;
305 		adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
306 				bo_size - fw_shared_size;
307 
308 		adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
309 
310 		if (amdgpu_vcnfw_log) {
311 			adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
312 			adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
313 			adev->vcn.inst[i].fw_shared.log_offset = log_offset;
314 		}
315 
316 		if (adev->vcn.indirect_sram) {
317 			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
318 					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
319 					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
320 			if (r) {
321 				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
322 				return r;
323 			}
324 		}
325 	}
326 
327 	return 0;
328 }
329 
330 int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
331 {
332 	int i, j;
333 
334 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
335 		if (adev->vcn.harvest_config & (1 << j))
336 			continue;
337 
338 		if (adev->vcn.indirect_sram) {
339 			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
340 						  &adev->vcn.inst[j].dpg_sram_gpu_addr,
341 						  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
342 		}
343 		kvfree(adev->vcn.inst[j].saved_bo);
344 
345 		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
346 					  &adev->vcn.inst[j].gpu_addr,
347 					  (void **)&adev->vcn.inst[j].cpu_addr);
348 
349 		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
350 
351 		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
352 			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
353 	}
354 
355 	release_firmware(adev->vcn.fw);
356 	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
357 	mutex_destroy(&adev->vcn.vcn_pg_lock);
358 
359 	return 0;
360 }
361 
362 /* From VCN 4 and above, only the unified queue is used. */
363 static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
364 {
365 	struct amdgpu_device *adev = ring->adev;
366 	bool ret = false;
367 
368 	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
369 		ret = true;
370 
371 	return ret;
372 }
373 
374 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
375 {
376 	bool ret = false;
377 	int vcn_config = adev->vcn.vcn_config[vcn_instance];
378 
379 	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
380 		ret = true;
381 	} else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)) {
382 		ret = true;
383 	} else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
384 		ret = true;
385 	}
386 
387 	return ret;
388 }
389 
390 int amdgpu_vcn_suspend(struct amdgpu_device *adev)
391 {
392 	unsigned size;
393 	void *ptr;
394 	int i, idx;
395 
396 	bool in_ras_intr = amdgpu_ras_intr_triggered();
397 
398 	cancel_delayed_work_sync(&adev->vcn.idle_work);
399 
400 	/* err_event_athub will corrupt VCPU buffer, so we need to
401 	 * restore fw data and clear buffer in amdgpu_vcn_resume() */
402 	if (in_ras_intr)
403 		return 0;
404 
405 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
406 		if (adev->vcn.harvest_config & (1 << i))
407 			continue;
408 		if (adev->vcn.inst[i].vcpu_bo == NULL)
409 			return 0;
410 
411 		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
412 		ptr = adev->vcn.inst[i].cpu_addr;
413 
414 		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
415 		if (!adev->vcn.inst[i].saved_bo)
416 			return -ENOMEM;
417 
418 		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
419 			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
420 			drm_dev_exit(idx);
421 		}
422 	}
423 	return 0;
424 }
425 
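/*
 * Restore the VCPU BO contents: either copy back the snapshot taken in
 * amdgpu_vcn_suspend(), or, when no snapshot exists (e.g. after a RAS
 * err_event_athub interrupt skipped the save), re-seed the ucode image from
 * the firmware file for non-PSP loads and clear the remainder of the BO.
 */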
426 int amdgpu_vcn_resume(struct amdgpu_device *adev)
427 {
428 	unsigned size;
429 	void *ptr;
430 	int i, idx;
431 
432 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
433 		if (adev->vcn.harvest_config & (1 << i))
434 			continue;
435 		if (adev->vcn.inst[i].vcpu_bo == NULL)
436 			return -EINVAL;
437 
438 		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
439 		ptr = adev->vcn.inst[i].cpu_addr;
440 
441 		if (adev->vcn.inst[i].saved_bo != NULL) {
442 			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
443 				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
444 				drm_dev_exit(idx);
445 			}
446 			kvfree(adev->vcn.inst[i].saved_bo);
447 			adev->vcn.inst[i].saved_bo = NULL;
448 		} else {
449 			const struct common_firmware_header *hdr;
450 			unsigned offset;
451 
452 			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
453 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
454 				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
455 				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
456 					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
457 						    le32_to_cpu(hdr->ucode_size_bytes));
458 					drm_dev_exit(idx);
459 				}
460 				size -= le32_to_cpu(hdr->ucode_size_bytes);
461 				ptr += le32_to_cpu(hdr->ucode_size_bytes);
462 			}
463 			memset_io(ptr, 0, size);
464 		}
465 	}
466 	return 0;
467 }
468 
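/*
 * Idle handler: count the fences still outstanding on each non-harvested
 * instance, pause/unpause DPG per instance based on its encode activity, and
 * only gate the whole VCN block (and drop the VIDEO power profile) once no
 * fences remain and nothing is being submitted.
 */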
469 static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
470 {
471 	struct amdgpu_device *adev =
472 		container_of(work, struct amdgpu_device, vcn.idle_work.work);
473 	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
474 	unsigned int i, j;
475 	int r = 0;
476 
477 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
478 		if (adev->vcn.harvest_config & (1 << j))
479 			continue;
480 
481 		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
482 			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
483 		}
484 
485 		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
486 			struct dpg_pause_state new_state;
487 
488 			if (fence[j] ||
489 				unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
490 				new_state.fw_based = VCN_DPG_STATE__PAUSE;
491 			else
492 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
493 
494 			adev->vcn.pause_dpg_mode(adev, j, &new_state);
495 		}
496 
497 		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
498 		fences += fence[j];
499 	}
500 
501 	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
502 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
503 		       AMD_PG_STATE_GATE);
504 		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
505 				false);
506 		if (r)
507 			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
508 	} else {
509 		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
510 	}
511 }
512 
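/*
 * begin_use/end_use bracket every command submission: begin_use bumps the
 * submission count, cancels the idle work (re-enabling the VIDEO power
 * profile if the work was not pending), ungates VCN and updates the DPG
 * pause state; end_use drops the counts and re-arms the idle timer.
 */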
513 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
514 {
515 	struct amdgpu_device *adev = ring->adev;
516 	int r = 0;
517 
518 	atomic_inc(&adev->vcn.total_submission_cnt);
519 
520 	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
521 		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
522 				true);
523 		if (r)
524 			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
525 	}
526 
527 	mutex_lock(&adev->vcn.vcn_pg_lock);
528 	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
529 	       AMD_PG_STATE_UNGATE);
530 
531 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
532 		struct dpg_pause_state new_state;
533 
534 		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
535 			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
536 			new_state.fw_based = VCN_DPG_STATE__PAUSE;
537 		} else {
538 			unsigned int fences = 0;
539 			unsigned int i;
540 
541 			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
542 				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
543 
544 			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
545 				new_state.fw_based = VCN_DPG_STATE__PAUSE;
546 			else
547 				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
548 		}
549 
550 		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
551 	}
552 	mutex_unlock(&adev->vcn.vcn_pg_lock);
553 }
554 
555 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
556 {
557 	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
558 		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
559 		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
560 
561 	atomic_dec(&ring->adev->vcn.total_submission_cnt);
562 
563 	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
564 }
565 
566 int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
567 {
568 	struct amdgpu_device *adev = ring->adev;
569 	uint32_t tmp = 0;
570 	unsigned i;
571 	int r;
572 
573 	/* VCN in SRIOV does not support direct register read/write */
574 	if (amdgpu_sriov_vf(adev))
575 		return 0;
576 
577 	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
578 	r = amdgpu_ring_alloc(ring, 3);
579 	if (r)
580 		return r;
581 	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
582 	amdgpu_ring_write(ring, 0xDEADBEEF);
583 	amdgpu_ring_commit(ring);
584 	for (i = 0; i < adev->usec_timeout; i++) {
585 		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
586 		if (tmp == 0xDEADBEEF)
587 			break;
588 		udelay(1);
589 	}
590 
591 	if (i >= adev->usec_timeout)
592 		r = -ETIMEDOUT;
593 
594 	return r;
595 }
596 
597 int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
598 {
599 	struct amdgpu_device *adev = ring->adev;
600 	uint32_t rptr;
601 	unsigned int i;
602 	int r;
603 
604 	if (amdgpu_sriov_vf(adev))
605 		return 0;
606 
607 	r = amdgpu_ring_alloc(ring, 16);
608 	if (r)
609 		return r;
610 
611 	rptr = amdgpu_ring_get_rptr(ring);
612 
613 	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
614 	amdgpu_ring_commit(ring);
615 
616 	for (i = 0; i < adev->usec_timeout; i++) {
617 		if (amdgpu_ring_get_rptr(ring) != rptr)
618 			break;
619 		udelay(1);
620 	}
621 
622 	if (i >= adev->usec_timeout)
623 		r = -ETIMEDOUT;
624 
625 	return r;
626 }
627 
628 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
629 				   struct amdgpu_ib *ib_msg,
630 				   struct dma_fence **fence)
631 {
632 	struct amdgpu_device *adev = ring->adev;
633 	struct dma_fence *f = NULL;
634 	struct amdgpu_job *job;
635 	struct amdgpu_ib *ib;
636 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
637 	int i, r;
638 
639 	r = amdgpu_job_alloc_with_ib(adev, 64,
640 					AMDGPU_IB_POOL_DIRECT, &job);
641 	if (r)
642 		goto err;
643 
644 	ib = &job->ibs[0];
645 	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
646 	ib->ptr[1] = addr;
647 	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
648 	ib->ptr[3] = addr >> 32;
649 	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
650 	ib->ptr[5] = 0;
651 	for (i = 6; i < 16; i += 2) {
652 		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
653 		ib->ptr[i+1] = 0;
654 	}
655 	ib->length_dw = 16;
656 
657 	r = amdgpu_job_submit_direct(job, ring, &f);
658 	if (r)
659 		goto err_free;
660 
661 	amdgpu_ib_free(adev, ib_msg, f);
662 
663 	if (fence)
664 		*fence = dma_fence_get(f);
665 	dma_fence_put(f);
666 
667 	return 0;
668 
669 err_free:
670 	amdgpu_job_free(job);
671 err:
672 	amdgpu_ib_free(adev, ib_msg, f);
673 	return r;
674 }
675 
676 static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
677 		struct amdgpu_ib *ib)
678 {
679 	struct amdgpu_device *adev = ring->adev;
680 	uint32_t *msg;
681 	int r, i;
682 
683 	memset(ib, 0, sizeof(*ib));
684 	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
685 			AMDGPU_IB_POOL_DIRECT,
686 			ib);
687 	if (r)
688 		return r;
689 
690 	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
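	/*
	 * Canonical decoder "create session" test message: msg[4] carries the
	 * session handle; 0x780/0x440 are 1920/1088, presumably the picture
	 * dimensions the test session advertises.  The rest is cleared.
	 */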
691 	msg[0] = cpu_to_le32(0x00000028);
692 	msg[1] = cpu_to_le32(0x00000038);
693 	msg[2] = cpu_to_le32(0x00000001);
694 	msg[3] = cpu_to_le32(0x00000000);
695 	msg[4] = cpu_to_le32(handle);
696 	msg[5] = cpu_to_le32(0x00000000);
697 	msg[6] = cpu_to_le32(0x00000001);
698 	msg[7] = cpu_to_le32(0x00000028);
699 	msg[8] = cpu_to_le32(0x00000010);
700 	msg[9] = cpu_to_le32(0x00000000);
701 	msg[10] = cpu_to_le32(0x00000007);
702 	msg[11] = cpu_to_le32(0x00000000);
703 	msg[12] = cpu_to_le32(0x00000780);
704 	msg[13] = cpu_to_le32(0x00000440);
705 	for (i = 14; i < 1024; ++i)
706 		msg[i] = cpu_to_le32(0x0);
707 
708 	return 0;
709 }
710 
711 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
712 					  struct amdgpu_ib *ib)
713 {
714 	struct amdgpu_device *adev = ring->adev;
715 	uint32_t *msg;
716 	int r, i;
717 
718 	memset(ib, 0, sizeof(*ib));
719 	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
720 			AMDGPU_IB_POOL_DIRECT,
721 			ib);
722 	if (r)
723 		return r;
724 
725 	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
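	/*
	 * Matching "destroy session" message for the handle created above:
	 * a much shorter body, with the same handle in msg[4] and the rest
	 * cleared.
	 */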
726 	msg[0] = cpu_to_le32(0x00000028);
727 	msg[1] = cpu_to_le32(0x00000018);
728 	msg[2] = cpu_to_le32(0x00000000);
729 	msg[3] = cpu_to_le32(0x00000002);
730 	msg[4] = cpu_to_le32(handle);
731 	msg[5] = cpu_to_le32(0x00000000);
732 	for (i = 6; i < 1024; ++i)
733 		msg[i] = cpu_to_le32(0x0);
734 
735 	return 0;
736 }
737 
738 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
739 {
740 	struct dma_fence *fence = NULL;
741 	struct amdgpu_ib ib;
742 	long r;
743 
744 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
745 	if (r)
746 		goto error;
747 
748 	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
749 	if (r)
750 		goto error;
751 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
752 	if (r)
753 		goto error;
754 
755 	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
756 	if (r)
757 		goto error;
758 
759 	r = dma_fence_wait_timeout(fence, false, timeout);
760 	if (r == 0)
761 		r = -ETIMEDOUT;
762 	else if (r > 0)
763 		r = 0;
764 
765 	dma_fence_put(fence);
766 error:
767 	return r;
768 }
769 
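/*
 * Unified-queue submissions carry an 8-dword prefix: a "single queue
 * checksum" packet whose third dword is filled in later by
 * amdgpu_vcn_unified_ring_ib_checksum(), followed by an "engine info"
 * packet naming the engine (0x2 encode, 0x3 decode) and the payload size
 * in bytes.  This is why callers add 8 to ib_size_dw when sq is true.
 */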
770 static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
771 						uint32_t ib_pack_in_dw, bool enc)
772 {
773 	uint32_t *ib_checksum;
774 
775 	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
776 	ib->ptr[ib->length_dw++] = 0x30000002;
777 	ib_checksum = &ib->ptr[ib->length_dw++];
778 	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
779 
780 	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
781 	ib->ptr[ib->length_dw++] = 0x30000001;
782 	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
783 	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
784 
785 	return ib_checksum;
786 }
787 
788 static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
789 						uint32_t ib_pack_in_dw)
790 {
791 	uint32_t i;
792 	uint32_t checksum = 0;
793 
794 	for (i = 0; i < ib_pack_in_dw; i++)
795 		checksum += *(*ib_checksum + 2 + i);
796 
797 	**ib_checksum = checksum;
798 }
799 
800 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
801 				      struct amdgpu_ib *ib_msg,
802 				      struct dma_fence **fence)
803 {
804 	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
805 	unsigned int ib_size_dw = 64;
806 	struct amdgpu_device *adev = ring->adev;
807 	struct dma_fence *f = NULL;
808 	struct amdgpu_job *job;
809 	struct amdgpu_ib *ib;
810 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
811 	bool sq = amdgpu_vcn_using_unified_queue(ring);
812 	uint32_t *ib_checksum;
813 	uint32_t ib_pack_in_dw;
814 	int i, r;
815 
816 	if (sq)
817 		ib_size_dw += 8;
818 
819 	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
820 				AMDGPU_IB_POOL_DIRECT, &job);
821 	if (r)
822 		goto err;
823 
824 	ib = &job->ibs[0];
825 	ib->length_dw = 0;
826 
827 	/* single queue headers */
828 	if (sq) {
829 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
830 						+ 4 + 2; /* engine info + decoding ib in dw */
831 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
832 	}
833 
834 	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
835 	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
836 	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
837 	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
838 	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
839 
840 	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
841 	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
842 	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
843 
844 	for (i = ib->length_dw; i < ib_size_dw; ++i)
845 		ib->ptr[i] = 0x0;
846 
847 	if (sq)
848 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
849 
850 	r = amdgpu_job_submit_direct(job, ring, &f);
851 	if (r)
852 		goto err_free;
853 
854 	amdgpu_ib_free(adev, ib_msg, f);
855 
856 	if (fence)
857 		*fence = dma_fence_get(f);
858 	dma_fence_put(f);
859 
860 	return 0;
861 
862 err_free:
863 	amdgpu_job_free(job);
864 err:
865 	amdgpu_ib_free(adev, ib_msg, f);
866 	return r;
867 }
868 
869 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
870 {
871 	struct dma_fence *fence = NULL;
872 	struct amdgpu_ib ib;
873 	long r;
874 
875 	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
876 	if (r)
877 		goto error;
878 
879 	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
880 	if (r)
881 		goto error;
882 	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
883 	if (r)
884 		goto error;
885 
886 	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
887 	if (r)
888 		goto error;
889 
890 	r = dma_fence_wait_timeout(fence, false, timeout);
891 	if (r == 0)
892 		r = -ETIMEDOUT;
893 	else if (r > 0)
894 		r = 0;
895 
896 	dma_fence_put(fence);
897 error:
898 	return r;
899 }
900 
901 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
902 {
903 	struct amdgpu_device *adev = ring->adev;
904 	uint32_t rptr;
905 	unsigned i;
906 	int r;
907 
908 	if (amdgpu_sriov_vf(adev))
909 		return 0;
910 
911 	r = amdgpu_ring_alloc(ring, 16);
912 	if (r)
913 		return r;
914 
915 	rptr = amdgpu_ring_get_rptr(ring);
916 
917 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
918 	amdgpu_ring_commit(ring);
919 
920 	for (i = 0; i < adev->usec_timeout; i++) {
921 		if (amdgpu_ring_get_rptr(ring) != rptr)
922 			break;
923 		udelay(1);
924 	}
925 
926 	if (i >= adev->usec_timeout)
927 		r = -ETIMEDOUT;
928 
929 	return r;
930 }
931 
932 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
933 					 struct amdgpu_ib *ib_msg,
934 					 struct dma_fence **fence)
935 {
936 	unsigned int ib_size_dw = 16;
937 	struct amdgpu_job *job;
938 	struct amdgpu_ib *ib;
939 	struct dma_fence *f = NULL;
940 	uint32_t *ib_checksum = NULL;
941 	uint64_t addr;
942 	bool sq = amdgpu_vcn_using_unified_queue(ring);
943 	int i, r;
944 
945 	if (sq)
946 		ib_size_dw += 8;
947 
948 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
949 					AMDGPU_IB_POOL_DIRECT, &job);
950 	if (r)
951 		return r;
952 
953 	ib = &job->ibs[0];
954 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
955 
956 	ib->length_dw = 0;
957 
958 	if (sq)
959 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
960 
961 	ib->ptr[ib->length_dw++] = 0x00000018;
962 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
963 	ib->ptr[ib->length_dw++] = handle;
964 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
965 	ib->ptr[ib->length_dw++] = addr;
966 	ib->ptr[ib->length_dw++] = 0x0000000b;
967 
968 	ib->ptr[ib->length_dw++] = 0x00000014;
969 	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
970 	ib->ptr[ib->length_dw++] = 0x0000001c;
971 	ib->ptr[ib->length_dw++] = 0x00000000;
972 	ib->ptr[ib->length_dw++] = 0x00000000;
973 
974 	ib->ptr[ib->length_dw++] = 0x00000008;
975 	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
976 
977 	for (i = ib->length_dw; i < ib_size_dw; ++i)
978 		ib->ptr[i] = 0x0;
979 
980 	if (sq)
981 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
982 
983 	r = amdgpu_job_submit_direct(job, ring, &f);
984 	if (r)
985 		goto err;
986 
987 	if (fence)
988 		*fence = dma_fence_get(f);
989 	dma_fence_put(f);
990 
991 	return 0;
992 
993 err:
994 	amdgpu_job_free(job);
995 	return r;
996 }
997 
998 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
999 					  struct amdgpu_ib *ib_msg,
1000 					  struct dma_fence **fence)
1001 {
1002 	unsigned int ib_size_dw = 16;
1003 	struct amdgpu_job *job;
1004 	struct amdgpu_ib *ib;
1005 	struct dma_fence *f = NULL;
1006 	uint32_t *ib_checksum = NULL;
1007 	uint64_t addr;
1008 	bool sq = amdgpu_vcn_using_unified_queue(ring);
1009 	int i, r;
1010 
1011 	if (sq)
1012 		ib_size_dw += 8;
1013 
1014 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
1015 					AMDGPU_IB_POOL_DIRECT, &job);
1016 	if (r)
1017 		return r;
1018 
1019 	ib = &job->ibs[0];
1020 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
1021 
1022 	ib->length_dw = 0;
1023 
1024 	if (sq)
1025 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
1026 
1027 	ib->ptr[ib->length_dw++] = 0x00000018;
1028 	ib->ptr[ib->length_dw++] = 0x00000001;
1029 	ib->ptr[ib->length_dw++] = handle;
1030 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1031 	ib->ptr[ib->length_dw++] = addr;
1032 	ib->ptr[ib->length_dw++] = 0x0000000b;
1033 
1034 	ib->ptr[ib->length_dw++] = 0x00000014;
1035 	ib->ptr[ib->length_dw++] = 0x00000002;
1036 	ib->ptr[ib->length_dw++] = 0x0000001c;
1037 	ib->ptr[ib->length_dw++] = 0x00000000;
1038 	ib->ptr[ib->length_dw++] = 0x00000000;
1039 
1040 	ib->ptr[ib->length_dw++] = 0x00000008;
1041 	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
1042 
1043 	for (i = ib->length_dw; i < ib_size_dw; ++i)
1044 		ib->ptr[i] = 0x0;
1045 
1046 	if (sq)
1047 		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
1048 
1049 	r = amdgpu_job_submit_direct(job, ring, &f);
1050 	if (r)
1051 		goto err;
1052 
1053 	if (fence)
1054 		*fence = dma_fence_get(f);
1055 	dma_fence_put(f);
1056 
1057 	return 0;
1058 
1059 err:
1060 	amdgpu_job_free(job);
1061 	return r;
1062 }
1063 
1064 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1065 {
1066 	struct amdgpu_device *adev = ring->adev;
1067 	struct dma_fence *fence = NULL;
1068 	struct amdgpu_ib ib;
1069 	long r;
1070 
1071 	memset(&ib, 0, sizeof(ib));
1072 	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
1073 			AMDGPU_IB_POOL_DIRECT,
1074 			&ib);
1075 	if (r)
1076 		return r;
1077 
1078 	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
1079 	if (r)
1080 		goto error;
1081 
1082 	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
1083 	if (r)
1084 		goto error;
1085 
1086 	r = dma_fence_wait_timeout(fence, false, timeout);
1087 	if (r == 0)
1088 		r = -ETIMEDOUT;
1089 	else if (r > 0)
1090 		r = 0;
1091 
1092 error:
1093 	amdgpu_ib_free(adev, &ib, fence);
1094 	dma_fence_put(fence);
1095 
1096 	return r;
1097 }
1098 
1099 int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1100 {
1101 	long r;
1102 
1103 	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
1104 	if (r)
1105 		goto error;
1106 
1107 	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
1108 
1109 error:
1110 	return r;
1111 }
1112 
1113 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
1114 {
1115 	switch (ring) {
1116 	case 0:
1117 		return AMDGPU_RING_PRIO_0;
1118 	case 1:
1119 		return AMDGPU_RING_PRIO_1;
1120 	case 2:
1121 		return AMDGPU_RING_PRIO_2;
1122 	default:
1123 		return AMDGPU_RING_PRIO_0;
1124 	}
1125 }
1126 
1127 void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
1128 {
1129 	int i;
1130 	unsigned int idx;
1131 
1132 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1133 		const struct common_firmware_header *hdr;
1134 		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
1135 
1136 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1137 			if (adev->vcn.harvest_config & (1 << i))
1138 				continue;
1139 			/* currently only 2 FW instances are supported */
1140 			if (i >= 2) {
1141 				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
1142 				break;
1143 			}
1144 			idx = AMDGPU_UCODE_ID_VCN + i;
1145 			adev->firmware.ucode[idx].ucode_id = idx;
1146 			adev->firmware.ucode[idx].fw = adev->vcn.fw;
1147 			adev->firmware.fw_size +=
1148 				roundup2(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
1149 		}
1150 		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
1151 	}
1152 }
1153 
1154 /*
1155  * debugfs for mapping vcn firmware log buffer.
1156  */
1157 #if defined(CONFIG_DEBUG_FS)
1158 static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
1159                                              size_t size, loff_t *pos)
1160 {
1161 	struct amdgpu_vcn_inst *vcn;
1162 	void *log_buf;
1163 	volatile struct amdgpu_vcn_fwlog *plog;
1164 	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
1165 	unsigned int read_num[2] = {0};
1166 
1167 	vcn = file_inode(f)->i_private;
1168 	if (!vcn)
1169 		return -ENODEV;
1170 
1171 	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
1172 		return -EFAULT;
1173 
1174 	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1175 
1176 	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
1177 	read_pos = plog->rptr;
1178 	write_pos = plog->wptr;
1179 
1180 	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
1181 		return -EFAULT;
1182 
1183 	if (!size || (read_pos == write_pos))
1184 		return 0;
1185 
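	/*
	 * Work out how much can be copied: when the log has not wrapped this
	 * is a single chunk from rptr to wptr; when it has wrapped, read_num[0]
	 * covers rptr to the end of the buffer and read_num[1] the part from
	 * just past the header up to wptr.
	 */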
1186 	if (write_pos > read_pos) {
1187 		available = write_pos - read_pos;
1188 		read_num[0] = min(size, (size_t)available);
1189 	} else {
1190 		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
1191 		available = read_num[0] + write_pos - plog->header_size;
1192 		if (size > available)
1193 			read_num[1] = write_pos - plog->header_size;
1194 		else if (size > read_num[0])
1195 			read_num[1] = size - read_num[0];
1196 		else
1197 			read_num[0] = size;
1198 	}
1199 
1200 	for (i = 0; i < 2; i++) {
1201 		if (read_num[i]) {
1202 			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
1203 				read_pos = plog->header_size;
1204 			if (read_num[i] == copy_to_user((buf + read_bytes),
1205 			                                (log_buf + read_pos), read_num[i]))
1206 				return -EFAULT;
1207 
1208 			read_bytes += read_num[i];
1209 			read_pos += read_num[i];
1210 		}
1211 	}
1212 
1213 	plog->rptr = read_pos;
1214 	*pos += read_bytes;
1215 	return read_bytes;
1216 }
1217 
1218 static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
1219 	.owner = THIS_MODULE,
1220 	.read = amdgpu_debugfs_vcn_fwlog_read,
1221 	.llseek = default_llseek
1222 };
1223 #endif
1224 
1225 void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
1226                                    struct amdgpu_vcn_inst *vcn)
1227 {
1228 #if defined(CONFIG_DEBUG_FS)
1229 	struct drm_minor *minor = adev_to_drm(adev)->primary;
1230 	struct dentry *root = minor->debugfs_root;
1231 	char name[32];
1232 
1233 	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
1234 	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, vcn,
1235 				 &amdgpu_debugfs_vcnfwlog_fops,
1236 				 AMDGPU_VCNFW_LOG_SIZE);
1237 #endif
1238 }
1239 
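/*
 * Advertise the firmware log buffer to the VCN firmware through fw_shared:
 * set the logging flag, publish the GPU address and size of the buffer that
 * sits right after the shared region, and initialise the ring header that
 * the debugfs reader above consumes.
 */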
1240 void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
1241 {
1242 #if defined(CONFIG_DEBUG_FS)
1243 	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
1244 	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
1245 	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
1246 	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
1247 	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
1248                                                          + vcn->fw_shared.log_offset;
1249 	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
1250 	fw_log->is_enabled = 1;
1251 	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
1252 	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
1253 	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
1254 
1255 	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
1256 	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
1257 	log_buf->rptr = log_buf->header_size;
1258 	log_buf->wptr = log_buf->header_size;
1259 	log_buf->wrapped = 0;
1260 #endif
1261 }
1262 
1263 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
1264 				struct amdgpu_irq_src *source,
1265 				struct amdgpu_iv_entry *entry)
1266 {
1267 	struct ras_common_if *ras_if = adev->vcn.ras_if;
1268 	struct ras_dispatch_if ih_data = {
1269 		.entry = entry,
1270 	};
1271 
1272 	if (!ras_if)
1273 		return 0;
1274 
1275 	ih_data.head = *ras_if;
1276 	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
1277 
1278 	return 0;
1279 }
1280