/*	$NetBSD: amdgpu_vce.c,v 1.5 2020/02/14 14:34:58 maya Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vce.c,v 1.5 2020/02/14 14:34:58 maya Exp $");

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	/* ucode_version packs major.minor.binary_id as 12/12/8 bits */
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %x.%x Binary ID: %x\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
	}
}

/**
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
 */
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));

	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;
	}

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_PG_STATE_UNGATE);

		}
	}
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)
			continue;

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

static int amdgpu_vce_free_job(
	struct amdgpu_job *job)
{
	amdgpu_ib_free(job->adev, job->ibs);
	kfree(job->ibs);
	return 0;
}
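
/*
 * The VCE messages stitched together below are sequences of command
 * groups, one dword each of { length, command, payload... }.  The "len"
 * dword gives the size of the whole group in bytes, including the
 * length dword itself (e.g. 0x0000000c for the three-dword session
 * group).  The remaining payload dwords are opaque, firmware-defined
 * values used only for the HW test and are not documented here.
 */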

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @adev: amdgpu_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;
	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		kfree(ib);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @adev: amdgpu_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_ib *ib = NULL;
	struct fence *f = NULL;
	struct amdgpu_device *adev = ring->adev;
	uint64_t dummy;
	int i, r;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, ib);
	if (r) {
		kfree(ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
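	/*
	 * Same framing as the create message above: a session group and a
	 * feedback-buffer group, followed by a bare destroy command group.
	 */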
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vce_free_job,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err;
	if (fence)
		*fence = fence_get(f);
	fence_put(f);
	if (amdgpu_enable_scheduler)
		return 0;
err:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010"PRIx64" %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO to small for addr 0x%010"PRIx64" %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;

	return 0;
}
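
/*
 * Each open encode session occupies one slot in adev->vce.handles[] /
 * adev->vce.filp[] (at most AMDGPU_MAX_VCE_HANDLES).  A handle is only
 * valid for the DRM file that allocated it; the lookup below rejects
 * reuse by any other file as a handle collision.
 */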

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
{
	unsigned i;

	*allocated = false;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated = true;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		if (destroyed) {
			DRM_ERROR("No other command allowed after destroy!\n");
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0)
				return session_idx;
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: // create
			created = true;
			if (!allocated) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer
			break;

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: // destroy
			destroyed = true;
			break;

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a
		 * handle and got an error, anyway free the handle
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
	}

	return r;
}

/**
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
 *
 */
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
				    bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
	if (!emit_wait)
		amdgpu_ring_write(ring, VCE_CMD_END);

	return true;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_lock(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
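
/*
 * Unlike amdgpu_vce_ring_test_ring() above, which only checks that the
 * read pointer advances past a VCE_CMD_END, the IB test below submits
 * real create/destroy session messages and waits for the resulting
 * fence.
 */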

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	/* skip vce ring1 ib test for now, since it's not reliable */
	if (ring == &ring->adev->vce.ring[1])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
	}
error:
	fence_put(fence);
	return r;
}