1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Huang Rui 23 * 24 */ 25 26 #include <linux/firmware.h> 27 #include <linux/dma-mapping.h> 28 29 #include "amdgpu.h" 30 #include "amdgpu_psp.h" 31 #include "amdgpu_ucode.h" 32 #include "soc15_common.h" 33 #include "psp_v3_1.h" 34 #include "psp_v10_0.h" 35 #include "psp_v11_0.h" 36 #include "psp_v12_0.h" 37 38 #include "amdgpu_ras.h" 39 40 static void psp_set_funcs(struct amdgpu_device *adev); 41 42 static int psp_sysfs_init(struct amdgpu_device *adev); 43 static void psp_sysfs_fini(struct amdgpu_device *adev); 44 45 /* 46 * Due to DF Cstate management centralized to PMFW, the firmware 47 * loading sequence will be updated as below: 48 * - Load KDB 49 * - Load SYS_DRV 50 * - Load tOS 51 * - Load PMFW 52 * - Setup TMR 53 * - Load other non-psp fw 54 * - Load ASD 55 * - Load XGMI/RAS/HDCP/DTM TA if any 56 * 57 * This new sequence is required for 58 * - Arcturus 59 * - Navi12 and onwards 60 */ 61 static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) 62 { 63 struct amdgpu_device *adev = psp->adev; 64 65 psp->pmfw_centralized_cstate_management = false; 66 67 if (amdgpu_sriov_vf(adev)) 68 return; 69 70 if (adev->flags & AMD_IS_APU) 71 return; 72 73 if ((adev->asic_type == CHIP_ARCTURUS) || 74 (adev->asic_type >= CHIP_NAVI12)) 75 psp->pmfw_centralized_cstate_management = true; 76 } 77 78 static int psp_early_init(void *handle) 79 { 80 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 81 struct psp_context *psp = &adev->psp; 82 83 psp_set_funcs(adev); 84 85 switch (adev->asic_type) { 86 case CHIP_VEGA10: 87 case CHIP_VEGA12: 88 psp_v3_1_set_psp_funcs(psp); 89 psp->autoload_supported = false; 90 break; 91 case CHIP_RAVEN: 92 psp_v10_0_set_psp_funcs(psp); 93 psp->autoload_supported = false; 94 break; 95 case CHIP_VEGA20: 96 case CHIP_ARCTURUS: 97 psp_v11_0_set_psp_funcs(psp); 98 psp->autoload_supported = false; 99 break; 100 case CHIP_NAVI10: 101 case CHIP_NAVI14: 102 case CHIP_NAVI12: 103 psp_v11_0_set_psp_funcs(psp); 104 psp->autoload_supported = true; 105 break; 106 case CHIP_RENOIR: 107 psp_v12_0_set_psp_funcs(psp); 108 break; 109 default: 110 return -EINVAL; 111 } 112 113 psp->adev = adev; 114 115 psp_check_pmfw_centralized_cstate_management(psp); 116 117 return 0; 118 } 119 120 static int psp_sw_init(void *handle) 121 { 122 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 123 struct 
psp_context *psp = &adev->psp; 124 int ret; 125 126 ret = psp_init_microcode(psp); 127 if (ret) { 128 DRM_ERROR("Failed to load psp firmware!\n"); 129 return ret; 130 } 131 132 ret = psp_mem_training_init(psp); 133 if (ret) { 134 DRM_ERROR("Failed to initialize memory training!\n"); 135 return ret; 136 } 137 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 138 if (ret) { 139 DRM_ERROR("Failed to process memory training!\n"); 140 return ret; 141 } 142 143 if (adev->asic_type == CHIP_NAVI10) { 144 ret= psp_sysfs_init(adev); 145 if (ret) { 146 return ret; 147 } 148 } 149 150 return 0; 151 } 152 153 static int psp_sw_fini(void *handle) 154 { 155 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 156 157 psp_mem_training_fini(&adev->psp); 158 release_firmware(adev->psp.sos_fw); 159 adev->psp.sos_fw = NULL; 160 release_firmware(adev->psp.asd_fw); 161 adev->psp.asd_fw = NULL; 162 if (adev->psp.ta_fw) { 163 release_firmware(adev->psp.ta_fw); 164 adev->psp.ta_fw = NULL; 165 } 166 167 if (adev->asic_type == CHIP_NAVI10) 168 psp_sysfs_fini(adev); 169 170 return 0; 171 } 172 173 int psp_wait_for(struct psp_context *psp, uint32_t reg_index, 174 uint32_t reg_val, uint32_t mask, bool check_changed) 175 { 176 uint32_t val; 177 int i; 178 struct amdgpu_device *adev = psp->adev; 179 180 for (i = 0; i < adev->usec_timeout; i++) { 181 val = RREG32(reg_index); 182 if (check_changed) { 183 if (val != reg_val) 184 return 0; 185 } else { 186 if ((val & mask) == reg_val) 187 return 0; 188 } 189 udelay(1); 190 } 191 192 return -ETIME; 193 } 194 195 static int 196 psp_cmd_submit_buf(struct psp_context *psp, 197 struct amdgpu_firmware_info *ucode, 198 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) 199 { 200 int ret; 201 int index; 202 int timeout = 2000; 203 bool ras_intr = false; 204 205 mutex_lock(&psp->mutex); 206 207 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); 208 209 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); 210 211 index = atomic_inc_return(&psp->fence_value); 212 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); 213 if (ret) { 214 atomic_dec(&psp->fence_value); 215 mutex_unlock(&psp->mutex); 216 return ret; 217 } 218 219 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 220 while (*((unsigned int *)psp->fence_buf) != index) { 221 if (--timeout == 0) 222 break; 223 /* 224 * Shouldn't wait for timeout when err_event_athub occurs, 225 * because gpu reset thread triggered and lock resource should 226 * be released for psp resume sequence. 227 */ 228 ras_intr = amdgpu_ras_intr_triggered(); 229 if (ras_intr) 230 break; 231 drm_msleep(1); 232 amdgpu_asic_invalidate_hdp(psp->adev, NULL); 233 } 234 235 /* In some cases, psp response status is not 0 even there is no 236 * problem while the command is submitted. Some version of PSP FW 237 * doesn't write 0 to that field. 238 * So here we would like to only print a warning instead of an error 239 * during psp initialization to avoid breaking hw_init and it doesn't 240 * return -EINVAL. 
241 */ 242 if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) { 243 if (ucode) 244 DRM_WARN("failed to load ucode id (%d) ", 245 ucode->ucode_id); 246 DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n", 247 psp->cmd_buf_mem->cmd_id, 248 psp->cmd_buf_mem->resp.status); 249 if (!timeout) { 250 mutex_unlock(&psp->mutex); 251 return -EINVAL; 252 } 253 } 254 255 /* get xGMI session id from response buffer */ 256 cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id; 257 258 if (ucode) { 259 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; 260 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; 261 } 262 mutex_unlock(&psp->mutex); 263 264 return ret; 265 } 266 267 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 268 struct psp_gfx_cmd_resp *cmd, 269 uint64_t tmr_mc, uint32_t size) 270 { 271 if (psp_support_vmr_ring(psp)) 272 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 273 else 274 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; 275 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 276 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 277 cmd->cmd.cmd_setup_tmr.buf_size = size; 278 } 279 280 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, 281 uint64_t pri_buf_mc, uint32_t size) 282 { 283 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC; 284 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc); 285 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc); 286 cmd->cmd.cmd_load_toc.toc_size = size; 287 } 288 289 /* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */ 290 static int psp_load_toc(struct psp_context *psp, 291 uint32_t *tmr_size) 292 { 293 int ret; 294 struct psp_gfx_cmd_resp *cmd; 295 296 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 297 if (!cmd) 298 return -ENOMEM; 299 /* Copy toc to psp firmware private buffer */ 300 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 301 memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size); 302 303 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); 304 305 ret = psp_cmd_submit_buf(psp, NULL, cmd, 306 psp->fence_buf_mc_addr); 307 if (!ret) 308 *tmr_size = psp->cmd_buf_mem->resp.tmr_size; 309 kfree(cmd); 310 return ret; 311 } 312 313 /* Set up Trusted Memory Region */ 314 static int psp_tmr_init(struct psp_context *psp) 315 { 316 int ret; 317 int tmr_size; 318 void *tmr_buf; 319 void **pptr; 320 321 /* 322 * According to HW engineer, they prefer the TMR address be "naturally 323 * aligned" , e.g. the start address be an integer divide of TMR size. 324 * 325 * Note: this memory need be reserved till the driver 326 * uninitializes. 327 */ 328 tmr_size = PSP_TMR_SIZE; 329 330 /* For ASICs support RLC autoload, psp will parse the toc 331 * and calculate the total size of TMR needed */ 332 if (!amdgpu_sriov_vf(psp->adev) && 333 psp->toc_start_addr && 334 psp->toc_bin_size && 335 psp->fw_pri_buf) { 336 ret = psp_load_toc(psp, &tmr_size); 337 if (ret) { 338 DRM_ERROR("Failed to load toc\n"); 339 return ret; 340 } 341 } 342 343 pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; 344 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE, 345 AMDGPU_GEM_DOMAIN_VRAM, 346 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 347 348 return ret; 349 } 350 351 static int psp_tmr_load(struct psp_context *psp) 352 { 353 int ret; 354 struct psp_gfx_cmd_resp *cmd; 355 356 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 357 if (!cmd) 358 return -ENOMEM; 359 360 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, 361 amdgpu_bo_size(psp->tmr_bo)); 362 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 363 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 364 365 ret = psp_cmd_submit_buf(psp, NULL, cmd, 366 psp->fence_buf_mc_addr); 367 368 kfree(cmd); 369 370 return ret; 371 } 372 373 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, 374 struct psp_gfx_cmd_resp *cmd) 375 { 376 if (amdgpu_sriov_vf(psp->adev)) 377 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; 378 else 379 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; 380 } 381 382 static int psp_tmr_unload(struct psp_context *psp) 383 { 384 int ret; 385 struct psp_gfx_cmd_resp *cmd; 386 387 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 388 if (!cmd) 389 return -ENOMEM; 390 391 psp_prep_tmr_unload_cmd_buf(psp, cmd); 392 DRM_INFO("free PSP TMR buffer\n"); 393 394 ret = psp_cmd_submit_buf(psp, NULL, cmd, 395 psp->fence_buf_mc_addr); 396 397 kfree(cmd); 398 399 return ret; 400 } 401 402 static int psp_tmr_terminate(struct psp_context *psp) 403 { 404 int ret; 405 void *tmr_buf; 406 void **pptr; 407 408 ret = psp_tmr_unload(psp); 409 if (ret) 410 return ret; 411 412 /* free TMR memory buffer */ 413 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 414 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); 415 416 return 0; 417 } 418 419 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 420 uint64_t asd_mc, uint32_t size) 421 { 422 cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; 423 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); 424 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); 425 cmd->cmd.cmd_load_ta.app_len = size; 426 427 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; 428 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; 429 cmd->cmd.cmd_load_ta.cmd_buf_len = 0; 430 } 431 432 static int psp_asd_load(struct psp_context *psp) 433 { 434 int ret; 435 struct psp_gfx_cmd_resp *cmd; 436 437 /* If PSP version doesn't match ASD version, asd loading will be failed. 438 * add workaround to bypass it for sriov now. 
439 * TODO: add version check to make it common 440 */ 441 if (amdgpu_sriov_vf(psp->adev)) 442 return 0; 443 444 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 445 if (!cmd) 446 return -ENOMEM; 447 448 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 449 memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); 450 451 psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 452 psp->asd_ucode_size); 453 454 ret = psp_cmd_submit_buf(psp, NULL, cmd, 455 psp->fence_buf_mc_addr); 456 if (!ret) { 457 psp->asd_context.asd_initialized = true; 458 psp->asd_context.session_id = cmd->resp.session_id; 459 } 460 461 kfree(cmd); 462 463 return ret; 464 } 465 466 static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 467 uint32_t session_id) 468 { 469 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 470 cmd->cmd.cmd_unload_ta.session_id = session_id; 471 } 472 473 static int psp_asd_unload(struct psp_context *psp) 474 { 475 int ret; 476 struct psp_gfx_cmd_resp *cmd; 477 478 if (amdgpu_sriov_vf(psp->adev)) 479 return 0; 480 481 if (!psp->asd_context.asd_initialized) 482 return 0; 483 484 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 485 if (!cmd) 486 return -ENOMEM; 487 488 psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); 489 490 ret = psp_cmd_submit_buf(psp, NULL, cmd, 491 psp->fence_buf_mc_addr); 492 if (!ret) 493 psp->asd_context.asd_initialized = false; 494 495 kfree(cmd); 496 497 return ret; 498 } 499 500 static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd, 501 uint32_t id, uint32_t value) 502 { 503 cmd->cmd_id = GFX_CMD_ID_PROG_REG; 504 cmd->cmd.cmd_setup_reg_prog.reg_value = value; 505 cmd->cmd.cmd_setup_reg_prog.reg_id = id; 506 } 507 508 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, 509 uint32_t value) 510 { 511 struct psp_gfx_cmd_resp *cmd = NULL; 512 int ret = 0; 513 514 if (reg >= PSP_REG_LAST) 515 return -EINVAL; 516 517 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 518 if (!cmd) 519 return -ENOMEM; 520 521 psp_prep_reg_prog_cmd_buf(cmd, reg, value); 522 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 523 524 kfree(cmd); 525 return ret; 526 } 527 528 static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 529 uint64_t ta_bin_mc, 530 uint32_t ta_bin_size, 531 uint64_t ta_shared_mc, 532 uint32_t ta_shared_size) 533 { 534 cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 535 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); 536 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 537 cmd->cmd.cmd_load_ta.app_len = ta_bin_size; 538 539 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); 540 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); 541 cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; 542 } 543 544 static int psp_xgmi_init_shared_buf(struct psp_context *psp) 545 { 546 int ret; 547 548 /* 549 * Allocate 16k memory aligned to 4k from Frame Buffer (local 550 * physical) for xgmi ta <-> Driver 551 */ 552 ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, 553 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 554 &psp->xgmi_context.xgmi_shared_bo, 555 &psp->xgmi_context.xgmi_shared_mc_addr, 556 &psp->xgmi_context.xgmi_shared_buf); 557 558 return ret; 559 } 560 561 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 562 uint32_t ta_cmd_id, 563 uint32_t session_id) 564 { 565 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 566 cmd->cmd.cmd_invoke_cmd.session_id = session_id; 567 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = 
ta_cmd_id; 568 } 569 570 int psp_ta_invoke(struct psp_context *psp, 571 uint32_t ta_cmd_id, 572 uint32_t session_id) 573 { 574 int ret; 575 struct psp_gfx_cmd_resp *cmd; 576 577 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 578 if (!cmd) 579 return -ENOMEM; 580 581 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); 582 583 ret = psp_cmd_submit_buf(psp, NULL, cmd, 584 psp->fence_buf_mc_addr); 585 586 kfree(cmd); 587 588 return ret; 589 } 590 591 static int psp_xgmi_load(struct psp_context *psp) 592 { 593 int ret; 594 struct psp_gfx_cmd_resp *cmd; 595 596 /* 597 * TODO: bypass the loading in sriov for now 598 */ 599 600 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 601 if (!cmd) 602 return -ENOMEM; 603 604 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 605 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); 606 607 psp_prep_ta_load_cmd_buf(cmd, 608 psp->fw_pri_mc_addr, 609 psp->ta_xgmi_ucode_size, 610 psp->xgmi_context.xgmi_shared_mc_addr, 611 PSP_XGMI_SHARED_MEM_SIZE); 612 613 ret = psp_cmd_submit_buf(psp, NULL, cmd, 614 psp->fence_buf_mc_addr); 615 616 if (!ret) { 617 psp->xgmi_context.initialized = 1; 618 psp->xgmi_context.session_id = cmd->resp.session_id; 619 } 620 621 kfree(cmd); 622 623 return ret; 624 } 625 626 static int psp_xgmi_unload(struct psp_context *psp) 627 { 628 int ret; 629 struct psp_gfx_cmd_resp *cmd; 630 struct amdgpu_device *adev = psp->adev; 631 632 /* XGMI TA unload currently is not supported on Arcturus */ 633 if (adev->asic_type == CHIP_ARCTURUS) 634 return 0; 635 636 /* 637 * TODO: bypass the unloading in sriov for now 638 */ 639 640 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 641 if (!cmd) 642 return -ENOMEM; 643 644 psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 645 646 ret = psp_cmd_submit_buf(psp, NULL, cmd, 647 psp->fence_buf_mc_addr); 648 649 kfree(cmd); 650 651 return ret; 652 } 653 654 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 655 { 656 return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); 657 } 658 659 int psp_xgmi_terminate(struct psp_context *psp) 660 { 661 int ret; 662 663 if (!psp->xgmi_context.initialized) 664 return 0; 665 666 ret = psp_xgmi_unload(psp); 667 if (ret) 668 return ret; 669 670 psp->xgmi_context.initialized = 0; 671 672 /* free xgmi shared memory */ 673 amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, 674 &psp->xgmi_context.xgmi_shared_mc_addr, 675 &psp->xgmi_context.xgmi_shared_buf); 676 677 return 0; 678 } 679 680 int psp_xgmi_initialize(struct psp_context *psp) 681 { 682 struct ta_xgmi_shared_memory *xgmi_cmd; 683 int ret; 684 685 if (!psp->adev->psp.ta_fw || 686 !psp->adev->psp.ta_xgmi_ucode_size || 687 !psp->adev->psp.ta_xgmi_start_addr) 688 return -ENOENT; 689 690 if (!psp->xgmi_context.initialized) { 691 ret = psp_xgmi_init_shared_buf(psp); 692 if (ret) 693 return ret; 694 } 695 696 /* Load XGMI TA */ 697 ret = psp_xgmi_load(psp); 698 if (ret) 699 return ret; 700 701 /* Initialize XGMI session */ 702 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); 703 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); 704 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; 705 706 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 707 708 return ret; 709 } 710 711 // ras begin 712 static int psp_ras_init_shared_buf(struct psp_context *psp) 713 { 714 int ret; 715 716 /* 717 * Allocate 16k memory aligned to 4k from Frame Buffer (local 718 * physical) for ras ta <-> Driver 719 */ 720 ret = 
amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE, 721 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 722 &psp->ras.ras_shared_bo, 723 &psp->ras.ras_shared_mc_addr, 724 &psp->ras.ras_shared_buf); 725 726 return ret; 727 } 728 729 static int psp_ras_load(struct psp_context *psp) 730 { 731 int ret; 732 struct psp_gfx_cmd_resp *cmd; 733 734 /* 735 * TODO: bypass the loading in sriov for now 736 */ 737 if (amdgpu_sriov_vf(psp->adev)) 738 return 0; 739 740 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 741 if (!cmd) 742 return -ENOMEM; 743 744 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 745 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); 746 747 psp_prep_ta_load_cmd_buf(cmd, 748 psp->fw_pri_mc_addr, 749 psp->ta_ras_ucode_size, 750 psp->ras.ras_shared_mc_addr, 751 PSP_RAS_SHARED_MEM_SIZE); 752 753 ret = psp_cmd_submit_buf(psp, NULL, cmd, 754 psp->fence_buf_mc_addr); 755 756 if (!ret) { 757 psp->ras.ras_initialized = true; 758 psp->ras.session_id = cmd->resp.session_id; 759 } 760 761 kfree(cmd); 762 763 return ret; 764 } 765 766 static int psp_ras_unload(struct psp_context *psp) 767 { 768 int ret; 769 struct psp_gfx_cmd_resp *cmd; 770 771 /* 772 * TODO: bypass the unloading in sriov for now 773 */ 774 if (amdgpu_sriov_vf(psp->adev)) 775 return 0; 776 777 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 778 if (!cmd) 779 return -ENOMEM; 780 781 psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); 782 783 ret = psp_cmd_submit_buf(psp, NULL, cmd, 784 psp->fence_buf_mc_addr); 785 786 kfree(cmd); 787 788 return ret; 789 } 790 791 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 792 { 793 /* 794 * TODO: bypass the loading in sriov for now 795 */ 796 if (amdgpu_sriov_vf(psp->adev)) 797 return 0; 798 799 return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); 800 } 801 802 int psp_ras_enable_features(struct psp_context *psp, 803 union ta_ras_cmd_input *info, bool enable) 804 { 805 struct ta_ras_shared_memory *ras_cmd; 806 int ret; 807 808 if (!psp->ras.ras_initialized) 809 return -EINVAL; 810 811 ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; 812 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); 813 814 if (enable) 815 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES; 816 else 817 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES; 818 819 ras_cmd->ras_in_message = *info; 820 821 ret = psp_ras_invoke(psp, ras_cmd->cmd_id); 822 if (ret) 823 return -EINVAL; 824 825 return ras_cmd->ras_status; 826 } 827 828 static int psp_ras_terminate(struct psp_context *psp) 829 { 830 int ret; 831 832 /* 833 * TODO: bypass the terminate in sriov for now 834 */ 835 if (amdgpu_sriov_vf(psp->adev)) 836 return 0; 837 838 if (!psp->ras.ras_initialized) 839 return 0; 840 841 ret = psp_ras_unload(psp); 842 if (ret) 843 return ret; 844 845 psp->ras.ras_initialized = false; 846 847 /* free ras shared memory */ 848 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, 849 &psp->ras.ras_shared_mc_addr, 850 &psp->ras.ras_shared_buf); 851 852 return 0; 853 } 854 855 static int psp_ras_initialize(struct psp_context *psp) 856 { 857 int ret; 858 859 /* 860 * TODO: bypass the initialize in sriov for now 861 */ 862 if (amdgpu_sriov_vf(psp->adev)) 863 return 0; 864 865 if (!psp->adev->psp.ta_ras_ucode_size || 866 !psp->adev->psp.ta_ras_start_addr) { 867 dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); 868 return 0; 869 } 870 871 if (!psp->ras.ras_initialized) { 872 ret = psp_ras_init_shared_buf(psp); 873 if (ret) 874 return ret; 875 } 
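
	/*
	 * Descriptive note (added): load the RAS TA next; on success
	 * psp_ras_load() records the session id that later
	 * psp_ras_invoke() calls use to reach the TA.
	 */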
876 877 ret = psp_ras_load(psp); 878 if (ret) 879 return ret; 880 881 return 0; 882 } 883 // ras end 884 885 // HDCP start 886 static int psp_hdcp_init_shared_buf(struct psp_context *psp) 887 { 888 int ret; 889 890 /* 891 * Allocate 16k memory aligned to 4k from Frame Buffer (local 892 * physical) for hdcp ta <-> Driver 893 */ 894 ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, 895 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 896 &psp->hdcp_context.hdcp_shared_bo, 897 &psp->hdcp_context.hdcp_shared_mc_addr, 898 &psp->hdcp_context.hdcp_shared_buf); 899 900 return ret; 901 } 902 903 static int psp_hdcp_load(struct psp_context *psp) 904 { 905 int ret; 906 struct psp_gfx_cmd_resp *cmd; 907 908 /* 909 * TODO: bypass the loading in sriov for now 910 */ 911 if (amdgpu_sriov_vf(psp->adev)) 912 return 0; 913 914 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 915 if (!cmd) 916 return -ENOMEM; 917 918 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 919 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, 920 psp->ta_hdcp_ucode_size); 921 922 psp_prep_ta_load_cmd_buf(cmd, 923 psp->fw_pri_mc_addr, 924 psp->ta_hdcp_ucode_size, 925 psp->hdcp_context.hdcp_shared_mc_addr, 926 PSP_HDCP_SHARED_MEM_SIZE); 927 928 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 929 930 if (!ret) { 931 psp->hdcp_context.hdcp_initialized = true; 932 psp->hdcp_context.session_id = cmd->resp.session_id; 933 } 934 935 kfree(cmd); 936 937 return ret; 938 } 939 static int psp_hdcp_initialize(struct psp_context *psp) 940 { 941 int ret; 942 943 /* 944 * TODO: bypass the initialize in sriov for now 945 */ 946 if (amdgpu_sriov_vf(psp->adev)) 947 return 0; 948 949 if (!psp->adev->psp.ta_hdcp_ucode_size || 950 !psp->adev->psp.ta_hdcp_start_addr) { 951 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); 952 return 0; 953 } 954 955 if (!psp->hdcp_context.hdcp_initialized) { 956 ret = psp_hdcp_init_shared_buf(psp); 957 if (ret) 958 return ret; 959 } 960 961 ret = psp_hdcp_load(psp); 962 if (ret) 963 return ret; 964 965 return 0; 966 } 967 968 static int psp_hdcp_unload(struct psp_context *psp) 969 { 970 int ret; 971 struct psp_gfx_cmd_resp *cmd; 972 973 /* 974 * TODO: bypass the unloading in sriov for now 975 */ 976 if (amdgpu_sriov_vf(psp->adev)) 977 return 0; 978 979 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 980 if (!cmd) 981 return -ENOMEM; 982 983 psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 984 985 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 986 987 kfree(cmd); 988 989 return ret; 990 } 991 992 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 993 { 994 /* 995 * TODO: bypass the loading in sriov for now 996 */ 997 if (amdgpu_sriov_vf(psp->adev)) 998 return 0; 999 1000 return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); 1001 } 1002 1003 static int psp_hdcp_terminate(struct psp_context *psp) 1004 { 1005 int ret; 1006 1007 /* 1008 * TODO: bypass the terminate in sriov for now 1009 */ 1010 if (amdgpu_sriov_vf(psp->adev)) 1011 return 0; 1012 1013 if (!psp->hdcp_context.hdcp_initialized) 1014 return 0; 1015 1016 ret = psp_hdcp_unload(psp); 1017 if (ret) 1018 return ret; 1019 1020 psp->hdcp_context.hdcp_initialized = false; 1021 1022 /* free hdcp shared memory */ 1023 amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, 1024 &psp->hdcp_context.hdcp_shared_mc_addr, 1025 &psp->hdcp_context.hdcp_shared_buf); 1026 1027 return 0; 1028 } 1029 // HDCP end 1030 1031 // DTM start 1032 static int 
psp_dtm_init_shared_buf(struct psp_context *psp) 1033 { 1034 int ret; 1035 1036 /* 1037 * Allocate 16k memory aligned to 4k from Frame Buffer (local 1038 * physical) for dtm ta <-> Driver 1039 */ 1040 ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, 1041 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1042 &psp->dtm_context.dtm_shared_bo, 1043 &psp->dtm_context.dtm_shared_mc_addr, 1044 &psp->dtm_context.dtm_shared_buf); 1045 1046 return ret; 1047 } 1048 1049 static int psp_dtm_load(struct psp_context *psp) 1050 { 1051 int ret; 1052 struct psp_gfx_cmd_resp *cmd; 1053 1054 /* 1055 * TODO: bypass the loading in sriov for now 1056 */ 1057 if (amdgpu_sriov_vf(psp->adev)) 1058 return 0; 1059 1060 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1061 if (!cmd) 1062 return -ENOMEM; 1063 1064 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 1065 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); 1066 1067 psp_prep_ta_load_cmd_buf(cmd, 1068 psp->fw_pri_mc_addr, 1069 psp->ta_dtm_ucode_size, 1070 psp->dtm_context.dtm_shared_mc_addr, 1071 PSP_DTM_SHARED_MEM_SIZE); 1072 1073 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1074 1075 if (!ret) { 1076 psp->dtm_context.dtm_initialized = true; 1077 psp->dtm_context.session_id = cmd->resp.session_id; 1078 } 1079 1080 kfree(cmd); 1081 1082 return ret; 1083 } 1084 1085 static int psp_dtm_initialize(struct psp_context *psp) 1086 { 1087 int ret; 1088 1089 /* 1090 * TODO: bypass the initialize in sriov for now 1091 */ 1092 if (amdgpu_sriov_vf(psp->adev)) 1093 return 0; 1094 1095 if (!psp->adev->psp.ta_dtm_ucode_size || 1096 !psp->adev->psp.ta_dtm_start_addr) { 1097 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); 1098 return 0; 1099 } 1100 1101 if (!psp->dtm_context.dtm_initialized) { 1102 ret = psp_dtm_init_shared_buf(psp); 1103 if (ret) 1104 return ret; 1105 } 1106 1107 ret = psp_dtm_load(psp); 1108 if (ret) 1109 return ret; 1110 1111 return 0; 1112 } 1113 1114 static int psp_dtm_unload(struct psp_context *psp) 1115 { 1116 int ret; 1117 struct psp_gfx_cmd_resp *cmd; 1118 1119 /* 1120 * TODO: bypass the unloading in sriov for now 1121 */ 1122 if (amdgpu_sriov_vf(psp->adev)) 1123 return 0; 1124 1125 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1126 if (!cmd) 1127 return -ENOMEM; 1128 1129 psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id); 1130 1131 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1132 1133 kfree(cmd); 1134 1135 return ret; 1136 } 1137 1138 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1139 { 1140 /* 1141 * TODO: bypass the loading in sriov for now 1142 */ 1143 if (amdgpu_sriov_vf(psp->adev)) 1144 return 0; 1145 1146 return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id); 1147 } 1148 1149 static int psp_dtm_terminate(struct psp_context *psp) 1150 { 1151 int ret; 1152 1153 /* 1154 * TODO: bypass the terminate in sriov for now 1155 */ 1156 if (amdgpu_sriov_vf(psp->adev)) 1157 return 0; 1158 1159 if (!psp->dtm_context.dtm_initialized) 1160 return 0; 1161 1162 ret = psp_dtm_unload(psp); 1163 if (ret) 1164 return ret; 1165 1166 psp->dtm_context.dtm_initialized = false; 1167 1168 /* free hdcp shared memory */ 1169 amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, 1170 &psp->dtm_context.dtm_shared_mc_addr, 1171 &psp->dtm_context.dtm_shared_buf); 1172 1173 return 0; 1174 } 1175 // DTM end 1176 1177 static int psp_hw_start(struct psp_context *psp) 1178 { 1179 struct amdgpu_device *adev = psp->adev; 1180 int ret; 
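
	/*
	 * Descriptive note (added): on bare metal the driver first stages the
	 * PSP bootloader pieces (KDB when present, SYS_DRV, then sOS) before
	 * creating the ring; under SR-IOV the host has already brought the
	 * PSP up, so that block is skipped.
	 */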
1181 1182 if (!amdgpu_sriov_vf(adev)) { 1183 if (psp->kdb_bin_size && 1184 (psp->funcs->bootloader_load_kdb != NULL)) { 1185 ret = psp_bootloader_load_kdb(psp); 1186 if (ret) { 1187 DRM_ERROR("PSP load kdb failed!\n"); 1188 return ret; 1189 } 1190 } 1191 1192 ret = psp_bootloader_load_sysdrv(psp); 1193 if (ret) { 1194 DRM_ERROR("PSP load sysdrv failed!\n"); 1195 return ret; 1196 } 1197 1198 ret = psp_bootloader_load_sos(psp); 1199 if (ret) { 1200 DRM_ERROR("PSP load sos failed!\n"); 1201 return ret; 1202 } 1203 } 1204 1205 ret = psp_ring_create(psp, PSP_RING_TYPE__KM); 1206 if (ret) { 1207 DRM_ERROR("PSP create ring failed!\n"); 1208 return ret; 1209 } 1210 1211 ret = psp_tmr_init(psp); 1212 if (ret) { 1213 DRM_ERROR("PSP tmr init failed!\n"); 1214 return ret; 1215 } 1216 1217 /* 1218 * For those ASICs with DF Cstate management centralized 1219 * to PMFW, TMR setup should be performed after PMFW 1220 * loaded and before other non-psp firmware loaded. 1221 */ 1222 if (!psp->pmfw_centralized_cstate_management) { 1223 ret = psp_tmr_load(psp); 1224 if (ret) { 1225 DRM_ERROR("PSP load tmr failed!\n"); 1226 return ret; 1227 } 1228 } 1229 1230 return 0; 1231 } 1232 1233 static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, 1234 enum psp_gfx_fw_type *type) 1235 { 1236 switch (ucode->ucode_id) { 1237 case AMDGPU_UCODE_ID_SDMA0: 1238 *type = GFX_FW_TYPE_SDMA0; 1239 break; 1240 case AMDGPU_UCODE_ID_SDMA1: 1241 *type = GFX_FW_TYPE_SDMA1; 1242 break; 1243 case AMDGPU_UCODE_ID_SDMA2: 1244 *type = GFX_FW_TYPE_SDMA2; 1245 break; 1246 case AMDGPU_UCODE_ID_SDMA3: 1247 *type = GFX_FW_TYPE_SDMA3; 1248 break; 1249 case AMDGPU_UCODE_ID_SDMA4: 1250 *type = GFX_FW_TYPE_SDMA4; 1251 break; 1252 case AMDGPU_UCODE_ID_SDMA5: 1253 *type = GFX_FW_TYPE_SDMA5; 1254 break; 1255 case AMDGPU_UCODE_ID_SDMA6: 1256 *type = GFX_FW_TYPE_SDMA6; 1257 break; 1258 case AMDGPU_UCODE_ID_SDMA7: 1259 *type = GFX_FW_TYPE_SDMA7; 1260 break; 1261 case AMDGPU_UCODE_ID_CP_CE: 1262 *type = GFX_FW_TYPE_CP_CE; 1263 break; 1264 case AMDGPU_UCODE_ID_CP_PFP: 1265 *type = GFX_FW_TYPE_CP_PFP; 1266 break; 1267 case AMDGPU_UCODE_ID_CP_ME: 1268 *type = GFX_FW_TYPE_CP_ME; 1269 break; 1270 case AMDGPU_UCODE_ID_CP_MEC1: 1271 *type = GFX_FW_TYPE_CP_MEC; 1272 break; 1273 case AMDGPU_UCODE_ID_CP_MEC1_JT: 1274 *type = GFX_FW_TYPE_CP_MEC_ME1; 1275 break; 1276 case AMDGPU_UCODE_ID_CP_MEC2: 1277 *type = GFX_FW_TYPE_CP_MEC; 1278 break; 1279 case AMDGPU_UCODE_ID_CP_MEC2_JT: 1280 *type = GFX_FW_TYPE_CP_MEC_ME2; 1281 break; 1282 case AMDGPU_UCODE_ID_RLC_G: 1283 *type = GFX_FW_TYPE_RLC_G; 1284 break; 1285 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: 1286 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; 1287 break; 1288 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: 1289 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; 1290 break; 1291 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: 1292 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; 1293 break; 1294 case AMDGPU_UCODE_ID_SMC: 1295 *type = GFX_FW_TYPE_SMU; 1296 break; 1297 case AMDGPU_UCODE_ID_UVD: 1298 *type = GFX_FW_TYPE_UVD; 1299 break; 1300 case AMDGPU_UCODE_ID_UVD1: 1301 *type = GFX_FW_TYPE_UVD1; 1302 break; 1303 case AMDGPU_UCODE_ID_VCE: 1304 *type = GFX_FW_TYPE_VCE; 1305 break; 1306 case AMDGPU_UCODE_ID_VCN: 1307 *type = GFX_FW_TYPE_VCN; 1308 break; 1309 case AMDGPU_UCODE_ID_VCN1: 1310 *type = GFX_FW_TYPE_VCN1; 1311 break; 1312 case AMDGPU_UCODE_ID_DMCU_ERAM: 1313 *type = GFX_FW_TYPE_DMCU_ERAM; 1314 break; 1315 case AMDGPU_UCODE_ID_DMCU_INTV: 1316 *type = GFX_FW_TYPE_DMCU_ISR; 1317 break; 1318 case 
AMDGPU_UCODE_ID_VCN0_RAM: 1319 *type = GFX_FW_TYPE_VCN0_RAM; 1320 break; 1321 case AMDGPU_UCODE_ID_VCN1_RAM: 1322 *type = GFX_FW_TYPE_VCN1_RAM; 1323 break; 1324 case AMDGPU_UCODE_ID_DMCUB: 1325 *type = GFX_FW_TYPE_DMUB; 1326 break; 1327 case AMDGPU_UCODE_ID_MAXIMUM: 1328 default: 1329 return -EINVAL; 1330 } 1331 1332 return 0; 1333 } 1334 1335 static void psp_print_fw_hdr(struct psp_context *psp, 1336 struct amdgpu_firmware_info *ucode) 1337 { 1338 struct amdgpu_device *adev = psp->adev; 1339 struct common_firmware_header *hdr; 1340 1341 switch (ucode->ucode_id) { 1342 case AMDGPU_UCODE_ID_SDMA0: 1343 case AMDGPU_UCODE_ID_SDMA1: 1344 case AMDGPU_UCODE_ID_SDMA2: 1345 case AMDGPU_UCODE_ID_SDMA3: 1346 case AMDGPU_UCODE_ID_SDMA4: 1347 case AMDGPU_UCODE_ID_SDMA5: 1348 case AMDGPU_UCODE_ID_SDMA6: 1349 case AMDGPU_UCODE_ID_SDMA7: 1350 hdr = (struct common_firmware_header *) 1351 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data; 1352 amdgpu_ucode_print_sdma_hdr(hdr); 1353 break; 1354 case AMDGPU_UCODE_ID_CP_CE: 1355 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data; 1356 amdgpu_ucode_print_gfx_hdr(hdr); 1357 break; 1358 case AMDGPU_UCODE_ID_CP_PFP: 1359 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data; 1360 amdgpu_ucode_print_gfx_hdr(hdr); 1361 break; 1362 case AMDGPU_UCODE_ID_CP_ME: 1363 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data; 1364 amdgpu_ucode_print_gfx_hdr(hdr); 1365 break; 1366 case AMDGPU_UCODE_ID_CP_MEC1: 1367 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data; 1368 amdgpu_ucode_print_gfx_hdr(hdr); 1369 break; 1370 case AMDGPU_UCODE_ID_RLC_G: 1371 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data; 1372 amdgpu_ucode_print_rlc_hdr(hdr); 1373 break; 1374 case AMDGPU_UCODE_ID_SMC: 1375 hdr = (struct common_firmware_header *)adev->pm.fw->data; 1376 amdgpu_ucode_print_smc_hdr(hdr); 1377 break; 1378 default: 1379 break; 1380 } 1381 } 1382 1383 static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, 1384 struct psp_gfx_cmd_resp *cmd) 1385 { 1386 int ret; 1387 uint64_t fw_mem_mc_addr = ucode->mc_addr; 1388 1389 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 1390 1391 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; 1392 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); 1393 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); 1394 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; 1395 1396 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); 1397 if (ret) 1398 DRM_ERROR("Unknown firmware type\n"); 1399 1400 return ret; 1401 } 1402 1403 static int psp_execute_np_fw_load(struct psp_context *psp, 1404 struct amdgpu_firmware_info *ucode) 1405 { 1406 int ret = 0; 1407 1408 ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); 1409 if (ret) 1410 return ret; 1411 1412 ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, 1413 psp->fence_buf_mc_addr); 1414 1415 return ret; 1416 } 1417 1418 static int psp_np_fw_load(struct psp_context *psp) 1419 { 1420 int i, ret; 1421 struct amdgpu_firmware_info *ucode; 1422 struct amdgpu_device* adev = psp->adev; 1423 1424 if (psp->autoload_supported || 1425 psp->pmfw_centralized_cstate_management) { 1426 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 1427 if (!ucode->fw || amdgpu_sriov_vf(adev)) 1428 goto out; 1429 1430 ret = psp_execute_np_fw_load(psp, ucode); 1431 if (ret) 1432 return ret; 1433 } 1434 1435 if (psp->pmfw_centralized_cstate_management) { 1436 ret = psp_tmr_load(psp); 1437 if (ret) { 1438 DRM_ERROR("PSP load 
tmr failed!\n"); 1439 return ret; 1440 } 1441 } 1442 1443 out: 1444 for (i = 0; i < adev->firmware.max_ucodes; i++) { 1445 ucode = &adev->firmware.ucode[i]; 1446 if (!ucode->fw) 1447 continue; 1448 1449 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && 1450 (psp_smu_reload_quirk(psp) || 1451 psp->autoload_supported || 1452 psp->pmfw_centralized_cstate_management)) 1453 continue; 1454 1455 if (amdgpu_sriov_vf(adev) && 1456 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 1457 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 1458 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 1459 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 1460 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 1461 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 1462 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 1463 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 1464 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G 1465 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL 1466 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM 1467 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM 1468 || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) 1469 /*skip ucode loading in SRIOV VF */ 1470 continue; 1471 1472 if (psp->autoload_supported && 1473 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || 1474 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) 1475 /* skip mec JT when autoload is enabled */ 1476 continue; 1477 1478 psp_print_fw_hdr(psp, ucode); 1479 1480 ret = psp_execute_np_fw_load(psp, ucode); 1481 if (ret) 1482 return ret; 1483 1484 /* Start rlc autoload after psp recieved all the gfx firmware */ 1485 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 1486 AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { 1487 ret = psp_rlc_autoload(psp); 1488 if (ret) { 1489 DRM_ERROR("Failed to start rlc autoload\n"); 1490 return ret; 1491 } 1492 } 1493 #if 0 1494 /* check if firmware loaded sucessfully */ 1495 if (!amdgpu_psp_check_fw_loading_status(adev, i)) 1496 return -EINVAL; 1497 #endif 1498 } 1499 1500 return 0; 1501 } 1502 1503 static int psp_load_fw(struct amdgpu_device *adev) 1504 { 1505 int ret; 1506 struct psp_context *psp = &adev->psp; 1507 1508 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) { 1509 psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ 1510 goto skip_memalloc; 1511 } 1512 1513 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1514 if (!psp->cmd) 1515 return -ENOMEM; 1516 1517 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 1518 AMDGPU_GEM_DOMAIN_GTT, 1519 &psp->fw_pri_bo, 1520 &psp->fw_pri_mc_addr, 1521 &psp->fw_pri_buf); 1522 if (ret) 1523 goto failed; 1524 1525 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 1526 AMDGPU_GEM_DOMAIN_VRAM, 1527 &psp->fence_buf_bo, 1528 &psp->fence_buf_mc_addr, 1529 &psp->fence_buf); 1530 if (ret) 1531 goto failed; 1532 1533 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE, 1534 AMDGPU_GEM_DOMAIN_VRAM, 1535 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr, 1536 (void **)&psp->cmd_buf_mem); 1537 if (ret) 1538 goto failed; 1539 1540 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE); 1541 1542 ret = psp_ring_init(psp, PSP_RING_TYPE__KM); 1543 if (ret) { 1544 DRM_ERROR("PSP ring init failed!\n"); 1545 goto failed; 1546 } 1547 1548 skip_memalloc: 1549 ret = psp_hw_start(psp); 1550 if (ret) 1551 goto failed; 1552 1553 ret = psp_np_fw_load(psp); 1554 if (ret) 1555 goto failed; 1556 1557 ret = psp_asd_load(psp); 1558 if (ret) { 1559 DRM_ERROR("PSP load asd failed!\n"); 1560 return ret; 1561 } 
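
	/*
	 * Descriptive note (added): the TA (RAS/HDCP/DTM) initialization below
	 * is best effort. A failure is logged with dev_err() but does not fail
	 * psp_load_fw(), since these trusted applications are optional.
	 */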
	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	return 0;

failed:
	/*
	 * all cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini
	 */
	return ret;
}

static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is only used once, during hw_init; it is not
	 * needed on resume.
	 */
	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		DRM_ERROR("PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (psp->adev->psp.ta_fw) {
		psp_ras_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);
	}

	psp_asd_unload(psp);

	psp_tmr_terminate(psp);
	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	kfree(psp->cmd);
	psp->cmd = NULL;

	return 0;
}

static int psp_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.initialized == 1) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate xgmi ta\n");
			return ret;
		}
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate ras ta\n");
			return ret;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate hdcp ta\n");
			return ret;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate dtm ta\n");
			return ret;
		}
	}

	ret = psp_asd_unload(psp);
	if (ret) {
		DRM_ERROR("Failed to unload asd\n");
		return ret;
	}

	ret = psp_tmr_terminate(psp);
	if (ret) {
		DRM_ERROR("Failed to terminate tmr\n");
		return ret;
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");
		return ret;
	}

	return 0;
}

static int psp_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	DRM_INFO("PSP is resuming...\n");

	ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	mutex_lock(&adev->firmware.mutex);

	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		goto failed;
	}

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		ret = psp_xgmi_initialize(psp);
		/* Warn on XGMI session initialization failure
		 * instead of stopping driver initialization.
		 */
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");
	}

	mutex_unlock(&adev->firmware.mutex);

	return 0;

failed:
	DRM_ERROR("PSP resume failed\n");
	mutex_unlock(&adev->firmware.mutex);
	return ret;
}

int psp_gpu_reset(struct amdgpu_device *adev)
{
	int ret;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	mutex_lock(&adev->psp.mutex);
	ret = psp_mode1_reset(&adev->psp);
	mutex_unlock(&adev->psp.mutex);

	return ret;
}

int psp_rlc_autoload_start(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	kfree(cmd);
	return ret;
}

int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
			uint64_t cmd_gpu_addr, int cmd_size)
{
	struct amdgpu_firmware_info ucode = {0};

	ucode.ucode_id = inst_idx ?
AMDGPU_UCODE_ID_VCN1_RAM : 1807 AMDGPU_UCODE_ID_VCN0_RAM; 1808 ucode.mc_addr = cmd_gpu_addr; 1809 ucode.ucode_size = cmd_size; 1810 1811 return psp_execute_np_fw_load(&adev->psp, &ucode); 1812 } 1813 1814 int psp_ring_cmd_submit(struct psp_context *psp, 1815 uint64_t cmd_buf_mc_addr, 1816 uint64_t fence_mc_addr, 1817 int index) 1818 { 1819 unsigned int psp_write_ptr_reg = 0; 1820 struct psp_gfx_rb_frame *write_frame; 1821 struct psp_ring *ring = &psp->km_ring; 1822 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem; 1823 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start + 1824 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1; 1825 struct amdgpu_device *adev = psp->adev; 1826 uint32_t ring_size_dw = ring->ring_size / 4; 1827 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 1828 1829 /* KM (GPCOM) prepare write pointer */ 1830 psp_write_ptr_reg = psp_ring_get_wptr(psp); 1831 1832 /* Update KM RB frame pointer to new frame */ 1833 /* write_frame ptr increments by size of rb_frame in bytes */ 1834 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */ 1835 if ((psp_write_ptr_reg % ring_size_dw) == 0) 1836 write_frame = ring_buffer_start; 1837 else 1838 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw); 1839 /* Check invalid write_frame ptr address */ 1840 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) { 1841 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n", 1842 ring_buffer_start, ring_buffer_end, write_frame); 1843 DRM_ERROR("write_frame is pointing to address out of bounds\n"); 1844 return -EINVAL; 1845 } 1846 1847 /* Initialize KM RB frame */ 1848 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); 1849 1850 /* Update KM RB frame */ 1851 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); 1852 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); 1853 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); 1854 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); 1855 write_frame->fence_value = index; 1856 amdgpu_asic_flush_hdp(adev, NULL); 1857 1858 /* Update the write Pointer in DWORDs */ 1859 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 1860 psp_ring_set_wptr(psp, psp_write_ptr_reg); 1861 return 0; 1862 } 1863 1864 static bool psp_check_fw_loading_status(struct amdgpu_device *adev, 1865 enum AMDGPU_UCODE_ID ucode_type) 1866 { 1867 struct amdgpu_firmware_info *ucode = NULL; 1868 1869 if (!adev->firmware.fw_size) 1870 return false; 1871 1872 ucode = &adev->firmware.ucode[ucode_type]; 1873 if (!ucode->fw || !ucode->ucode_size) 1874 return false; 1875 1876 return psp_compare_sram_data(&adev->psp, ucode, ucode_type); 1877 } 1878 1879 static int psp_set_clockgating_state(void *handle, 1880 enum amd_clockgating_state state) 1881 { 1882 return 0; 1883 } 1884 1885 static int psp_set_powergating_state(void *handle, 1886 enum amd_powergating_state state) 1887 { 1888 return 0; 1889 } 1890 1891 static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, 1892 struct device_attribute *attr, 1893 char *buf) 1894 { 1895 struct drm_device *ddev = dev_get_drvdata(dev); 1896 struct amdgpu_device *adev = ddev->dev_private; 1897 uint32_t fw_ver; 1898 int ret; 1899 1900 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 1901 DRM_INFO("PSP block is not ready yet."); 1902 return -EBUSY; 1903 } 1904 1905 mutex_lock(&adev->psp.mutex); 1906 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); 1907 
mutex_unlock(&adev->psp.mutex); 1908 1909 if (ret) { 1910 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); 1911 return ret; 1912 } 1913 1914 return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); 1915 } 1916 1917 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, 1918 struct device_attribute *attr, 1919 const char *buf, 1920 size_t count) 1921 { 1922 STUB(); 1923 return -ENOSYS; 1924 #ifdef notyet 1925 struct drm_device *ddev = dev_get_drvdata(dev); 1926 struct amdgpu_device *adev = ddev->dev_private; 1927 void *cpu_addr; 1928 dma_addr_t dma_addr; 1929 int ret; 1930 char fw_name[100]; 1931 const struct firmware *usbc_pd_fw; 1932 1933 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { 1934 DRM_INFO("PSP block is not ready yet."); 1935 return -EBUSY; 1936 } 1937 1938 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); 1939 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); 1940 if (ret) 1941 goto fail; 1942 1943 /* We need contiguous physical mem to place the FW for psp to access */ 1944 cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); 1945 1946 ret = dma_mapping_error(adev->dev, dma_addr); 1947 if (ret) 1948 goto rel_buf; 1949 1950 memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); 1951 1952 /* 1953 * x86 specific workaround. 1954 * Without it the buffer is invisible in PSP. 1955 * 1956 * TODO Remove once PSP starts snooping CPU cache 1957 */ 1958 #ifdef CONFIG_X86 1959 clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); 1960 #endif 1961 1962 mutex_lock(&adev->psp.mutex); 1963 ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); 1964 mutex_unlock(&adev->psp.mutex); 1965 1966 rel_buf: 1967 dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); 1968 release_firmware(usbc_pd_fw); 1969 1970 fail: 1971 if (ret) { 1972 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); 1973 return ret; 1974 } 1975 1976 return count; 1977 #endif 1978 } 1979 1980 static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, 1981 psp_usbc_pd_fw_sysfs_read, 1982 psp_usbc_pd_fw_sysfs_write); 1983 1984 1985 1986 const struct amd_ip_funcs psp_ip_funcs = { 1987 .name = "psp", 1988 .early_init = psp_early_init, 1989 .late_init = NULL, 1990 .sw_init = psp_sw_init, 1991 .sw_fini = psp_sw_fini, 1992 .hw_init = psp_hw_init, 1993 .hw_fini = psp_hw_fini, 1994 .suspend = psp_suspend, 1995 .resume = psp_resume, 1996 .is_idle = NULL, 1997 .check_soft_reset = NULL, 1998 .wait_for_idle = NULL, 1999 .soft_reset = NULL, 2000 .set_clockgating_state = psp_set_clockgating_state, 2001 .set_powergating_state = psp_set_powergating_state, 2002 }; 2003 2004 static int psp_sysfs_init(struct amdgpu_device *adev) 2005 { 2006 int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); 2007 2008 if (ret) 2009 DRM_ERROR("Failed to create USBC PD FW control file!"); 2010 2011 return ret; 2012 } 2013 2014 static void psp_sysfs_fini(struct amdgpu_device *adev) 2015 { 2016 device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); 2017 } 2018 2019 static const struct amdgpu_psp_funcs psp_funcs = { 2020 .check_fw_loading_status = psp_check_fw_loading_status, 2021 }; 2022 2023 static void psp_set_funcs(struct amdgpu_device *adev) 2024 { 2025 if (NULL == adev->firmware.funcs) 2026 adev->firmware.funcs = &psp_funcs; 2027 } 2028 2029 const struct amdgpu_ip_block_version psp_v3_1_ip_block = 2030 { 2031 .type = AMD_IP_BLOCK_TYPE_PSP, 2032 .major = 3, 2033 .minor = 1, 2034 .rev = 0, 2035 .funcs = &psp_ip_funcs, 2036 }; 2037 2038 const struct 
amdgpu_ip_block_version psp_v10_0_ip_block = 2039 { 2040 .type = AMD_IP_BLOCK_TYPE_PSP, 2041 .major = 10, 2042 .minor = 0, 2043 .rev = 0, 2044 .funcs = &psp_ip_funcs, 2045 }; 2046 2047 const struct amdgpu_ip_block_version psp_v11_0_ip_block = 2048 { 2049 .type = AMD_IP_BLOCK_TYPE_PSP, 2050 .major = 11, 2051 .minor = 0, 2052 .rev = 0, 2053 .funcs = &psp_ip_funcs, 2054 }; 2055 2056 const struct amdgpu_ip_block_version psp_v12_0_ip_block = 2057 { 2058 .type = AMD_IP_BLOCK_TYPE_PSP, 2059 .major = 12, 2060 .minor = 0, 2061 .rev = 0, 2062 .funcs = &psp_ip_funcs, 2063 }; 2064