/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Huang Rui
 *
 */

#include <linux/firmware.h>
#include <linux/dma-mapping.h>

#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v12_0.h"

#include "amdgpu_ras.h"

static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);

static int psp_load_smu_fw(struct psp_context *psp);

/*
 * Because DF Cstate management is centralized in the PMFW, the firmware
 * loading sequence is updated as below:
 * - Load KDB
 * - Load SYS_DRV
 * - Load tOS
 * - Load PMFW
 * - Setup TMR
 * - Load other non-psp fw
 * - Load ASD
 * - Load XGMI/RAS/HDCP/DTM TA if any
 *
 * This new sequence is required for
 *   - Arcturus
 *   - Navi12 and onwards
 */
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;

	psp->pmfw_centralized_cstate_management = false;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if ((adev->asic_type == CHIP_ARCTURUS) ||
	    (adev->asic_type >= CHIP_NAVI12))
		psp->pmfw_centralized_cstate_management = true;
}

static int psp_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		psp_v3_1_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_RAVEN:
		psp_v10_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = false;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		psp_v11_0_set_psp_funcs(psp);
		psp->autoload_supported = true;
		break;
	case CHIP_RENOIR:
		psp_v12_0_set_psp_funcs(psp);
		break;
	default:
		return -EINVAL;
	}

	psp->adev = adev;

	psp_check_pmfw_centralized_cstate_management(psp);

	return 0;
}
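/*
 * Memory-training helpers.
 *
 * These appear to manage a system-memory backup (sys_cache) of the memory
 * training data that the PSP keeps in the reserved VRAM carve-out; the copy
 * is what lets psp_mem_training() replay the data across cold boot and
 * resume.  The hand-off details live in the ASIC-specific psp_vXX files, so
 * treat this as a summary of the generic code below rather than a full
 * description of the interface.
 */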
static void psp_memory_training_fini(struct psp_context *psp)
{
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
	kfree(ctx->sys_cache);
	ctx->sys_cache = NULL;
}

static int psp_memory_training_init(struct psp_context *psp)
{
	int ret;
	struct psp_memory_training_context *ctx = &psp->mem_train_ctx;

	if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
		DRM_DEBUG("memory training is not supported!\n");
		return 0;
	}

	ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
	if (ctx->sys_cache == NULL) {
		DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
		ret = -ENOMEM;
		goto Err_out;
	}

	DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
		  ctx->train_data_size,
		  ctx->p2c_train_data_offset,
		  ctx->c2p_train_data_offset);
	ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
	return 0;

Err_out:
	psp_memory_training_fini(psp);
	return ret;
}

static int psp_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		ret = psp_init_microcode(psp);
		if (ret) {
			DRM_ERROR("Failed to load psp firmware!\n");
			return ret;
		}
	}

	ret = psp_memory_training_init(psp);
	if (ret) {
		DRM_ERROR("Failed to initialize memory training!\n");
		return ret;
	}
	ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
	if (ret) {
		DRM_ERROR("Failed to process memory training!\n");
		return ret;
	}

	if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
		ret = psp_sysfs_init(adev);
		if (ret) {
			return ret;
		}
	}

	return 0;
}

static int psp_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	psp_memory_training_fini(&adev->psp);
	if (adev->psp.sos_fw) {
		release_firmware(adev->psp.sos_fw);
		adev->psp.sos_fw = NULL;
	}
	if (adev->psp.asd_fw) {
		release_firmware(adev->psp.asd_fw);
		adev->psp.asd_fw = NULL;
	}
	if (adev->psp.ta_fw) {
		release_firmware(adev->psp.ta_fw);
		adev->psp.ta_fw = NULL;
	}

	if (adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_SIENNA_CICHLID)
		psp_sysfs_fini(adev);

	return 0;
}

int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
		 uint32_t reg_val, uint32_t mask, bool check_changed)
{
	uint32_t val;
	int i;
	struct amdgpu_device *adev = psp->adev;

	if (psp->adev->in_pci_err_recovery)
		return 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (check_changed) {
			if (val != reg_val)
				return 0;
		} else {
			if ((val & mask) == reg_val)
				return 0;
		}
		udelay(1);
	}

	return -ETIME;
}
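/*
 * Submit one GFX command to the PSP over the KM ring and wait for its
 * completion fence.  The command is copied into the shared command buffer,
 * a monotonically increasing fence value is written by the PSP into
 * fence_buf when the command retires, and the response (status, session id,
 * TMR address for ucode loads) is read back from the same buffer.  The
 * polling loop bails out early when a RAS err_event_athub interrupt has
 * fired, since the GPU reset path then owns the hardware.
 */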
static int
psp_cmd_submit_buf(struct psp_context *psp,
		   struct amdgpu_firmware_info *ucode,
		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
	int ret;
	int index;
	int timeout = 2000;
	bool ras_intr = false;
	bool skip_unsupport = false;

	if (psp->adev->in_pci_err_recovery)
		return 0;

	mutex_lock(&psp->mutex);

	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);

	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));

	index = atomic_inc_return(&psp->fence_value);
	ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr,
				  index);
	if (ret) {
		atomic_dec(&psp->fence_value);
		mutex_unlock(&psp->mutex);
		return ret;
	}

	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	while (*((unsigned int *)psp->fence_buf) != index) {
		if (--timeout == 0)
			break;
		/*
		 * Don't wait for the timeout when err_event_athub occurs:
		 * the GPU reset thread has been triggered and the locked
		 * resources must be released for the PSP resume sequence.
		 */
		ras_intr = amdgpu_ras_intr_triggered();
		if (ras_intr)
			break;
		drm_msleep(1);
		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
	}

	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
	skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
		psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);

	/* In some cases, the PSP response status is not 0 even though there
	 * is no problem while the command is submitted.  Some versions of
	 * the PSP FW don't write 0 to that field.
	 * So here we only print a warning instead of an error during PSP
	 * initialization to avoid breaking hw_init, and we don't return
	 * -EINVAL.
	 */
	if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
		if (ucode)
			DRM_WARN("failed to load ucode id (%d) ",
				 ucode->ucode_id);
		DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
			 psp->cmd_buf_mem->cmd_id,
			 psp->cmd_buf_mem->resp.status);
		if (!timeout) {
			mutex_unlock(&psp->mutex);
			return -EINVAL;
		}
	}

	/* get xGMI session id from response buffer */
	cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id;

	if (ucode) {
		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
	}
	mutex_unlock(&psp->mutex);

	return ret;
}

static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
				 struct psp_gfx_cmd_resp *cmd,
				 uint64_t tmr_mc, uint32_t size)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
	cmd->cmd.cmd_setup_tmr.buf_size = size;
}

static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t pri_buf_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
	cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
	cmd->cmd.cmd_load_toc.toc_size = size;
}

/* Issue a LOAD TOC cmd to the PSP to parse the TOC and calculate the TMR size needed */
static int psp_load_toc(struct psp_context *psp,
			uint32_t *tmr_size)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	/* Copy toc to psp firmware private buffer */
	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size);

	psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
	kfree(cmd);
	return ret;
}
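/*
 * The TMR (Trusted Memory Region) is a block of VRAM handed over to the PSP;
 * firmware images submitted with GFX_CMD_ID_LOAD_IP_FW are staged there by
 * the PSP before being programmed into the target IP.  Its size is either
 * the fixed PSP_TMR_SIZE or, on ASICs with RLC autoload, whatever the PSP
 * reports after parsing the TOC above.  This is a working summary of the
 * code that follows, not a statement of the PSP firmware interface.
 */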
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
	int ret;
	int tmr_size;
	void *tmr_buf;
	void **pptr;

	/*
	 * According to the HW engineers, they prefer the TMR address to be
	 * "naturally aligned", e.g. the start address is an integer multiple
	 * of the TMR size.
	 *
	 * Note: this memory must stay reserved until the driver is unloaded.
	 */
	tmr_size = PSP_TMR_SIZE;

	/* For ASICs that support RLC autoload, the PSP parses the TOC and
	 * calculates the total TMR size needed */
	if (!amdgpu_sriov_vf(psp->adev) &&
	    psp->toc_start_addr &&
	    psp->toc_bin_size &&
	    psp->fw_pri_buf) {
		ret = psp_load_toc(psp, &tmr_size);
		if (ret) {
			DRM_ERROR("Failed to load toc\n");
			return ret;
		}
	}

	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->tmr_bo, &psp->tmr_mc_addr, pptr);

	return ret;
}

static int psp_clear_vf_fw(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW;

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
	kfree(cmd);

	return ret;
}

static bool psp_skip_tmr(struct psp_context *psp)
{
	switch (psp->adev->asic_type) {
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
		return true;
	default:
		return false;
	}
}

static int psp_tmr_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* For Navi12 and Sienna Cichlid SRIOV, do not set up the TMR;
	 * it has already been set up by the host driver.
	 */
	if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
			     amdgpu_bo_size(psp->tmr_bo));
	DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
		 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
					struct psp_gfx_cmd_resp *cmd)
{
	if (amdgpu_sriov_vf(psp->adev))
		cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
	else
		cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}

static int psp_tmr_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_tmr_unload_cmd_buf(psp, cmd);
	DRM_INFO("free PSP TMR buffer\n");

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}
static int psp_tmr_terminate(struct psp_context *psp)
{
	int ret;
	void *tmr_buf;
	void **pptr;

	ret = psp_tmr_unload(psp);
	if (ret)
		return ret;

	/* free TMR memory buffer */
	pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
	amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);

	return 0;
}

static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint64_t asd_mc, uint32_t size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
	cmd->cmd.cmd_load_ta.app_len = size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
	cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
}

static int psp_asd_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/* If the PSP version doesn't match the ASD version, ASD loading
	 * will fail.  Bypass it for SRIOV as a workaround for now.
	 * TODO: add a version check to make this common.
	 */
	if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);

	psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
				  psp->asd_ucode_size);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret) {
		psp->asd_context.asd_initialized = true;
		psp->asd_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
	cmd->cmd.cmd_unload_ta.session_id = session_id;
}

static int psp_asd_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->asd_context.asd_initialized)
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	if (!ret)
		psp->asd_context.asd_initialized = false;

	kfree(cmd);

	return ret;
}

static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				      uint32_t id, uint32_t value)
{
	cmd->cmd_id = GFX_CMD_ID_PROG_REG;
	cmd->cmd.cmd_setup_reg_prog.reg_value = value;
	cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}

int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
		    uint32_t value)
{
	struct psp_gfx_cmd_resp *cmd = NULL;
	int ret = 0;

	if (reg >= PSP_REG_LAST)
		return -EINVAL;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);
	return ret;
}
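/*
 * Every trusted application (XGMI, RAS, HDCP, DTM, RAP) below follows the
 * same pattern: allocate a small VRAM buffer shared with the TA, copy the TA
 * binary into fw_pri_buf and issue GFX_CMD_ID_LOAD_TA, drive it with
 * GFX_CMD_ID_INVOKE_CMD using the session id returned by the load, and
 * finally issue GFX_CMD_ID_UNLOAD_TA and free the shared buffer on teardown.
 * The per-TA wrappers only differ in the command structures they exchange
 * through the shared buffer.
 */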
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				     uint64_t ta_bin_mc,
				     uint32_t ta_bin_size,
				     uint64_t ta_shared_mc,
				     uint32_t ta_shared_size)
{
	cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
	cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
	cmd->cmd.cmd_load_ta.app_len = ta_bin_size;

	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
	cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
}

static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for xgmi ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->xgmi_context.xgmi_shared_bo,
				      &psp->xgmi_context.xgmi_shared_mc_addr,
				      &psp->xgmi_context.xgmi_shared_buf);

	return ret;
}

static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
				       uint32_t ta_cmd_id,
				       uint32_t session_id)
{
	cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
	cmd->cmd.cmd_invoke_cmd.session_id = session_id;
	cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}

static int psp_ta_invoke(struct psp_context *psp,
			 uint32_t ta_cmd_id,
			 uint32_t session_id)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static int psp_xgmi_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_xgmi_ucode_size,
				 psp->xgmi_context.xgmi_shared_mc_addr,
				 PSP_XGMI_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	if (!ret) {
		psp->xgmi_context.initialized = 1;
		psp->xgmi_context.session_id = cmd->resp.session_id;
	}

	kfree(cmd);

	return ret;
}

static int psp_xgmi_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	struct amdgpu_device *adev = psp->adev;

	/* XGMI TA unload currently is not supported on Arcturus */
	if (adev->asic_type == CHIP_ARCTURUS)
		return 0;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}

int psp_xgmi_terminate(struct psp_context *psp)
{
	int ret;

	if (!psp->xgmi_context.initialized)
		return 0;

	ret = psp_xgmi_unload(psp);
	if (ret)
		return ret;

	psp->xgmi_context.initialized = 0;

	/* free xgmi shared memory */
	amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
			      &psp->xgmi_context.xgmi_shared_mc_addr,
			      &psp->xgmi_context.xgmi_shared_buf);

	return 0;
}
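/*
 * psp_xgmi_initialize() appears to be the entry point used when an XGMI
 * hive is brought up (psp_resume() below calls it when more than one
 * physical node is present): it allocates the shared buffer, loads the XGMI
 * TA and sends the TA its INITIALIZE command.  The hive/node id and topology
 * queries that follow all assume this has succeeded.
 */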
int psp_xgmi_initialize(struct psp_context *psp)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	if (!psp->adev->psp.ta_fw ||
	    !psp->adev->psp.ta_xgmi_ucode_size ||
	    !psp->adev->psp.ta_xgmi_start_addr)
		return -ENOENT;

	if (!psp->xgmi_context.initialized) {
		ret = psp_xgmi_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	/* Load XGMI TA */
	ret = psp_xgmi_load(psp);
	if (ret)
		return ret;

	/* Initialize XGMI session */
	xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;

	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);

	return ret;
}

int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

	/* Invoke xgmi ta to get hive id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;

	return 0;
}

int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	int ret;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;

	/* Invoke xgmi ta to get the node id */
	ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
	if (ret)
		return ret;

	*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;

	return 0;
}
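/*
 * Topology queries work request/response style through the shared buffer:
 * the caller fills in the node ids it already knows about, the TA fills in
 * the per-link details (hop count, sharing, SDMA engine), and the result is
 * copied back into the caller's psp_xgmi_topology_info.  The node count is
 * capped at TA_XGMI__MAX_CONNECTED_NODES on both the get and set paths.
 */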
int psp_xgmi_get_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
	int i;
	int ret;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	/* Fill in the shared memory with topology information as input */
	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to get the topology information */
	ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
	if (ret)
		return ret;

	/* Read the output topology information from the shared memory */
	topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
	topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
	for (i = 0; i < topology->num_nodes; i++) {
		topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
		topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
		topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
		topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
	}

	return 0;
}

int psp_xgmi_set_topology_info(struct psp_context *psp,
			       int number_devices,
			       struct psp_xgmi_topology_info *topology)
{
	struct ta_xgmi_shared_memory *xgmi_cmd;
	struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
	int i;

	if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
		return -EINVAL;

	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
	memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));

	topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
	xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
	topology_info_input->num_nodes = number_devices;

	for (i = 0; i < topology_info_input->num_nodes; i++) {
		topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
		topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
		topology_info_input->nodes[i].is_sharing_enabled = 1;
		topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
	}

	/* Invoke xgmi ta to set topology information */
	return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}

// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for ras ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->ras.ras_shared_bo,
				      &psp->ras.ras_shared_mc_addr,
				      &psp->ras.ras_shared_buf);

	return ret;
}

static int psp_ras_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;
	struct ta_ras_shared_memory *ras_cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_ras_ucode_size,
				 psp->ras.ras_shared_mc_addr,
				 PSP_RAS_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;

	if (!ret) {
		psp->ras.session_id = cmd->resp.session_id;

		if (!ras_cmd->ras_status)
			psp->ras.ras_initialized = true;
		else
			dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
	}

	if (ret || ras_cmd->ras_status)
		amdgpu_ras_fini(psp->adev);

	kfree(cmd);

	return ret;
}
static int psp_ras_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
		DRM_WARN("RAS: Unsupported Interface");
		return -EINVAL;
	}

	if (!ret) {
		if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
			dev_warn(psp->adev->dev, "ECC switch disabled\n");

			ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
		} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
			dev_warn(psp->adev->dev,
				 "RAS internal register access blocked\n");
	}

	return ret;
}

int psp_ras_enable_features(struct psp_context *psp,
			    union ta_ras_cmd_input *info, bool enable)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	if (enable)
		ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	else
		ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;

	ras_cmd->ras_in_message = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	return ras_cmd->ras_status;
}

static int psp_ras_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->ras.ras_initialized)
		return 0;

	ret = psp_ras_unload(psp);
	if (ret)
		return ret;

	psp->ras.ras_initialized = false;

	/* free ras shared memory */
	amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
			      &psp->ras.ras_shared_mc_addr,
			      &psp->ras.ras_shared_buf);

	return 0;
}

static int psp_ras_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_ras_ucode_size ||
	    !psp->adev->psp.ta_ras_start_addr) {
		dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
		return 0;
	}

	if (!psp->ras.ras_initialized) {
		ret = psp_ras_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_ras_load(psp);
	if (ret)
		return ret;

	return 0;
}
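/*
 * Error injection entry point; in this tree it is likely reached through the
 * amdgpu RAS debugfs injection path rather than called directly.  Note that
 * a successful injection can itself raise err_event_athub, in which case the
 * TA status is meaningless and the function reports success based on the
 * interrupt alone (see the check below).
 */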
int psp_ras_trigger_error(struct psp_context *psp,
			  struct ta_ras_trigger_error_input *info)
{
	struct ta_ras_shared_memory *ras_cmd;
	int ret;

	if (!psp->ras.ras_initialized)
		return -EINVAL;

	ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
	memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));

	ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
	ras_cmd->ras_in_message.trigger_error = *info;

	ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
	if (ret)
		return -EINVAL;

	/* If err_event_athub occurs, the error injection was successful,
	 * but the return status from the TA is no longer reliable */
	if (amdgpu_ras_intr_triggered())
		return 0;

	return ras_cmd->ras_status;
}
// ras end

// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for hdcp ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->hdcp_context.hdcp_shared_bo,
				      &psp->hdcp_context.hdcp_shared_mc_addr,
				      &psp->hdcp_context.hdcp_shared_buf);

	return ret;
}

static int psp_hdcp_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
	       psp->ta_hdcp_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_hdcp_ucode_size,
				 psp->hdcp_context.hdcp_shared_mc_addr,
				 PSP_HDCP_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->hdcp_context.hdcp_initialized = true;
		psp->hdcp_context.session_id = cmd->resp.session_id;
		rw_init(&psp->hdcp_context.mutex, "pspcp");
	}

	kfree(cmd);

	return ret;
}

static int psp_hdcp_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_hdcp_ucode_size ||
	    !psp->adev->psp.ta_hdcp_start_addr) {
		dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
		return 0;
	}

	if (!psp->hdcp_context.hdcp_initialized) {
		ret = psp_hdcp_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_hdcp_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_hdcp_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
}
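/*
 * Unlike the RAS path, terminate here also has to cope with the case where
 * the shared buffer was allocated but the TA never finished loading
 * (hdcp_initialized is false while hdcp_shared_buf is set); the goto below
 * makes sure the buffer is still freed in that situation.  The DTM terminate
 * further down follows the same pattern.
 */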
static int psp_hdcp_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->hdcp_context.hdcp_initialized) {
		if (psp->hdcp_context.hdcp_shared_buf)
			goto out;
		else
			return 0;
	}

	ret = psp_hdcp_unload(psp);
	if (ret)
		return ret;

	psp->hdcp_context.hdcp_initialized = false;

out:
	/* free hdcp shared memory */
	amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
			      &psp->hdcp_context.hdcp_shared_mc_addr,
			      &psp->hdcp_context.hdcp_shared_buf);

	return 0;
}
// HDCP end

// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for dtm ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->dtm_context.dtm_shared_bo,
				      &psp->dtm_context.dtm_shared_mc_addr,
				      &psp->dtm_context.dtm_shared_buf);

	return ret;
}

static int psp_dtm_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_dtm_ucode_size,
				 psp->dtm_context.dtm_shared_mc_addr,
				 PSP_DTM_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->dtm_context.dtm_initialized = true;
		psp->dtm_context.session_id = cmd->resp.session_id;
		rw_init(&psp->dtm_context.mutex, "pspdtm");
	}

	kfree(cmd);

	return ret;
}

static int psp_dtm_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_dtm_ucode_size ||
	    !psp->adev->psp.ta_dtm_start_addr) {
		dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
		return 0;
	}

	if (!psp->dtm_context.dtm_initialized) {
		ret = psp_dtm_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_dtm_load(psp);
	if (ret)
		return ret;

	return 0;
}

static int psp_dtm_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	/*
	 * TODO: bypass the unloading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	/*
	 * TODO: bypass the loading in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
}
static int psp_dtm_terminate(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the terminate in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->dtm_context.dtm_initialized) {
		if (psp->dtm_context.dtm_shared_buf)
			goto out;
		else
			return 0;
	}

	ret = psp_dtm_unload(psp);
	if (ret)
		return ret;

	psp->dtm_context.dtm_initialized = false;

out:
	/* free dtm shared memory */
	amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
			      &psp->dtm_context.dtm_shared_mc_addr,
			      &psp->dtm_context.dtm_shared_buf);

	return 0;
}
// DTM end

// RAP start
static int psp_rap_init_shared_buf(struct psp_context *psp)
{
	int ret;

	/*
	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
	 * physical) for rap ta <-> Driver
	 */
	ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->rap_context.rap_shared_bo,
				      &psp->rap_context.rap_shared_mc_addr,
				      &psp->rap_context.rap_shared_buf);

	return ret;
}

static int psp_rap_load(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);

	psp_prep_ta_load_cmd_buf(cmd,
				 psp->fw_pri_mc_addr,
				 psp->ta_rap_ucode_size,
				 psp->rap_context.rap_shared_mc_addr,
				 PSP_RAP_SHARED_MEM_SIZE);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	if (!ret) {
		psp->rap_context.rap_initialized = true;
		psp->rap_context.session_id = cmd->resp.session_id;
		rw_init(&psp->rap_context.mutex, "psprap");
	}

	kfree(cmd);

	return ret;
}

static int psp_rap_unload(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);

	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	kfree(cmd);

	return ret;
}

static int psp_rap_initialize(struct psp_context *psp)
{
	int ret;

	/*
	 * TODO: bypass the initialize in sriov for now
	 */
	if (amdgpu_sriov_vf(psp->adev))
		return 0;

	if (!psp->adev->psp.ta_rap_ucode_size ||
	    !psp->adev->psp.ta_rap_start_addr) {
		dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
		return 0;
	}

	if (!psp->rap_context.rap_initialized) {
		ret = psp_rap_init_shared_buf(psp);
		if (ret)
			return ret;
	}

	ret = psp_rap_load(psp);
	if (ret)
		return ret;

	ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE);
	if (ret != TA_RAP_STATUS__SUCCESS) {
		psp_rap_unload(psp);

		amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
				      &psp->rap_context.rap_shared_mc_addr,
				      &psp->rap_context.rap_shared_buf);

		psp->rap_context.rap_initialized = false;

		dev_warn(psp->adev->dev, "RAP TA initialize fail.\n");
		return -EINVAL;
	}

	return 0;
}
static int psp_rap_terminate(struct psp_context *psp)
{
	int ret;

	if (!psp->rap_context.rap_initialized)
		return 0;

	ret = psp_rap_unload(psp);

	psp->rap_context.rap_initialized = false;

	/* free rap shared memory */
	amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
			      &psp->rap_context.rap_shared_mc_addr,
			      &psp->rap_context.rap_shared_buf);

	return ret;
}

int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	struct ta_rap_shared_memory *rap_cmd;
	int ret;

	if (!psp->rap_context.rap_initialized)
		return -EINVAL;

	if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
	    ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
		return -EINVAL;

	mutex_lock(&psp->rap_context.mutex);

	rap_cmd = (struct ta_rap_shared_memory *)
		  psp->rap_context.rap_shared_buf;
	memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));

	rap_cmd->cmd_id = ta_cmd_id;
	rap_cmd->validation_method_id = METHOD_A;

	ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
	if (ret) {
		mutex_unlock(&psp->rap_context.mutex);
		return ret;
	}

	mutex_unlock(&psp->rap_context.mutex);

	return rap_cmd->rap_status;
}
// RAP end

static int psp_hw_start(struct psp_context *psp)
{
	struct amdgpu_device *adev = psp->adev;
	int ret;

	if (!amdgpu_sriov_vf(adev)) {
		if (psp->kdb_bin_size &&
		    (psp->funcs->bootloader_load_kdb != NULL)) {
			ret = psp_bootloader_load_kdb(psp);
			if (ret) {
				DRM_ERROR("PSP load kdb failed!\n");
				return ret;
			}
		}

		if (psp->spl_bin_size) {
			ret = psp_bootloader_load_spl(psp);
			if (ret) {
				DRM_ERROR("PSP load spl failed!\n");
				return ret;
			}
		}

		ret = psp_bootloader_load_sysdrv(psp);
		if (ret) {
			DRM_ERROR("PSP load sysdrv failed!\n");
			return ret;
		}

		ret = psp_bootloader_load_sos(psp);
		if (ret) {
			DRM_ERROR("PSP load sos failed!\n");
			return ret;
		}
	}

	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP create ring failed!\n");
		return ret;
	}

	ret = psp_clear_vf_fw(psp);
	if (ret) {
		DRM_ERROR("PSP clear vf fw!\n");
		return ret;
	}

	ret = psp_tmr_init(psp);
	if (ret) {
		DRM_ERROR("PSP tmr init failed!\n");
		return ret;
	}
	/*
	 * For ASICs with DF Cstate management centralized to the PMFW,
	 * the TMR setup should be performed after the PMFW is loaded and
	 * before other non-PSP firmware is loaded.
	 */
	if (psp->pmfw_centralized_cstate_management) {
		ret = psp_load_smu_fw(psp);
		if (ret)
			return ret;
	}

	ret = psp_tmr_load(psp);
	if (ret) {
		DRM_ERROR("PSP load tmr failed!\n");
		return ret;
	}

	return 0;
}

static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
			   enum psp_gfx_fw_type *type)
{
	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
		*type = GFX_FW_TYPE_SDMA0;
		break;
	case AMDGPU_UCODE_ID_SDMA1:
		*type = GFX_FW_TYPE_SDMA1;
		break;
	case AMDGPU_UCODE_ID_SDMA2:
		*type = GFX_FW_TYPE_SDMA2;
		break;
	case AMDGPU_UCODE_ID_SDMA3:
		*type = GFX_FW_TYPE_SDMA3;
		break;
	case AMDGPU_UCODE_ID_SDMA4:
		*type = GFX_FW_TYPE_SDMA4;
		break;
	case AMDGPU_UCODE_ID_SDMA5:
		*type = GFX_FW_TYPE_SDMA5;
		break;
	case AMDGPU_UCODE_ID_SDMA6:
		*type = GFX_FW_TYPE_SDMA6;
		break;
	case AMDGPU_UCODE_ID_SDMA7:
		*type = GFX_FW_TYPE_SDMA7;
		break;
	case AMDGPU_UCODE_ID_CP_MES:
		*type = GFX_FW_TYPE_CP_MES;
		break;
	case AMDGPU_UCODE_ID_CP_MES_DATA:
		*type = GFX_FW_TYPE_MES_STACK;
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		*type = GFX_FW_TYPE_CP_CE;
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		*type = GFX_FW_TYPE_CP_PFP;
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		*type = GFX_FW_TYPE_CP_ME;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME1;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		*type = GFX_FW_TYPE_CP_MEC;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		*type = GFX_FW_TYPE_CP_MEC_ME2;
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		*type = GFX_FW_TYPE_RLC_G;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
		*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
		break;
	case AMDGPU_UCODE_ID_RLC_IRAM:
		*type = GFX_FW_TYPE_RLC_IRAM;
		break;
	case AMDGPU_UCODE_ID_RLC_DRAM:
		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
		break;
	case AMDGPU_UCODE_ID_SMC:
		*type = GFX_FW_TYPE_SMU;
		break;
	case AMDGPU_UCODE_ID_UVD:
		*type = GFX_FW_TYPE_UVD;
		break;
	case AMDGPU_UCODE_ID_UVD1:
		*type = GFX_FW_TYPE_UVD1;
		break;
	case AMDGPU_UCODE_ID_VCE:
		*type = GFX_FW_TYPE_VCE;
		break;
	case AMDGPU_UCODE_ID_VCN:
		*type = GFX_FW_TYPE_VCN;
		break;
	case AMDGPU_UCODE_ID_VCN1:
		*type = GFX_FW_TYPE_VCN1;
		break;
	case AMDGPU_UCODE_ID_DMCU_ERAM:
		*type = GFX_FW_TYPE_DMCU_ERAM;
		break;
	case AMDGPU_UCODE_ID_DMCU_INTV:
		*type = GFX_FW_TYPE_DMCU_ISR;
		break;
	case AMDGPU_UCODE_ID_VCN0_RAM:
		*type = GFX_FW_TYPE_VCN0_RAM;
		break;
	case AMDGPU_UCODE_ID_VCN1_RAM:
		*type = GFX_FW_TYPE_VCN1_RAM;
		break;
	case AMDGPU_UCODE_ID_DMCUB:
		*type = GFX_FW_TYPE_DMUB;
		break;
	case AMDGPU_UCODE_ID_MAXIMUM:
	default:
		return -EINVAL;
	}

	return 0;
}
static void psp_print_fw_hdr(struct psp_context *psp,
			     struct amdgpu_firmware_info *ucode)
{
	struct amdgpu_device *adev = psp->adev;
	struct common_firmware_header *hdr;

	switch (ucode->ucode_id) {
	case AMDGPU_UCODE_ID_SDMA0:
	case AMDGPU_UCODE_ID_SDMA1:
	case AMDGPU_UCODE_ID_SDMA2:
	case AMDGPU_UCODE_ID_SDMA3:
	case AMDGPU_UCODE_ID_SDMA4:
	case AMDGPU_UCODE_ID_SDMA5:
	case AMDGPU_UCODE_ID_SDMA6:
	case AMDGPU_UCODE_ID_SDMA7:
		hdr = (struct common_firmware_header *)
			adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
		amdgpu_ucode_print_sdma_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_PFP:
		hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
		amdgpu_ucode_print_gfx_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_RLC_G:
		hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
		amdgpu_ucode_print_rlc_hdr(hdr);
		break;
	case AMDGPU_UCODE_ID_SMC:
		hdr = (struct common_firmware_header *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(hdr);
		break;
	default:
		break;
	}
}

static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
				       struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;

	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		DRM_ERROR("Unknown firmware type\n");

	return ret;
}

static int psp_execute_np_fw_load(struct psp_context *psp,
				  struct amdgpu_firmware_info *ucode)
{
	int ret = 0;

	ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
	if (ret)
		return ret;

	ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
				 psp->fence_buf_mc_addr);

	return ret;
}

static int psp_load_smu_fw(struct psp_context *psp)
{
	int ret;
	struct amdgpu_device *adev = psp->adev;
	struct amdgpu_firmware_info *ucode =
			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
	struct amdgpu_ras *ras = psp->ras.ras;

	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
		return 0;

	if (amdgpu_in_reset(adev) && ras && ras->supported) {
		ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
		if (ret) {
			DRM_WARN("Failed to set MP1 state prepare for reload\n");
		}
	}

	ret = psp_execute_np_fw_load(psp, ucode);

	if (ret)
		DRM_ERROR("PSP load smu failed!\n");

	return ret;
}
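/*
 * Decide whether a given ucode image should be skipped by the generic
 * loading loop: images with no firmware attached, the SMC image when the
 * PSP already handles it (reload quirk, autoload, or PMFW-centralized
 * cstate management), most IP firmwares under SRIOV where the host loads
 * them, and the MEC JT images when RLC autoload is in use.
 */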
static bool fw_load_skip_check(struct psp_context *psp,
			       struct amdgpu_firmware_info *ucode)
{
	if (!ucode->fw)
		return true;

	if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
	    (psp_smu_reload_quirk(psp) ||
	     psp->autoload_supported ||
	     psp->pmfw_centralized_cstate_management))
		return true;

	if (amdgpu_sriov_vf(psp->adev) &&
	    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
	     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
	     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
	     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
	     || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
	     || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
		/* skip ucode loading in SRIOV VF */
		return true;

	if (psp->autoload_supported &&
	    (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
	     ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
		/* skip mec JT when autoload is enabled */
		return true;

	return false;
}
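/*
 * Main loop that pushes all non-PSP firmware (SDMA, CP, RLC, VCN, SMC, ...)
 * through the PSP.  On autoload-capable parts the SMU firmware is loaded
 * first (unless the PMFW-centralized path in psp_hw_start() already did it),
 * duplicate SDMA images are skipped on Sienna Cichlid / Navy Flounder, and
 * RLC autoload is kicked off once the last required GFX image has been
 * submitted.
 */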
static int psp_np_fw_load(struct psp_context *psp)
{
	int i, ret;
	struct amdgpu_firmware_info *ucode;
	struct amdgpu_device *adev = psp->adev;

	if (psp->autoload_supported &&
	    !psp->pmfw_centralized_cstate_management) {
		ret = psp_load_smu_fw(psp);
		if (ret)
			return ret;
	}

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];

		if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
		    !fw_load_skip_check(psp, ucode)) {
			ret = psp_load_smu_fw(psp);
			if (ret)
				return ret;
			continue;
		}

		if (fw_load_skip_check(psp, ucode))
			continue;

		if (psp->autoload_supported &&
		    (adev->asic_type == CHIP_SIENNA_CICHLID ||
		     adev->asic_type == CHIP_NAVY_FLOUNDER) &&
		    (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
		     ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receives one SDMA firmware for sienna_cichlid,
			 * as all four SDMA firmwares are the same */
			continue;

		psp_print_fw_hdr(psp, ucode);

		ret = psp_execute_np_fw_load(psp, ucode);
		if (ret)
			return ret;

		/* Start RLC autoload after the PSP has received all the GFX firmware */
		if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
		    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
			ret = psp_rlc_autoload_start(psp);
			if (ret) {
				DRM_ERROR("Failed to start rlc autoload\n");
				return ret;
			}
		}
	}

	return 0;
}

static int psp_load_fw(struct amdgpu_device *adev)
{
	int ret;
	struct psp_context *psp = &adev->psp;

	if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
		psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */
		goto skip_memalloc;
	}

	psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!psp->cmd)
		return -ENOMEM;

	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &psp->fw_pri_bo,
				      &psp->fw_pri_mc_addr,
				      &psp->fw_pri_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->fence_buf_bo,
				      &psp->fence_buf_mc_addr,
				      &psp->fence_buf);
	if (ret)
		goto failed;

	ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
				      (void **)&psp->cmd_buf_mem);
	if (ret)
		goto failed;

	memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);

	ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring init failed!\n");
		goto failed;
	}

skip_memalloc:
	ret = psp_hw_start(psp);
	if (ret)
		goto failed;

	ret = psp_np_fw_load(psp);
	if (ret)
		goto failed;

	ret = psp_asd_load(psp);
	if (ret) {
		DRM_ERROR("PSP load asd failed!\n");
		return ret;
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");
	}

	return 0;

failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate, ring destroy,
	 * cmd/fence/fw buffers destroy, psp->cmd destroy) are delayed to
	 * psp_hw_fini.
	 */
	return ret;
}
static int psp_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->firmware.mutex);
	/*
	 * This sequence is only used during hw_init; it is not needed on
	 * resume.
	 */
	ret = amdgpu_ucode_init_bo(adev);
	if (ret)
		goto failed;

	ret = psp_load_fw(adev);
	if (ret) {
		DRM_ERROR("PSP firmware loading failed\n");
		goto failed;
	}

	mutex_unlock(&adev->firmware.mutex);
	return 0;

failed:
	adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
	mutex_unlock(&adev->firmware.mutex);
	return -EINVAL;
}

static int psp_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;
	int ret;

	if (psp->adev->psp.ta_fw) {
		psp_ras_terminate(psp);
		psp_rap_terminate(psp);
		psp_dtm_terminate(psp);
		psp_hdcp_terminate(psp);
	}

	psp_asd_unload(psp);
	ret = psp_clear_vf_fw(psp);
	if (ret) {
		DRM_ERROR("PSP clear vf fw!\n");
		return ret;
	}

	psp_tmr_terminate(psp);
	psp_ring_destroy(psp, PSP_RING_TYPE__KM);

	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
			      &psp->fence_buf_mc_addr, &psp->fence_buf);
	amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
			      (void **)&psp->cmd_buf_mem);

	kfree(psp->cmd);
	psp->cmd = NULL;

	return 0;
}

static int psp_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct psp_context *psp = &adev->psp;

	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    psp->xgmi_context.initialized == 1) {
		ret = psp_xgmi_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate xgmi ta\n");
			return ret;
		}
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate ras ta\n");
			return ret;
		}
		ret = psp_hdcp_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate hdcp ta\n");
			return ret;
		}
		ret = psp_dtm_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate dtm ta\n");
			return ret;
		}
		ret = psp_rap_terminate(psp);
		if (ret) {
			DRM_ERROR("Failed to terminate rap ta\n");
			return ret;
		}
	}

	ret = psp_asd_unload(psp);
	if (ret) {
		DRM_ERROR("Failed to unload asd\n");
		return ret;
	}

	ret = psp_tmr_terminate(psp);
	if (ret) {
		DRM_ERROR("Failed to terminate tmr\n");
		return ret;
	}

	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
	if (ret) {
		DRM_ERROR("PSP ring stop failed\n");
		return ret;
	}

	return 0;
}
		if (ret)
			dev_err(psp->adev->dev,
				"XGMI: Failed to initialize XGMI session\n");
	}

	if (psp->adev->psp.ta_fw) {
		ret = psp_ras_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAS: Failed to initialize RAS\n");

		ret = psp_hdcp_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"HDCP: Failed to initialize HDCP\n");

		ret = psp_dtm_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"DTM: Failed to initialize DTM\n");

		ret = psp_rap_initialize(psp);
		if (ret)
			dev_err(psp->adev->dev,
				"RAP: Failed to initialize RAP\n");
	}

	mutex_unlock(&adev->firmware.mutex);

	return 0;

failed:
	DRM_ERROR("PSP resume failed\n");
	mutex_unlock(&adev->firmware.mutex);
	return ret;
}

int psp_gpu_reset(struct amdgpu_device *adev)
{
	int ret;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	mutex_lock(&adev->psp.mutex);
	ret = psp_mode1_reset(&adev->psp);
	mutex_unlock(&adev->psp.mutex);

	return ret;
}

int psp_rlc_autoload_start(struct psp_context *psp)
{
	int ret;
	struct psp_gfx_cmd_resp *cmd;

	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;

	ret = psp_cmd_submit_buf(psp, NULL, cmd,
				 psp->fence_buf_mc_addr);
	kfree(cmd);
	return ret;
}

int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
			uint64_t cmd_gpu_addr, int cmd_size)
{
	struct amdgpu_firmware_info ucode = {0};

	ucode.ucode_id = inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
				    AMDGPU_UCODE_ID_VCN0_RAM;
	ucode.mc_addr = cmd_gpu_addr;
	ucode.ucode_size = cmd_size;

	return psp_execute_np_fw_load(&adev->psp, &ucode);
}

int psp_ring_cmd_submit(struct psp_context *psp,
			uint64_t cmd_buf_mc_addr,
			uint64_t fence_mc_addr,
			int index)
{
	unsigned int psp_write_ptr_reg = 0;
	struct psp_gfx_rb_frame *write_frame;
	struct psp_ring *ring = &psp->km_ring;
	struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
	struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
		ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
	struct amdgpu_device *adev = psp->adev;
	uint32_t ring_size_dw = ring->ring_size / 4;
	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;

	/* KM (GPCOM) prepare write pointer */
	psp_write_ptr_reg = psp_ring_get_wptr(psp);

	/* Update KM RB frame pointer to new frame */
	/* write_frame ptr increments by size of rb_frame in bytes */
	/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
	if ((psp_write_ptr_reg % ring_size_dw) == 0)
		write_frame = ring_buffer_start;
	else
		write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
	/* Check invalid write_frame ptr address */
	if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
		DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
			  ring_buffer_start, ring_buffer_end, write_frame);
		DRM_ERROR("write_frame is pointing to an address out of bounds\n");
		return -EINVAL;
	}

	/* Initialize KM RB frame */
	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));

	/* Update KM RB frame */
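	/*
	 * The frame carries the MC (GPU) addresses of the command buffer and
	 * the fence buffer, each split into hi/lo 32-bit halves, plus the
	 * fence value that the submitter later compares against the fence
	 * buffer contents to detect command completion.
	 */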
	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
	write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
	write_frame->fence_value = index;
	amdgpu_asic_flush_hdp(adev, NULL);

	/* Update the write pointer in DWORDs */
	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
	psp_ring_set_wptr(psp, psp_write_ptr_reg);
	return 0;
}

int psp_init_asd_microcode(struct psp_context *psp,
			   const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	char fw_name[30];
	const struct psp_firmware_header_v1_0 *asd_hdr;
	int err = 0;

	if (!chip_name) {
		dev_err(adev->dev, "invalid chip name for asd microcode\n");
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.asd_fw);
	if (err)
		goto out;

	asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
	adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
	adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
	adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
	adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
		le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	dev_err(adev->dev, "failed to initialize asd microcode\n");
	release_firmware(adev->psp.asd_fw);
	adev->psp.asd_fw = NULL;
	return err;
}

int psp_init_sos_microcode(struct psp_context *psp,
			   const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	char fw_name[30];
	const struct psp_firmware_header_v1_0 *sos_hdr;
	const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
	const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
	const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
	int err = 0;

	if (!chip_name) {
		dev_err(adev->dev, "invalid chip name for sos microcode\n");
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
	err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.sos_fw);
	if (err)
		goto out;

	sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
	amdgpu_ucode_print_psp_hdr(&sos_hdr->header);

	switch (sos_hdr->header.header_version_major) {
	case 1:
		adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
		adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
		adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
		adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
		adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
			le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
		adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
			le32_to_cpu(sos_hdr->sos_offset_bytes);
		if (sos_hdr->header.header_version_minor == 1) {
			sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
			adev->psp.toc_start_addr =
				(uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
		}
		if (sos_hdr->header.header_version_minor == 2) {
			sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
		}
		if (sos_hdr->header.header_version_minor == 3) {
			sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
			adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes);
			adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes);
			adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes);
			adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes);
			adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
			adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
				le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
		}
		break;
	default:
		dev_err(adev->dev,
			"unsupported psp sos firmware\n");
		err = -EINVAL;
		goto out;
	}

	return 0;
out:
	dev_err(adev->dev,
		"failed to init sos firmware\n");
	release_firmware(adev->psp.sos_fw);
	adev->psp.sos_fw = NULL;

	return err;
}

int parse_ta_bin_descriptor(struct psp_context *psp,
			    const struct ta_fw_bin_desc *desc,
			    const struct ta_firmware_header_v2_0 *ta_hdr)
{
	uint8_t *ucode_start_addr = NULL;

	if (!psp || !desc || !ta_hdr)
		return -EINVAL;

	ucode_start_addr = (uint8_t *)ta_hdr +
		le32_to_cpu(desc->offset_bytes) +
		le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);

	switch (desc->fw_type) {
	case TA_FW_TYPE_PSP_ASD:
		psp->asd_fw_version = le32_to_cpu(desc->fw_version);
		psp->asd_feature_version = le32_to_cpu(desc->fw_version);
		psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
		psp->asd_start_addr = ucode_start_addr;
		psp->asd_fw = psp->ta_fw;
		break;
	case TA_FW_TYPE_PSP_XGMI:
		psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
		psp->ta_xgmi_ucode_size = le32_to_cpu(desc->size_bytes);
		psp->ta_xgmi_start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAS:
		psp->ta_ras_ucode_version = le32_to_cpu(desc->fw_version);
		psp->ta_ras_ucode_size = le32_to_cpu(desc->size_bytes);
		psp->ta_ras_start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_HDCP:
		psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
		psp->ta_hdcp_ucode_size = le32_to_cpu(desc->size_bytes);
		psp->ta_hdcp_start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_DTM:
		psp->ta_dtm_ucode_version = le32_to_cpu(desc->fw_version);
		psp->ta_dtm_ucode_size = le32_to_cpu(desc->size_bytes);
		psp->ta_dtm_start_addr = ucode_start_addr;
		break;
	case TA_FW_TYPE_PSP_RAP:
		psp->ta_rap_ucode_version = le32_to_cpu(desc->fw_version);
		psp->ta_rap_ucode_size = le32_to_cpu(desc->size_bytes);
		psp->ta_rap_start_addr = ucode_start_addr;
		break;
	default:
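		/*
		 * Unknown descriptor types only trigger a warning; the rest
		 * of the TA package is still parsed, so an unrecognized TA
		 * does not fail microcode init.
		 */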
		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
		break;
	}

	return 0;
}

int psp_init_ta_microcode(struct psp_context *psp,
			  const char *chip_name)
{
	struct amdgpu_device *adev = psp->adev;
	char fw_name[30];
	const struct ta_firmware_header_v2_0 *ta_hdr;
	int err = 0;
	int ta_index = 0;

	if (!chip_name) {
		dev_err(adev->dev, "invalid chip name for ta microcode\n");
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.ta_fw);
	if (err)
		goto out;

	ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;

	if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
		dev_err(adev->dev, "unsupported TA header version\n");
		err = -EINVAL;
		goto out;
	}

	if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_TA_PACKAGING) {
		dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
		err = -EINVAL;
		goto out;
	}

	for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
		err = parse_ta_bin_descriptor(psp,
					      &ta_hdr->ta_fw_bin[ta_index],
					      ta_hdr);
		if (err)
			goto out;
	}

	return 0;
out:
	dev_err(adev->dev, "failed to initialize ta microcode\n");
	release_firmware(adev->psp.ta_fw);
	adev->psp.ta_fw = NULL;
	return err;
}

static int psp_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int psp_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t fw_ver;
	int ret;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.");
		return -EBUSY;
	}

	mutex_lock(&adev->psp.mutex);
	ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
	mutex_unlock(&adev->psp.mutex);

	if (ret) {
		DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
		return ret;
	}

	return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
}

static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *cpu_addr;
	dma_addr_t dma_addr;
	int ret;
	char fw_name[100];
	const struct firmware *usbc_pd_fw;

	if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
		DRM_INFO("PSP block is not ready yet.");
		return -EBUSY;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
	ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
	if (ret)
		goto fail;

	/* We need contiguous physical mem to place the FW for psp to access */
	cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);

	ret = dma_mapping_error(adev->dev, dma_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);

	/*
	 * x86 specific workaround.
	 * Without it the buffer is invisible to the PSP.
	 *
	 * TODO Remove once PSP starts snooping CPU cache
	 */
#ifdef CONFIG_X86
	clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
#endif

	mutex_lock(&adev->psp.mutex);
	ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
	mutex_unlock(&adev->psp.mutex);

rel_buf:
	dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
	release_firmware(usbc_pd_fw);

fail:
	if (ret) {
		DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
		return ret;
	}

	return count;
#endif
}

static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

static int psp_sysfs_init(struct amdgpu_device *adev)
{
	int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);

	if (ret)
		DRM_ERROR("Failed to create USBC PD FW control file!");

	return ret;
}

static void psp_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}

const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};
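/*
 * Illustrative sketch, kept compiled out and not part of this driver: the
 * psp_v*_ip_block tables above are consumed by the SoC setup code (e.g.
 * soc15.c / nv.c), which picks the matching PSP block per ASIC and registers
 * it with amdgpu_device_ip_block_add(). The helper name and the exact
 * case list below are assumptions for illustration only.
 */
#if 0
static void example_register_psp_block(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	default:
		/* Vega20/Arcturus/Navi-class parts use the v11 block */
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	}
}
#endif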