1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28 #include <linux/power_supply.h> 29 #include <linux/kthread.h> 30 #include <linux/module.h> 31 #include <linux/console.h> 32 #include <linux/slab.h> 33 #include <linux/iommu.h> 34 #include <linux/pci.h> 35 #include <linux/devcoredump.h> 36 #include <generated/utsrelease.h> 37 #include <linux/pci-p2pdma.h> 38 39 #include <drm/drm_aperture.h> 40 #include <drm/drm_atomic_helper.h> 41 #include <drm/drm_probe_helper.h> 42 #include <drm/amdgpu_drm.h> 43 #include <linux/vgaarb.h> 44 #include <linux/vga_switcheroo.h> 45 #include <linux/efi.h> 46 #include "amdgpu.h" 47 #include "amdgpu_trace.h" 48 #include "amdgpu_i2c.h" 49 #include "atom.h" 50 #include "amdgpu_atombios.h" 51 #include "amdgpu_atomfirmware.h" 52 #include "amd_pcie.h" 53 #ifdef CONFIG_DRM_AMDGPU_SI 54 #include "si.h" 55 #endif 56 #ifdef CONFIG_DRM_AMDGPU_CIK 57 #include "cik.h" 58 #endif 59 #include "vi.h" 60 #include "soc15.h" 61 #include "nv.h" 62 #include "bif/bif_4_1_d.h" 63 #include <linux/firmware.h> 64 #include "amdgpu_vf_error.h" 65 66 #include "amdgpu_amdkfd.h" 67 #include "amdgpu_pm.h" 68 69 #include "amdgpu_xgmi.h" 70 #include "amdgpu_ras.h" 71 #include "amdgpu_pmu.h" 72 #include "amdgpu_fru_eeprom.h" 73 #include "amdgpu_reset.h" 74 75 #include <linux/suspend.h> 76 #include <drm/task_barrier.h> 77 #include <linux/pm_runtime.h> 78 79 #include <drm/drm_drv.h> 80 81 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); 82 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); 83 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); 84 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); 85 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); 86 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); 87 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); 88 89 #define AMDGPU_RESUME_MS 2000 90 #define AMDGPU_MAX_RETRY_LIMIT 2 91 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL) 92 93 static const struct drm_driver amdgpu_kms_driver; 94 95 const char *amdgpu_asic_name[] = { 96 "TAHITI", 97 "PITCAIRN", 98 "VERDE", 99 "OLAND", 100 "HAINAN", 101 "BONAIRE", 102 "KAVERI", 103 "KABINI", 104 "HAWAII", 105 "MULLINS", 106 "TOPAZ", 107 "TONGA", 108 "FIJI", 109 "CARRIZO", 110 "STONEY", 111 "POLARIS10", 112 "POLARIS11", 113 "POLARIS12", 114 "VEGAM", 115 "VEGA10", 
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
233 */ 234 bool amdgpu_device_supports_px(struct drm_device *dev) 235 { 236 struct amdgpu_device *adev = drm_to_adev(dev); 237 238 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid()) 239 return true; 240 return false; 241 } 242 243 /** 244 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources 245 * 246 * @dev: drm_device pointer 247 * 248 * Returns true if the device is a dGPU with ACPI power control, 249 * otherwise return false. 250 */ 251 bool amdgpu_device_supports_boco(struct drm_device *dev) 252 { 253 struct amdgpu_device *adev = drm_to_adev(dev); 254 255 if (adev->has_pr3 || 256 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid())) 257 return true; 258 return false; 259 } 260 261 /** 262 * amdgpu_device_supports_baco - Does the device support BACO 263 * 264 * @dev: drm_device pointer 265 * 266 * Returns true if the device supporte BACO, 267 * otherwise return false. 268 */ 269 bool amdgpu_device_supports_baco(struct drm_device *dev) 270 { 271 struct amdgpu_device *adev = drm_to_adev(dev); 272 273 return amdgpu_asic_supports_baco(adev); 274 } 275 276 /** 277 * amdgpu_device_supports_smart_shift - Is the device dGPU with 278 * smart shift support 279 * 280 * @dev: drm_device pointer 281 * 282 * Returns true if the device is a dGPU with Smart Shift support, 283 * otherwise returns false. 284 */ 285 bool amdgpu_device_supports_smart_shift(struct drm_device *dev) 286 { 287 return (amdgpu_device_supports_boco(dev) && 288 amdgpu_acpi_is_power_shift_control_supported()); 289 } 290 291 /* 292 * VRAM access helper functions 293 */ 294 295 /** 296 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA 297 * 298 * @adev: amdgpu_device pointer 299 * @pos: offset of the buffer in vram 300 * @buf: virtual address of the buffer in system memory 301 * @size: read/write size, sizeof(@buf) must > @size 302 * @write: true - write to vram, otherwise - read from vram 303 */ 304 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, 305 void *buf, size_t size, bool write) 306 { 307 unsigned long flags; 308 uint32_t hi = ~0, tmp = 0; 309 uint32_t *data = buf; 310 uint64_t last; 311 int idx; 312 313 if (!drm_dev_enter(adev_to_drm(adev), &idx)) 314 return; 315 316 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4)); 317 318 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 319 for (last = pos + size; pos < last; pos += 4) { 320 tmp = pos >> 31; 321 322 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); 323 if (tmp != hi) { 324 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp); 325 hi = tmp; 326 } 327 if (write) 328 WREG32_NO_KIQ(mmMM_DATA, *data++); 329 else 330 *data++ = RREG32_NO_KIQ(mmMM_DATA); 331 } 332 333 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 334 drm_dev_exit(idx); 335 } 336 337 /** 338 * amdgpu_device_aper_access - access vram by vram aperature 339 * 340 * @adev: amdgpu_device pointer 341 * @pos: offset of the buffer in vram 342 * @buf: virtual address of the buffer in system memory 343 * @size: read/write size, sizeof(@buf) must > @size 344 * @write: true - write to vram, otherwise - read from vram 345 * 346 * The return value means how many bytes have been transferred. 
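 *
 * Hedged usage sketch: the helper may transfer fewer bytes than requested
 * when the range runs past the CPU visible part of VRAM, so callers fall
 * back to MM_INDEX/MM_DATA access for the remainder (this is exactly what
 * amdgpu_device_vram_access() below does):
 *
 *   count = amdgpu_device_aper_access(adev, pos, buf, size, write);
 *   if (count < size)
 *           amdgpu_device_mm_access(adev, pos + count, buf + count,
 *                                   size - count, write);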
347 */ 348 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, 349 void *buf, size_t size, bool write) 350 { 351 #ifdef CONFIG_64BIT 352 void __iomem *addr; 353 size_t count = 0; 354 uint64_t last; 355 356 if (!adev->mman.aper_base_kaddr) 357 return 0; 358 359 last = min(pos + size, adev->gmc.visible_vram_size); 360 if (last > pos) { 361 addr = adev->mman.aper_base_kaddr + pos; 362 count = last - pos; 363 364 if (write) { 365 memcpy_toio(addr, buf, count); 366 mb(); 367 amdgpu_device_flush_hdp(adev, NULL); 368 } else { 369 amdgpu_device_invalidate_hdp(adev, NULL); 370 mb(); 371 memcpy_fromio(buf, addr, count); 372 } 373 374 } 375 376 return count; 377 #else 378 return 0; 379 #endif 380 } 381 382 /** 383 * amdgpu_device_vram_access - read/write a buffer in vram 384 * 385 * @adev: amdgpu_device pointer 386 * @pos: offset of the buffer in vram 387 * @buf: virtual address of the buffer in system memory 388 * @size: read/write size, sizeof(@buf) must > @size 389 * @write: true - write to vram, otherwise - read from vram 390 */ 391 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, 392 void *buf, size_t size, bool write) 393 { 394 size_t count; 395 396 /* try to using vram apreature to access vram first */ 397 count = amdgpu_device_aper_access(adev, pos, buf, size, write); 398 size -= count; 399 if (size) { 400 /* using MM to access rest vram */ 401 pos += count; 402 buf += count; 403 amdgpu_device_mm_access(adev, pos, buf, size, write); 404 } 405 } 406 407 /* 408 * register access helper functions. 409 */ 410 411 /* Check if hw access should be skipped because of hotplug or device error */ 412 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev) 413 { 414 if (adev->no_hw_access) 415 return true; 416 417 #ifdef CONFIG_LOCKDEP 418 /* 419 * This is a bit complicated to understand, so worth a comment. What we assert 420 * here is that the GPU reset is not running on another thread in parallel. 421 * 422 * For this we trylock the read side of the reset semaphore, if that succeeds 423 * we know that the reset is not running in paralell. 424 * 425 * If the trylock fails we assert that we are either already holding the read 426 * side of the lock or are the reset thread itself and hold the write side of 427 * the lock. 428 */ 429 if (in_task()) { 430 if (down_read_trylock(&adev->reset_domain->sem)) 431 up_read(&adev->reset_domain->sem); 432 else 433 lockdep_assert_held(&adev->reset_domain->sem); 434 } 435 #endif 436 return false; 437 } 438 439 /** 440 * amdgpu_device_rreg - read a memory mapped IO or indirect register 441 * 442 * @adev: amdgpu_device pointer 443 * @reg: dword aligned register offset 444 * @acc_flags: access flags which require special behavior 445 * 446 * Returns the 32 bit value from the offset specified. 
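 *
 * Most code reaches this through the RREG32()/RREG32_NO_KIQ() macros from
 * amdgpu.h rather than calling it directly (expansion assumed here for
 * illustration only):
 *
 *   val = RREG32(reg);      i.e. roughly amdgpu_device_rreg(adev, reg, 0)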
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with byte offset helper functions
 * @offset: byte offset from MMIO start
 *
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with byte offset helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 *
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
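 *
 * The write path mirrors the read path: callers normally go through the
 * WREG32()/WREG32_NO_KIQ() macros, and under SR-IOV at runtime the access
 * may be routed through the KIQ ring instead of plain MMIO. Assumed
 * expansion, shown for illustration only:
 *
 *   WREG32(reg, val);       i.e. roughly amdgpu_device_wreg(adev, reg, val, 0)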
533 */ 534 void amdgpu_device_wreg(struct amdgpu_device *adev, 535 uint32_t reg, uint32_t v, 536 uint32_t acc_flags) 537 { 538 if (amdgpu_device_skip_hw_access(adev)) 539 return; 540 541 if ((reg * 4) < adev->rmmio_size) { 542 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && 543 amdgpu_sriov_runtime(adev) && 544 down_read_trylock(&adev->reset_domain->sem)) { 545 amdgpu_kiq_wreg(adev, reg, v); 546 up_read(&adev->reset_domain->sem); 547 } else { 548 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 549 } 550 } else { 551 adev->pcie_wreg(adev, reg * 4, v); 552 } 553 554 trace_amdgpu_device_wreg(adev->pdev->device, reg, v); 555 } 556 557 /** 558 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range 559 * 560 * @adev: amdgpu_device pointer 561 * @reg: mmio/rlc register 562 * @v: value to write 563 * 564 * this function is invoked only for the debugfs register access 565 */ 566 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, 567 uint32_t reg, uint32_t v) 568 { 569 if (amdgpu_device_skip_hw_access(adev)) 570 return; 571 572 if (amdgpu_sriov_fullaccess(adev) && 573 adev->gfx.rlc.funcs && 574 adev->gfx.rlc.funcs->is_rlcg_access_range) { 575 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) 576 return amdgpu_sriov_wreg(adev, reg, v, 0, 0); 577 } else if ((reg * 4) >= adev->rmmio_size) { 578 adev->pcie_wreg(adev, reg * 4, v); 579 } else { 580 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 581 } 582 } 583 584 /** 585 * amdgpu_mm_rdoorbell - read a doorbell dword 586 * 587 * @adev: amdgpu_device pointer 588 * @index: doorbell index 589 * 590 * Returns the value in the doorbell aperture at the 591 * requested doorbell index (CIK). 592 */ 593 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) 594 { 595 if (amdgpu_device_skip_hw_access(adev)) 596 return 0; 597 598 if (index < adev->doorbell.num_doorbells) { 599 return readl(adev->doorbell.ptr + index); 600 } else { 601 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 602 return 0; 603 } 604 } 605 606 /** 607 * amdgpu_mm_wdoorbell - write a doorbell dword 608 * 609 * @adev: amdgpu_device pointer 610 * @index: doorbell index 611 * @v: value to write 612 * 613 * Writes @v to the doorbell aperture at the 614 * requested doorbell index (CIK). 615 */ 616 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) 617 { 618 if (amdgpu_device_skip_hw_access(adev)) 619 return; 620 621 if (index < adev->doorbell.num_doorbells) { 622 writel(v, adev->doorbell.ptr + index); 623 } else { 624 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 625 } 626 } 627 628 /** 629 * amdgpu_mm_rdoorbell64 - read a doorbell Qword 630 * 631 * @adev: amdgpu_device pointer 632 * @index: doorbell index 633 * 634 * Returns the value in the doorbell aperture at the 635 * requested doorbell index (VEGA10+). 636 */ 637 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) 638 { 639 if (amdgpu_device_skip_hw_access(adev)) 640 return 0; 641 642 if (index < adev->doorbell.num_doorbells) { 643 return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index)); 644 } else { 645 DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 646 return 0; 647 } 648 } 649 650 /** 651 * amdgpu_mm_wdoorbell64 - write a doorbell Qword 652 * 653 * @adev: amdgpu_device pointer 654 * @index: doorbell index 655 * @v: value to write 656 * 657 * Writes @v to the doorbell aperture at the 658 * requested doorbell index (VEGA10+). 
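 *
 * Illustrative use (hedged; the exact write pointer scaling differs per IP
 * block, and the WDOORBELL64() macro is assumed from amdgpu.h):
 *
 *   if (ring->use_doorbell)
 *           WDOORBELL64(ring->doorbell_index, ring->wptr);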
659 */ 660 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) 661 { 662 if (amdgpu_device_skip_hw_access(adev)) 663 return; 664 665 if (index < adev->doorbell.num_doorbells) { 666 atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v); 667 } else { 668 DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 669 } 670 } 671 672 /** 673 * amdgpu_device_indirect_rreg - read an indirect register 674 * 675 * @adev: amdgpu_device pointer 676 * @pcie_index: mmio register offset 677 * @pcie_data: mmio register offset 678 * @reg_addr: indirect register address to read from 679 * 680 * Returns the value of indirect register @reg_addr 681 */ 682 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, 683 u32 pcie_index, u32 pcie_data, 684 u32 reg_addr) 685 { 686 unsigned long flags; 687 u32 r; 688 void __iomem *pcie_index_offset; 689 void __iomem *pcie_data_offset; 690 691 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 692 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 693 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 694 695 writel(reg_addr, pcie_index_offset); 696 readl(pcie_index_offset); 697 r = readl(pcie_data_offset); 698 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 699 700 return r; 701 } 702 703 /** 704 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register 705 * 706 * @adev: amdgpu_device pointer 707 * @pcie_index: mmio register offset 708 * @pcie_data: mmio register offset 709 * @reg_addr: indirect register address to read from 710 * 711 * Returns the value of indirect register @reg_addr 712 */ 713 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, 714 u32 pcie_index, u32 pcie_data, 715 u32 reg_addr) 716 { 717 unsigned long flags; 718 u64 r; 719 void __iomem *pcie_index_offset; 720 void __iomem *pcie_data_offset; 721 722 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 723 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 724 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 725 726 /* read low 32 bits */ 727 writel(reg_addr, pcie_index_offset); 728 readl(pcie_index_offset); 729 r = readl(pcie_data_offset); 730 /* read high 32 bits */ 731 writel(reg_addr + 4, pcie_index_offset); 732 readl(pcie_index_offset); 733 r |= ((u64)readl(pcie_data_offset) << 32); 734 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 735 736 return r; 737 } 738 739 /** 740 * amdgpu_device_indirect_wreg - write an indirect register address 741 * 742 * @adev: amdgpu_device pointer 743 * @pcie_index: mmio register offset 744 * @pcie_data: mmio register offset 745 * @reg_addr: indirect register offset 746 * @reg_data: indirect register data 747 * 748 */ 749 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, 750 u32 pcie_index, u32 pcie_data, 751 u32 reg_addr, u32 reg_data) 752 { 753 unsigned long flags; 754 void __iomem *pcie_index_offset; 755 void __iomem *pcie_data_offset; 756 757 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 758 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 759 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 760 761 writel(reg_addr, pcie_index_offset); 762 readl(pcie_index_offset); 763 writel(reg_data, pcie_data_offset); 764 readl(pcie_data_offset); 765 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 766 } 767 768 /** 769 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address 770 * 771 * @adev: amdgpu_device pointer 772 * @pcie_index: mmio register offset 773 * @pcie_data: mmio register 
offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
881 */ 882 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, 883 uint32_t block, uint32_t reg) 884 { 885 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", 886 reg, block); 887 BUG(); 888 return 0; 889 } 890 891 /** 892 * amdgpu_block_invalid_wreg - dummy reg write function 893 * 894 * @adev: amdgpu_device pointer 895 * @block: offset of instance 896 * @reg: offset of register 897 * @v: value to write to the register 898 * 899 * Dummy register read function. Used for register blocks 900 * that certain asics don't have (all asics). 901 */ 902 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, 903 uint32_t block, 904 uint32_t reg, uint32_t v) 905 { 906 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", 907 reg, block, v); 908 BUG(); 909 } 910 911 /** 912 * amdgpu_device_asic_init - Wrapper for atom asic_init 913 * 914 * @adev: amdgpu_device pointer 915 * 916 * Does any asic specific work and then calls atom asic init. 917 */ 918 static int amdgpu_device_asic_init(struct amdgpu_device *adev) 919 { 920 amdgpu_asic_pre_asic_init(adev); 921 922 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) 923 return amdgpu_atomfirmware_asic_init(adev, true); 924 else 925 return amdgpu_atom_asic_init(adev->mode_info.atom_context); 926 } 927 928 /** 929 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page 930 * 931 * @adev: amdgpu_device pointer 932 * 933 * Allocates a scratch page of VRAM for use by various things in the 934 * driver. 935 */ 936 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) 937 { 938 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, 939 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 940 &adev->vram_scratch.robj, 941 &adev->vram_scratch.gpu_addr, 942 (void **)&adev->vram_scratch.ptr); 943 } 944 945 /** 946 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page 947 * 948 * @adev: amdgpu_device pointer 949 * 950 * Frees the VRAM scratch page. 951 */ 952 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) 953 { 954 amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); 955 } 956 957 /** 958 * amdgpu_device_program_register_sequence - program an array of registers. 959 * 960 * @adev: amdgpu_device pointer 961 * @registers: pointer to the register array 962 * @array_size: size of the register array 963 * 964 * Programs an array or registers with and and or masks. 965 * This is a helper for setting golden registers. 966 */ 967 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, 968 const u32 *registers, 969 const u32 array_size) 970 { 971 u32 tmp, reg, and_mask, or_mask; 972 int i; 973 974 if (array_size % 3) 975 return; 976 977 for (i = 0; i < array_size; i +=3) { 978 reg = registers[i + 0]; 979 and_mask = registers[i + 1]; 980 or_mask = registers[i + 2]; 981 982 if (and_mask == 0xffffffff) { 983 tmp = or_mask; 984 } else { 985 tmp = RREG32(reg); 986 tmp &= ~and_mask; 987 if (adev->family >= AMDGPU_FAMILY_AI) 988 tmp |= (or_mask & and_mask); 989 else 990 tmp |= or_mask; 991 } 992 WREG32(reg, tmp); 993 } 994 } 995 996 /** 997 * amdgpu_device_pci_config_reset - reset the GPU 998 * 999 * @adev: amdgpu_device pointer 1000 * 1001 * Resets the GPU using the pci config reset sequence. 1002 * Only applicable to asics prior to vega10. 
1003 */ 1004 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) 1005 { 1006 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); 1007 } 1008 1009 /** 1010 * amdgpu_device_pci_reset - reset the GPU using generic PCI means 1011 * 1012 * @adev: amdgpu_device pointer 1013 * 1014 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.). 1015 */ 1016 int amdgpu_device_pci_reset(struct amdgpu_device *adev) 1017 { 1018 STUB(); 1019 return -ENOSYS; 1020 #ifdef notyet 1021 return pci_reset_function(adev->pdev); 1022 #endif 1023 } 1024 1025 /* 1026 * GPU doorbell aperture helpers function. 1027 */ 1028 /** 1029 * amdgpu_device_doorbell_init - Init doorbell driver information. 1030 * 1031 * @adev: amdgpu_device pointer 1032 * 1033 * Init doorbell driver information (CIK) 1034 * Returns 0 on success, error on failure. 1035 */ 1036 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) 1037 { 1038 1039 /* No doorbell on SI hardware generation */ 1040 if (adev->asic_type < CHIP_BONAIRE) { 1041 adev->doorbell.base = 0; 1042 adev->doorbell.size = 0; 1043 adev->doorbell.num_doorbells = 0; 1044 adev->doorbell.ptr = NULL; 1045 return 0; 1046 } 1047 1048 #ifdef __linux__ 1049 if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) 1050 return -EINVAL; 1051 #endif 1052 1053 amdgpu_asic_init_doorbell_index(adev); 1054 1055 /* doorbell bar mapping */ 1056 #ifdef __linux__ 1057 adev->doorbell.base = pci_resource_start(adev->pdev, 2); 1058 adev->doorbell.size = pci_resource_len(adev->pdev, 2); 1059 #endif 1060 1061 if (adev->enable_mes) { 1062 adev->doorbell.num_doorbells = 1063 adev->doorbell.size / sizeof(u32); 1064 } else { 1065 adev->doorbell.num_doorbells = 1066 min_t(u32, adev->doorbell.size / sizeof(u32), 1067 adev->doorbell_index.max_assignment+1); 1068 if (adev->doorbell.num_doorbells == 0) 1069 return -EINVAL; 1070 1071 /* For Vega, reserve and map two pages on doorbell BAR since SDMA 1072 * paging queue doorbell use the second page. The 1073 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the 1074 * doorbells are in the first page. So with paging queue enabled, 1075 * the max num_doorbells should + 1 page (0x400 in dword) 1076 */ 1077 if (adev->asic_type >= CHIP_VEGA10) 1078 adev->doorbell.num_doorbells += 0x400; 1079 } 1080 1081 #ifdef __linux__ 1082 adev->doorbell.ptr = ioremap(adev->doorbell.base, 1083 adev->doorbell.num_doorbells * 1084 sizeof(u32)); 1085 if (adev->doorbell.ptr == NULL) 1086 return -ENOMEM; 1087 #endif 1088 1089 return 0; 1090 } 1091 1092 /** 1093 * amdgpu_device_doorbell_fini - Tear down doorbell driver information. 1094 * 1095 * @adev: amdgpu_device pointer 1096 * 1097 * Tear down doorbell driver information (CIK) 1098 */ 1099 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) 1100 { 1101 #ifdef __linux__ 1102 iounmap(adev->doorbell.ptr); 1103 #else 1104 if (adev->doorbell.size > 0) 1105 bus_space_unmap(adev->doorbell.bst, adev->doorbell.bsh, 1106 adev->doorbell.size); 1107 #endif 1108 adev->doorbell.ptr = NULL; 1109 } 1110 1111 1112 1113 /* 1114 * amdgpu_device_wb_*() 1115 * Writeback is the method by which the GPU updates special pages in memory 1116 * with the status of certain GPU events (fences, ring pointers,etc.). 1117 */ 1118 1119 /** 1120 * amdgpu_device_wb_fini - Disable Writeback and free memory 1121 * 1122 * @adev: amdgpu_device pointer 1123 * 1124 * Disables Writeback and frees the Writeback memory (all asics). 1125 * Used at driver shutdown. 
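 *
 * For context, the usual writeback slot lifecycle elsewhere in the driver
 * looks roughly like this (illustrative sketch, the consumer field name is
 * hypothetical):
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb))
 *           ring->my_status_offs = wb;      (hypothetical consumer)
 *   ...
 *   amdgpu_device_wb_free(adev, wb);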
1126 */ 1127 static void amdgpu_device_wb_fini(struct amdgpu_device *adev) 1128 { 1129 if (adev->wb.wb_obj) { 1130 amdgpu_bo_free_kernel(&adev->wb.wb_obj, 1131 &adev->wb.gpu_addr, 1132 (void **)&adev->wb.wb); 1133 adev->wb.wb_obj = NULL; 1134 } 1135 } 1136 1137 /** 1138 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory 1139 * 1140 * @adev: amdgpu_device pointer 1141 * 1142 * Initializes writeback and allocates writeback memory (all asics). 1143 * Used at driver startup. 1144 * Returns 0 on success or an -error on failure. 1145 */ 1146 static int amdgpu_device_wb_init(struct amdgpu_device *adev) 1147 { 1148 int r; 1149 1150 if (adev->wb.wb_obj == NULL) { 1151 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ 1152 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, 1153 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 1154 &adev->wb.wb_obj, &adev->wb.gpu_addr, 1155 (void **)&adev->wb.wb); 1156 if (r) { 1157 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 1158 return r; 1159 } 1160 1161 adev->wb.num_wb = AMDGPU_MAX_WB; 1162 memset(&adev->wb.used, 0, sizeof(adev->wb.used)); 1163 1164 /* clear wb memory */ 1165 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8); 1166 } 1167 1168 return 0; 1169 } 1170 1171 /** 1172 * amdgpu_device_wb_get - Allocate a wb entry 1173 * 1174 * @adev: amdgpu_device pointer 1175 * @wb: wb index 1176 * 1177 * Allocate a wb slot for use by the driver (all asics). 1178 * Returns 0 on success or -EINVAL on failure. 1179 */ 1180 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) 1181 { 1182 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); 1183 1184 if (offset < adev->wb.num_wb) { 1185 __set_bit(offset, adev->wb.used); 1186 *wb = offset << 3; /* convert to dw offset */ 1187 return 0; 1188 } else { 1189 return -EINVAL; 1190 } 1191 } 1192 1193 /** 1194 * amdgpu_device_wb_free - Free a wb entry 1195 * 1196 * @adev: amdgpu_device pointer 1197 * @wb: wb index 1198 * 1199 * Free a wb slot allocated for use by the driver (all asics) 1200 */ 1201 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) 1202 { 1203 wb >>= 3; 1204 if (wb < adev->wb.num_wb) 1205 __clear_bit(wb, adev->wb.used); 1206 } 1207 1208 /** 1209 * amdgpu_device_resize_fb_bar - try to resize FB BAR 1210 * 1211 * @adev: amdgpu_device pointer 1212 * 1213 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not 1214 * to fail, but if any of the BARs is not accessible after the size we abort 1215 * driver loading by returning -ENODEV. 
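 *
 * Rough sequence (mirrors the code below): disable memory decoding, release
 * the doorbell and FB BARs, ask the PCI core for the largest supported
 * resizable BAR size that still fits in a 64 bit root bus window, reassign
 * bus resources, then restore decoding and re-init the doorbell mapping.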
1216 */ 1217 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) 1218 { 1219 #ifdef __linux__ 1220 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); 1221 struct pci_bus *root; 1222 struct resource *res; 1223 unsigned i; 1224 u16 cmd; 1225 int r; 1226 1227 /* Bypass for VF */ 1228 if (amdgpu_sriov_vf(adev)) 1229 return 0; 1230 1231 /* skip if the bios has already enabled large BAR */ 1232 if (adev->gmc.real_vram_size && 1233 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) 1234 return 0; 1235 1236 /* Check if the root BUS has 64bit memory resources */ 1237 root = adev->pdev->bus; 1238 while (root->parent) 1239 root = root->parent; 1240 1241 pci_bus_for_each_resource(root, res, i) { 1242 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && 1243 res->start > 0x100000000ull) 1244 break; 1245 } 1246 1247 /* Trying to resize is pointless without a root hub window above 4GB */ 1248 if (!res) 1249 return 0; 1250 1251 /* Limit the BAR size to what is available */ 1252 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, 1253 rbar_size); 1254 1255 /* Disable memory decoding while we change the BAR addresses and size */ 1256 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); 1257 pci_write_config_word(adev->pdev, PCI_COMMAND, 1258 cmd & ~PCI_COMMAND_MEMORY); 1259 1260 /* Free the VRAM and doorbell BAR, we most likely need to move both. */ 1261 amdgpu_device_doorbell_fini(adev); 1262 if (adev->asic_type >= CHIP_BONAIRE) 1263 pci_release_resource(adev->pdev, 2); 1264 1265 pci_release_resource(adev->pdev, 0); 1266 1267 r = pci_resize_resource(adev->pdev, 0, rbar_size); 1268 if (r == -ENOSPC) 1269 DRM_INFO("Not enough PCI address space for a large BAR."); 1270 else if (r && r != -ENOTSUPP) 1271 DRM_ERROR("Problem resizing BAR0 (%d).", r); 1272 1273 pci_assign_unassigned_bus_resources(adev->pdev->bus); 1274 1275 /* When the doorbell or fb BAR isn't available we have no chance of 1276 * using the device. 1277 */ 1278 r = amdgpu_device_doorbell_init(adev); 1279 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) 1280 return -ENODEV; 1281 1282 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); 1283 #endif /* __linux__ */ 1284 1285 return 0; 1286 } 1287 1288 /* 1289 * GPU helpers function. 1290 */ 1291 /** 1292 * amdgpu_device_need_post - check if the hw need post or not 1293 * 1294 * @adev: amdgpu_device pointer 1295 * 1296 * Check if the asic has been initialized (all asics) at driver startup 1297 * or post is needed if hw reset is performed. 1298 * Returns true if need or false if not. 
1299 */ 1300 bool amdgpu_device_need_post(struct amdgpu_device *adev) 1301 { 1302 uint32_t reg; 1303 1304 if (amdgpu_sriov_vf(adev)) 1305 return false; 1306 1307 if (amdgpu_passthrough(adev)) { 1308 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot 1309 * some old smc fw still need driver do vPost otherwise gpu hang, while 1310 * those smc fw version above 22.15 doesn't have this flaw, so we force 1311 * vpost executed for smc version below 22.15 1312 */ 1313 if (adev->asic_type == CHIP_FIJI) { 1314 int err; 1315 uint32_t fw_ver; 1316 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); 1317 /* force vPost if error occured */ 1318 if (err) 1319 return true; 1320 1321 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); 1322 if (fw_ver < 0x00160e00) 1323 return true; 1324 } 1325 } 1326 1327 /* Don't post if we need to reset whole hive on init */ 1328 if (adev->gmc.xgmi.pending_reset) 1329 return false; 1330 1331 if (adev->has_hw_reset) { 1332 adev->has_hw_reset = false; 1333 return true; 1334 } 1335 1336 /* bios scratch used on CIK+ */ 1337 if (adev->asic_type >= CHIP_BONAIRE) 1338 return amdgpu_atombios_scratch_need_asic_init(adev); 1339 1340 /* check MEM_SIZE for older asics */ 1341 reg = amdgpu_asic_get_config_memsize(adev); 1342 1343 if ((reg != 0) && (reg != 0xffffffff)) 1344 return false; 1345 1346 return true; 1347 } 1348 1349 /** 1350 * amdgpu_device_should_use_aspm - check if the device should program ASPM 1351 * 1352 * @adev: amdgpu_device pointer 1353 * 1354 * Confirm whether the module parameter and pcie bridge agree that ASPM should 1355 * be set for this device. 1356 * 1357 * Returns true if it should be used or false if not. 1358 */ 1359 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) 1360 { 1361 switch (amdgpu_aspm) { 1362 case -1: 1363 break; 1364 case 0: 1365 return false; 1366 case 1: 1367 return true; 1368 default: 1369 return false; 1370 } 1371 return pcie_aspm_enabled(adev->pdev); 1372 } 1373 1374 /* if we get transitioned to only one device, take VGA back */ 1375 /** 1376 * amdgpu_device_vga_set_decode - enable/disable vga decode 1377 * 1378 * @pdev: PCI device pointer 1379 * @state: enable/disable vga decode 1380 * 1381 * Enable/disable vga decode (all asics). 1382 * Returns VGA resource flags. 1383 */ 1384 #ifdef notyet 1385 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev, 1386 bool state) 1387 { 1388 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev)); 1389 amdgpu_asic_set_vga_state(adev, state); 1390 if (state) 1391 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 1392 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1393 else 1394 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 1395 } 1396 #endif 1397 1398 /** 1399 * amdgpu_device_check_block_size - validate the vm block size 1400 * 1401 * @adev: amdgpu_device pointer 1402 * 1403 * Validates the vm block size specified via module parameter. 1404 * The vm block size defines number of bits in page table versus page directory, 1405 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1406 * page table and the remaining bits are in the page directory. 
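 *
 * Worked example: with amdgpu_vm_block_size=9 each page table covers
 * 2^(12+9) bytes = 2 MiB of address space (512 PTEs * 4 KiB pages); larger
 * values trade page directory entries for bigger page tables.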
1407 */ 1408 static void amdgpu_device_check_block_size(struct amdgpu_device *adev) 1409 { 1410 /* defines number of bits in page table versus page directory, 1411 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1412 * page table and the remaining bits are in the page directory */ 1413 if (amdgpu_vm_block_size == -1) 1414 return; 1415 1416 if (amdgpu_vm_block_size < 9) { 1417 dev_warn(adev->dev, "VM page table size (%d) too small\n", 1418 amdgpu_vm_block_size); 1419 amdgpu_vm_block_size = -1; 1420 } 1421 } 1422 1423 /** 1424 * amdgpu_device_check_vm_size - validate the vm size 1425 * 1426 * @adev: amdgpu_device pointer 1427 * 1428 * Validates the vm size in GB specified via module parameter. 1429 * The VM size is the size of the GPU virtual memory space in GB. 1430 */ 1431 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) 1432 { 1433 /* no need to check the default value */ 1434 if (amdgpu_vm_size == -1) 1435 return; 1436 1437 if (amdgpu_vm_size < 1) { 1438 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", 1439 amdgpu_vm_size); 1440 amdgpu_vm_size = -1; 1441 } 1442 } 1443 1444 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) 1445 { 1446 #ifdef __linux__ 1447 struct sysinfo si; 1448 #endif 1449 bool is_os_64 = (sizeof(void *) == 8); 1450 uint64_t total_memory; 1451 uint64_t dram_size_seven_GB = 0x1B8000000; 1452 uint64_t dram_size_three_GB = 0xB8000000; 1453 1454 if (amdgpu_smu_memory_pool_size == 0) 1455 return; 1456 1457 if (!is_os_64) { 1458 DRM_WARN("Not 64-bit OS, feature not supported\n"); 1459 goto def_value; 1460 } 1461 #ifdef __linux__ 1462 si_meminfo(&si); 1463 total_memory = (uint64_t)si.totalram * si.mem_unit; 1464 #else 1465 total_memory = ptoa(physmem); 1466 #endif 1467 1468 if ((amdgpu_smu_memory_pool_size == 1) || 1469 (amdgpu_smu_memory_pool_size == 2)) { 1470 if (total_memory < dram_size_three_GB) 1471 goto def_value1; 1472 } else if ((amdgpu_smu_memory_pool_size == 4) || 1473 (amdgpu_smu_memory_pool_size == 8)) { 1474 if (total_memory < dram_size_seven_GB) 1475 goto def_value1; 1476 } else { 1477 DRM_WARN("Smu memory pool size not supported\n"); 1478 goto def_value; 1479 } 1480 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; 1481 1482 return; 1483 1484 def_value1: 1485 DRM_WARN("No enough system memory\n"); 1486 def_value: 1487 adev->pm.smu_prv_buffer_size = 0; 1488 } 1489 1490 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev) 1491 { 1492 if (!(adev->flags & AMD_IS_APU) || 1493 adev->asic_type < CHIP_RAVEN) 1494 return 0; 1495 1496 switch (adev->asic_type) { 1497 case CHIP_RAVEN: 1498 if (adev->pdev->device == 0x15dd) 1499 adev->apu_flags |= AMD_APU_IS_RAVEN; 1500 if (adev->pdev->device == 0x15d8) 1501 adev->apu_flags |= AMD_APU_IS_PICASSO; 1502 break; 1503 case CHIP_RENOIR: 1504 if ((adev->pdev->device == 0x1636) || 1505 (adev->pdev->device == 0x164c)) 1506 adev->apu_flags |= AMD_APU_IS_RENOIR; 1507 else 1508 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; 1509 break; 1510 case CHIP_VANGOGH: 1511 adev->apu_flags |= AMD_APU_IS_VANGOGH; 1512 break; 1513 case CHIP_YELLOW_CARP: 1514 break; 1515 case CHIP_CYAN_SKILLFISH: 1516 if ((adev->pdev->device == 0x13FE) || 1517 (adev->pdev->device == 0x143F)) 1518 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2; 1519 break; 1520 default: 1521 break; 1522 } 1523 1524 return 0; 1525 } 1526 1527 /** 1528 * amdgpu_device_check_arguments - validate module params 1529 * 1530 * @adev: amdgpu_device pointer 1531 * 1532 * Validates certain 
module parameters and updates 1533 * the associated values used by the driver (all asics). 1534 */ 1535 static int amdgpu_device_check_arguments(struct amdgpu_device *adev) 1536 { 1537 if (amdgpu_sched_jobs < 4) { 1538 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", 1539 amdgpu_sched_jobs); 1540 amdgpu_sched_jobs = 4; 1541 } else if (!is_power_of_2(amdgpu_sched_jobs)){ 1542 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n", 1543 amdgpu_sched_jobs); 1544 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs); 1545 } 1546 1547 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) { 1548 /* gart size must be greater or equal to 32M */ 1549 dev_warn(adev->dev, "gart size (%d) too small\n", 1550 amdgpu_gart_size); 1551 amdgpu_gart_size = -1; 1552 } 1553 1554 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) { 1555 /* gtt size must be greater or equal to 32M */ 1556 dev_warn(adev->dev, "gtt size (%d) too small\n", 1557 amdgpu_gtt_size); 1558 amdgpu_gtt_size = -1; 1559 } 1560 1561 /* valid range is between 4 and 9 inclusive */ 1562 if (amdgpu_vm_fragment_size != -1 && 1563 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) { 1564 dev_warn(adev->dev, "valid range is between 4 and 9\n"); 1565 amdgpu_vm_fragment_size = -1; 1566 } 1567 1568 if (amdgpu_sched_hw_submission < 2) { 1569 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n", 1570 amdgpu_sched_hw_submission); 1571 amdgpu_sched_hw_submission = 2; 1572 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) { 1573 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n", 1574 amdgpu_sched_hw_submission); 1575 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission); 1576 } 1577 1578 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) { 1579 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n"); 1580 amdgpu_reset_method = -1; 1581 } 1582 1583 amdgpu_device_check_smu_prv_buffer_size(adev); 1584 1585 amdgpu_device_check_vm_size(adev); 1586 1587 amdgpu_device_check_block_size(adev); 1588 1589 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); 1590 1591 return 0; 1592 } 1593 1594 #ifdef __linux__ 1595 /** 1596 * amdgpu_switcheroo_set_state - set switcheroo state 1597 * 1598 * @pdev: pci dev pointer 1599 * @state: vga_switcheroo state 1600 * 1601 * Callback for the switcheroo driver. Suspends or resumes the 1602 * the asics before or after it is powered up using ACPI methods. 
1603 */ 1604 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, 1605 enum vga_switcheroo_state state) 1606 { 1607 struct drm_device *dev = pci_get_drvdata(pdev); 1608 int r; 1609 1610 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF) 1611 return; 1612 1613 if (state == VGA_SWITCHEROO_ON) { 1614 pr_info("switched on\n"); 1615 /* don't suspend or resume card normally */ 1616 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1617 1618 pci_set_power_state(pdev, PCI_D0); 1619 amdgpu_device_load_pci_state(pdev); 1620 r = pci_enable_device(pdev); 1621 if (r) 1622 DRM_WARN("pci_enable_device failed (%d)\n", r); 1623 amdgpu_device_resume(dev, true); 1624 1625 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1626 } else { 1627 pr_info("switched off\n"); 1628 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1629 amdgpu_device_suspend(dev, true); 1630 amdgpu_device_cache_pci_state(pdev); 1631 /* Shut down the device */ 1632 pci_disable_device(pdev); 1633 pci_set_power_state(pdev, PCI_D3cold); 1634 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1635 } 1636 } 1637 1638 /** 1639 * amdgpu_switcheroo_can_switch - see if switcheroo state can change 1640 * 1641 * @pdev: pci dev pointer 1642 * 1643 * Callback for the switcheroo driver. Check of the switcheroo 1644 * state can be changed. 1645 * Returns true if the state can be changed, false if not. 1646 */ 1647 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev) 1648 { 1649 struct drm_device *dev = pci_get_drvdata(pdev); 1650 1651 /* 1652 * FIXME: open_count is protected by drm_global_mutex but that would lead to 1653 * locking inversion with the driver load path. And the access here is 1654 * completely racy anyway. So don't bother with locking for now. 1655 */ 1656 return atomic_read(&dev->open_count) == 0; 1657 } 1658 #endif /* __linux__ */ 1659 1660 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { 1661 #ifdef notyet 1662 .set_gpu_state = amdgpu_switcheroo_set_state, 1663 .reprobe = NULL, 1664 .can_switch = amdgpu_switcheroo_can_switch, 1665 #endif 1666 }; 1667 1668 /** 1669 * amdgpu_device_ip_set_clockgating_state - set the CG state 1670 * 1671 * @dev: amdgpu_device pointer 1672 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1673 * @state: clockgating state (gate or ungate) 1674 * 1675 * Sets the requested clockgating state for all instances of 1676 * the hardware IP specified. 1677 * Returns the error code from the last instance. 1678 */ 1679 int amdgpu_device_ip_set_clockgating_state(void *dev, 1680 enum amd_ip_block_type block_type, 1681 enum amd_clockgating_state state) 1682 { 1683 struct amdgpu_device *adev = dev; 1684 int i, r = 0; 1685 1686 for (i = 0; i < adev->num_ip_blocks; i++) { 1687 if (!adev->ip_blocks[i].status.valid) 1688 continue; 1689 if (adev->ip_blocks[i].version->type != block_type) 1690 continue; 1691 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state) 1692 continue; 1693 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( 1694 (void *)adev, state); 1695 if (r) 1696 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", 1697 adev->ip_blocks[i].version->funcs->name, r); 1698 } 1699 return r; 1700 } 1701 1702 /** 1703 * amdgpu_device_ip_set_powergating_state - set the PG state 1704 * 1705 * @dev: amdgpu_device pointer 1706 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 
1707 * @state: powergating state (gate or ungate) 1708 * 1709 * Sets the requested powergating state for all instances of 1710 * the hardware IP specified. 1711 * Returns the error code from the last instance. 1712 */ 1713 int amdgpu_device_ip_set_powergating_state(void *dev, 1714 enum amd_ip_block_type block_type, 1715 enum amd_powergating_state state) 1716 { 1717 struct amdgpu_device *adev = dev; 1718 int i, r = 0; 1719 1720 for (i = 0; i < adev->num_ip_blocks; i++) { 1721 if (!adev->ip_blocks[i].status.valid) 1722 continue; 1723 if (adev->ip_blocks[i].version->type != block_type) 1724 continue; 1725 if (!adev->ip_blocks[i].version->funcs->set_powergating_state) 1726 continue; 1727 r = adev->ip_blocks[i].version->funcs->set_powergating_state( 1728 (void *)adev, state); 1729 if (r) 1730 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", 1731 adev->ip_blocks[i].version->funcs->name, r); 1732 } 1733 return r; 1734 } 1735 1736 /** 1737 * amdgpu_device_ip_get_clockgating_state - get the CG state 1738 * 1739 * @adev: amdgpu_device pointer 1740 * @flags: clockgating feature flags 1741 * 1742 * Walks the list of IPs on the device and updates the clockgating 1743 * flags for each IP. 1744 * Updates @flags with the feature flags for each hardware IP where 1745 * clockgating is enabled. 1746 */ 1747 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, 1748 u64 *flags) 1749 { 1750 int i; 1751 1752 for (i = 0; i < adev->num_ip_blocks; i++) { 1753 if (!adev->ip_blocks[i].status.valid) 1754 continue; 1755 if (adev->ip_blocks[i].version->funcs->get_clockgating_state) 1756 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags); 1757 } 1758 } 1759 1760 /** 1761 * amdgpu_device_ip_wait_for_idle - wait for idle 1762 * 1763 * @adev: amdgpu_device pointer 1764 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1765 * 1766 * Waits for the request hardware IP to be idle. 1767 * Returns 0 for success or a negative error code on failure. 1768 */ 1769 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, 1770 enum amd_ip_block_type block_type) 1771 { 1772 int i, r; 1773 1774 for (i = 0; i < adev->num_ip_blocks; i++) { 1775 if (!adev->ip_blocks[i].status.valid) 1776 continue; 1777 if (adev->ip_blocks[i].version->type == block_type) { 1778 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); 1779 if (r) 1780 return r; 1781 break; 1782 } 1783 } 1784 return 0; 1785 1786 } 1787 1788 /** 1789 * amdgpu_device_ip_is_idle - is the hardware IP idle 1790 * 1791 * @adev: amdgpu_device pointer 1792 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) 1793 * 1794 * Check if the hardware IP is idle or not. 1795 * Returns true if it the IP is idle, false if not. 1796 */ 1797 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, 1798 enum amd_ip_block_type block_type) 1799 { 1800 int i; 1801 1802 for (i = 0; i < adev->num_ip_blocks; i++) { 1803 if (!adev->ip_blocks[i].status.valid) 1804 continue; 1805 if (adev->ip_blocks[i].version->type == block_type) 1806 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); 1807 } 1808 return true; 1809 1810 } 1811 1812 /** 1813 * amdgpu_device_ip_get_ip_block - get a hw IP pointer 1814 * 1815 * @adev: amdgpu_device pointer 1816 * @type: Type of hardware IP (SMU, GFX, UVD, etc.) 1817 * 1818 * Returns a pointer to the hardware IP block structure 1819 * if it exists for the asic, otherwise NULL. 
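 *
 * Illustrative lookup (block type constants are from amd_shared.h):
 *
 *   struct amdgpu_ip_block *ip =
 *           amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
 *   if (ip)
 *           DRM_INFO("GFX IP v%u.%u\n", ip->version->major,
 *                    ip->version->minor);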
1820 */ 1821 struct amdgpu_ip_block * 1822 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, 1823 enum amd_ip_block_type type) 1824 { 1825 int i; 1826 1827 for (i = 0; i < adev->num_ip_blocks; i++) 1828 if (adev->ip_blocks[i].version->type == type) 1829 return &adev->ip_blocks[i]; 1830 1831 return NULL; 1832 } 1833 1834 /** 1835 * amdgpu_device_ip_block_version_cmp 1836 * 1837 * @adev: amdgpu_device pointer 1838 * @type: enum amd_ip_block_type 1839 * @major: major version 1840 * @minor: minor version 1841 * 1842 * return 0 if equal or greater 1843 * return 1 if smaller or the ip_block doesn't exist 1844 */ 1845 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, 1846 enum amd_ip_block_type type, 1847 u32 major, u32 minor) 1848 { 1849 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type); 1850 1851 if (ip_block && ((ip_block->version->major > major) || 1852 ((ip_block->version->major == major) && 1853 (ip_block->version->minor >= minor)))) 1854 return 0; 1855 1856 return 1; 1857 } 1858 1859 /** 1860 * amdgpu_device_ip_block_add 1861 * 1862 * @adev: amdgpu_device pointer 1863 * @ip_block_version: pointer to the IP to add 1864 * 1865 * Adds the IP block driver information to the collection of IPs 1866 * on the asic. 1867 */ 1868 int amdgpu_device_ip_block_add(struct amdgpu_device *adev, 1869 const struct amdgpu_ip_block_version *ip_block_version) 1870 { 1871 if (!ip_block_version) 1872 return -EINVAL; 1873 1874 switch (ip_block_version->type) { 1875 case AMD_IP_BLOCK_TYPE_VCN: 1876 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK) 1877 return 0; 1878 break; 1879 case AMD_IP_BLOCK_TYPE_JPEG: 1880 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK) 1881 return 0; 1882 break; 1883 default: 1884 break; 1885 } 1886 1887 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks, 1888 ip_block_version->funcs->name); 1889 1890 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; 1891 1892 return 0; 1893 } 1894 1895 /** 1896 * amdgpu_device_enable_virtual_display - enable virtual display feature 1897 * 1898 * @adev: amdgpu_device pointer 1899 * 1900 * Enabled the virtual display feature if the user has enabled it via 1901 * the module parameter virtual_display. This feature provides a virtual 1902 * display hardware on headless boards or in virtualized environments. 1903 * This function parses and validates the configuration string specified by 1904 * the user and configues the virtual display configuration (number of 1905 * virtual connectors, crtcs, etc.) specified. 
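 *
 * The parsed format is a semicolon separated list of
 * "<pci address>,<number of crtcs>" entries, with "all" accepted in place
 * of a PCI address; for example (illustrative address and count):
 *
 *   modprobe amdgpu virtual_display=0000:03:00.0,2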
1906 */ 1907 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) 1908 { 1909 adev->enable_virtual_display = false; 1910 1911 #ifdef notyet 1912 if (amdgpu_virtual_display) { 1913 const char *pci_address_name = pci_name(adev->pdev); 1914 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; 1915 1916 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); 1917 pciaddstr_tmp = pciaddstr; 1918 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { 1919 pciaddname = strsep(&pciaddname_tmp, ","); 1920 if (!strcmp("all", pciaddname) 1921 || !strcmp(pci_address_name, pciaddname)) { 1922 long num_crtc; 1923 int res = -1; 1924 1925 adev->enable_virtual_display = true; 1926 1927 if (pciaddname_tmp) 1928 res = kstrtol(pciaddname_tmp, 10, 1929 &num_crtc); 1930 1931 if (!res) { 1932 if (num_crtc < 1) 1933 num_crtc = 1; 1934 if (num_crtc > 6) 1935 num_crtc = 6; 1936 adev->mode_info.num_crtc = num_crtc; 1937 } else { 1938 adev->mode_info.num_crtc = 1; 1939 } 1940 break; 1941 } 1942 } 1943 1944 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", 1945 amdgpu_virtual_display, pci_address_name, 1946 adev->enable_virtual_display, adev->mode_info.num_crtc); 1947 1948 kfree(pciaddstr); 1949 } 1950 #endif 1951 } 1952 1953 /** 1954 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware 1955 * 1956 * @adev: amdgpu_device pointer 1957 * 1958 * Parses the asic configuration parameters specified in the gpu info 1959 * firmware and makes them availale to the driver for use in configuring 1960 * the asic. 1961 * Returns 0 on success, -EINVAL on failure. 1962 */ 1963 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) 1964 { 1965 const char *chip_name; 1966 char fw_name[40]; 1967 int err; 1968 const struct gpu_info_firmware_header_v1_0 *hdr; 1969 1970 adev->firmware.gpu_info_fw = NULL; 1971 1972 if (adev->mman.discovery_bin) { 1973 /* 1974 * FIXME: The bounding box is still needed by Navi12, so 1975 * temporarily read it from gpu_info firmware. Should be dropped 1976 * when DAL no longer needs it. 
1977 */ 1978 if (adev->asic_type != CHIP_NAVI12) 1979 return 0; 1980 } 1981 1982 switch (adev->asic_type) { 1983 default: 1984 return 0; 1985 case CHIP_VEGA10: 1986 chip_name = "vega10"; 1987 break; 1988 case CHIP_VEGA12: 1989 chip_name = "vega12"; 1990 break; 1991 case CHIP_RAVEN: 1992 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 1993 chip_name = "raven2"; 1994 else if (adev->apu_flags & AMD_APU_IS_PICASSO) 1995 chip_name = "picasso"; 1996 else 1997 chip_name = "raven"; 1998 break; 1999 case CHIP_ARCTURUS: 2000 chip_name = "arcturus"; 2001 break; 2002 case CHIP_NAVI12: 2003 chip_name = "navi12"; 2004 break; 2005 } 2006 2007 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); 2008 err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev); 2009 if (err) { 2010 dev_err(adev->dev, 2011 "Failed to load gpu_info firmware \"%s\"\n", 2012 fw_name); 2013 goto out; 2014 } 2015 err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw); 2016 if (err) { 2017 dev_err(adev->dev, 2018 "Failed to validate gpu_info firmware \"%s\"\n", 2019 fw_name); 2020 goto out; 2021 } 2022 2023 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data; 2024 amdgpu_ucode_print_gpu_info_hdr(&hdr->header); 2025 2026 switch (hdr->version_major) { 2027 case 1: 2028 { 2029 const struct gpu_info_firmware_v1_0 *gpu_info_fw = 2030 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + 2031 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2032 2033 /* 2034 * Should be droped when DAL no longer needs it. 2035 */ 2036 if (adev->asic_type == CHIP_NAVI12) 2037 goto parse_soc_bounding_box; 2038 2039 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); 2040 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); 2041 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); 2042 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se); 2043 adev->gfx.config.max_texture_channel_caches = 2044 le32_to_cpu(gpu_info_fw->gc_num_tccs); 2045 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs); 2046 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds); 2047 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth); 2048 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth); 2049 adev->gfx.config.double_offchip_lds_buf = 2050 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer); 2051 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size); 2052 adev->gfx.cu_info.max_waves_per_simd = 2053 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd); 2054 adev->gfx.cu_info.max_scratch_slots_per_cu = 2055 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu); 2056 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size); 2057 if (hdr->version_minor >= 1) { 2058 const struct gpu_info_firmware_v1_1 *gpu_info_fw = 2059 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data + 2060 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2061 adev->gfx.config.num_sc_per_sh = 2062 le32_to_cpu(gpu_info_fw->num_sc_per_sh); 2063 adev->gfx.config.num_packer_per_sc = 2064 le32_to_cpu(gpu_info_fw->num_packer_per_sc); 2065 } 2066 2067 parse_soc_bounding_box: 2068 /* 2069 * soc bounding box info is not integrated in disocovery table, 2070 * we always need to parse it from gpu info firmware if needed. 
2071 */ 2072 if (hdr->version_minor == 2) { 2073 const struct gpu_info_firmware_v1_2 *gpu_info_fw = 2074 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + 2075 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2076 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box; 2077 } 2078 break; 2079 } 2080 default: 2081 dev_err(adev->dev, 2082 "Unsupported gpu_info table %d\n", hdr->header.ucode_version); 2083 err = -EINVAL; 2084 goto out; 2085 } 2086 out: 2087 return err; 2088 } 2089 2090 /** 2091 * amdgpu_device_ip_early_init - run early init for hardware IPs 2092 * 2093 * @adev: amdgpu_device pointer 2094 * 2095 * Early initialization pass for hardware IPs. The hardware IPs that make 2096 * up each asic are discovered each IP's early_init callback is run. This 2097 * is the first stage in initializing the asic. 2098 * Returns 0 on success, negative error code on failure. 2099 */ 2100 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) 2101 { 2102 struct drm_device *dev = adev_to_drm(adev); 2103 struct pci_dev *parent; 2104 int i, r; 2105 2106 amdgpu_device_enable_virtual_display(adev); 2107 2108 if (amdgpu_sriov_vf(adev)) { 2109 r = amdgpu_virt_request_full_gpu(adev, true); 2110 if (r) 2111 return r; 2112 } 2113 2114 switch (adev->asic_type) { 2115 #ifdef CONFIG_DRM_AMDGPU_SI 2116 case CHIP_VERDE: 2117 case CHIP_TAHITI: 2118 case CHIP_PITCAIRN: 2119 case CHIP_OLAND: 2120 case CHIP_HAINAN: 2121 adev->family = AMDGPU_FAMILY_SI; 2122 r = si_set_ip_blocks(adev); 2123 if (r) 2124 return r; 2125 break; 2126 #endif 2127 #ifdef CONFIG_DRM_AMDGPU_CIK 2128 case CHIP_BONAIRE: 2129 case CHIP_HAWAII: 2130 case CHIP_KAVERI: 2131 case CHIP_KABINI: 2132 case CHIP_MULLINS: 2133 if (adev->flags & AMD_IS_APU) 2134 adev->family = AMDGPU_FAMILY_KV; 2135 else 2136 adev->family = AMDGPU_FAMILY_CI; 2137 2138 r = cik_set_ip_blocks(adev); 2139 if (r) 2140 return r; 2141 break; 2142 #endif 2143 case CHIP_TOPAZ: 2144 case CHIP_TONGA: 2145 case CHIP_FIJI: 2146 case CHIP_POLARIS10: 2147 case CHIP_POLARIS11: 2148 case CHIP_POLARIS12: 2149 case CHIP_VEGAM: 2150 case CHIP_CARRIZO: 2151 case CHIP_STONEY: 2152 if (adev->flags & AMD_IS_APU) 2153 adev->family = AMDGPU_FAMILY_CZ; 2154 else 2155 adev->family = AMDGPU_FAMILY_VI; 2156 2157 r = vi_set_ip_blocks(adev); 2158 if (r) 2159 return r; 2160 break; 2161 default: 2162 r = amdgpu_discovery_set_ip_blocks(adev); 2163 if (r) 2164 return r; 2165 break; 2166 } 2167 2168 if (amdgpu_has_atpx() && 2169 (amdgpu_is_atpx_hybrid() || 2170 amdgpu_has_atpx_dgpu_power_cntl()) && 2171 ((adev->flags & AMD_IS_APU) == 0) && 2172 !pci_is_thunderbolt_attached(dev->pdev)) 2173 adev->flags |= AMD_IS_PX; 2174 2175 if (!(adev->flags & AMD_IS_APU)) { 2176 parent = pci_upstream_bridge(adev->pdev); 2177 adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; 2178 } 2179 2180 amdgpu_amdkfd_device_probe(adev); 2181 2182 adev->pm.pp_feature = amdgpu_pp_feature_mask; 2183 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) 2184 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 2185 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) 2186 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; 2187 2188 for (i = 0; i < adev->num_ip_blocks; i++) { 2189 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 2190 DRM_ERROR("disabled ip block: %d <%s>\n", 2191 i, adev->ip_blocks[i].version->funcs->name); 2192 adev->ip_blocks[i].status.valid = false; 2193 } else { 2194 if (adev->ip_blocks[i].version->funcs->early_init) { 2195 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); 2196 if (r == -ENOENT) { 2197 adev->ip_blocks[i].status.valid = false; 2198 } else if (r) { 2199 DRM_ERROR("early_init of IP block <%s> failed %d\n", 2200 adev->ip_blocks[i].version->funcs->name, r); 2201 return r; 2202 } else { 2203 adev->ip_blocks[i].status.valid = true; 2204 } 2205 } else { 2206 adev->ip_blocks[i].status.valid = true; 2207 } 2208 } 2209 /* get the vbios after the asic_funcs are set up */ 2210 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2211 r = amdgpu_device_parse_gpu_info_fw(adev); 2212 if (r) 2213 return r; 2214 2215 /* Read BIOS */ 2216 if (!amdgpu_get_bios(adev)) 2217 return -EINVAL; 2218 2219 r = amdgpu_atombios_init(adev); 2220 if (r) { 2221 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 2222 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0); 2223 return r; 2224 } 2225 2226 /*get pf2vf msg info at it's earliest time*/ 2227 if (amdgpu_sriov_vf(adev)) 2228 amdgpu_virt_init_data_exchange(adev); 2229 2230 } 2231 } 2232 2233 adev->cg_flags &= amdgpu_cg_mask; 2234 adev->pg_flags &= amdgpu_pg_mask; 2235 2236 return 0; 2237 } 2238 2239 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev) 2240 { 2241 int i, r; 2242 2243 for (i = 0; i < adev->num_ip_blocks; i++) { 2244 if (!adev->ip_blocks[i].status.sw) 2245 continue; 2246 if (adev->ip_blocks[i].status.hw) 2247 continue; 2248 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2249 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) || 2250 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2251 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2252 if (r) { 2253 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2254 adev->ip_blocks[i].version->funcs->name, r); 2255 return r; 2256 } 2257 adev->ip_blocks[i].status.hw = true; 2258 } 2259 } 2260 2261 return 0; 2262 } 2263 2264 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev) 2265 { 2266 int i, r; 2267 2268 for (i = 0; i < adev->num_ip_blocks; i++) { 2269 if (!adev->ip_blocks[i].status.sw) 2270 continue; 2271 if (adev->ip_blocks[i].status.hw) 2272 continue; 2273 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2274 if (r) { 2275 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2276 adev->ip_blocks[i].version->funcs->name, r); 2277 return r; 2278 } 2279 adev->ip_blocks[i].status.hw = true; 2280 } 2281 2282 return 0; 2283 } 2284 2285 static int amdgpu_device_fw_loading(struct amdgpu_device *adev) 2286 { 2287 int r = 0; 2288 int i; 2289 uint32_t smu_version; 2290 2291 if (adev->asic_type >= CHIP_VEGA10) { 2292 for (i = 0; i < adev->num_ip_blocks; i++) { 2293 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) 2294 continue; 2295 2296 if 
(!adev->ip_blocks[i].status.sw) 2297 continue; 2298 2299 /* no need to do the fw loading again if already done*/ 2300 if (adev->ip_blocks[i].status.hw == true) 2301 break; 2302 2303 if (amdgpu_in_reset(adev) || adev->in_suspend) { 2304 r = adev->ip_blocks[i].version->funcs->resume(adev); 2305 if (r) { 2306 DRM_ERROR("resume of IP block <%s> failed %d\n", 2307 adev->ip_blocks[i].version->funcs->name, r); 2308 return r; 2309 } 2310 } else { 2311 r = adev->ip_blocks[i].version->funcs->hw_init(adev); 2312 if (r) { 2313 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2314 adev->ip_blocks[i].version->funcs->name, r); 2315 return r; 2316 } 2317 } 2318 2319 adev->ip_blocks[i].status.hw = true; 2320 break; 2321 } 2322 } 2323 2324 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 2325 r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 2326 2327 return r; 2328 } 2329 2330 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) 2331 { 2332 long timeout; 2333 int r, i; 2334 2335 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2336 struct amdgpu_ring *ring = adev->rings[i]; 2337 2338 /* No need to setup the GPU scheduler for rings that don't need it */ 2339 if (!ring || ring->no_scheduler) 2340 continue; 2341 2342 switch (ring->funcs->type) { 2343 case AMDGPU_RING_TYPE_GFX: 2344 timeout = adev->gfx_timeout; 2345 break; 2346 case AMDGPU_RING_TYPE_COMPUTE: 2347 timeout = adev->compute_timeout; 2348 break; 2349 case AMDGPU_RING_TYPE_SDMA: 2350 timeout = adev->sdma_timeout; 2351 break; 2352 default: 2353 timeout = adev->video_timeout; 2354 break; 2355 } 2356 2357 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, 2358 ring->num_hw_submission, amdgpu_job_hang_limit, 2359 timeout, adev->reset_domain->wq, 2360 ring->sched_score, ring->name, 2361 adev->dev); 2362 if (r) { 2363 DRM_ERROR("Failed to create scheduler on ring %s.\n", 2364 ring->name); 2365 return r; 2366 } 2367 } 2368 2369 return 0; 2370 } 2371 2372 2373 /** 2374 * amdgpu_device_ip_init - run init for hardware IPs 2375 * 2376 * @adev: amdgpu_device pointer 2377 * 2378 * Main initialization pass for hardware IPs. The list of all the hardware 2379 * IPs that make up the asic is walked and the sw_init and hw_init callbacks 2380 * are run. sw_init initializes the software state associated with each IP 2381 * and hw_init initializes the hardware associated with each IP. 2382 * Returns 0 on success, negative error code on failure. 
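* Roughly, the sequence implemented below is: sw_init for every valid block, an early hw_init for the COMMON and GMC blocks (so that GPU memory can be allocated), IB pool and ucode BO creation, then the two hw_init phases with firmware loading in between, followed by scheduler setup.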
2383 */ 2384 static int amdgpu_device_ip_init(struct amdgpu_device *adev) 2385 { 2386 int i, r; 2387 2388 r = amdgpu_ras_init(adev); 2389 if (r) 2390 return r; 2391 2392 for (i = 0; i < adev->num_ip_blocks; i++) { 2393 if (!adev->ip_blocks[i].status.valid) 2394 continue; 2395 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); 2396 if (r) { 2397 DRM_ERROR("sw_init of IP block <%s> failed %d\n", 2398 adev->ip_blocks[i].version->funcs->name, r); 2399 goto init_failed; 2400 } 2401 adev->ip_blocks[i].status.sw = true; 2402 2403 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) { 2404 /* need to do common hw init early so everything is set up for gmc */ 2405 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 2406 if (r) { 2407 DRM_ERROR("hw_init %d failed %d\n", i, r); 2408 goto init_failed; 2409 } 2410 adev->ip_blocks[i].status.hw = true; 2411 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2412 /* need to do gmc hw init early so we can allocate gpu mem */ 2413 /* Try to reserve bad pages early */ 2414 if (amdgpu_sriov_vf(adev)) 2415 amdgpu_virt_exchange_data(adev); 2416 2417 r = amdgpu_device_vram_scratch_init(adev); 2418 if (r) { 2419 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 2420 goto init_failed; 2421 } 2422 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 2423 if (r) { 2424 DRM_ERROR("hw_init %d failed %d\n", i, r); 2425 goto init_failed; 2426 } 2427 r = amdgpu_device_wb_init(adev); 2428 if (r) { 2429 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); 2430 goto init_failed; 2431 } 2432 adev->ip_blocks[i].status.hw = true; 2433 2434 /* right after GMC hw init, we create CSA */ 2435 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { 2436 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 2437 AMDGPU_GEM_DOMAIN_VRAM, 2438 AMDGPU_CSA_SIZE); 2439 if (r) { 2440 DRM_ERROR("allocate CSA failed %d\n", r); 2441 goto init_failed; 2442 } 2443 } 2444 } 2445 } 2446 2447 if (amdgpu_sriov_vf(adev)) 2448 amdgpu_virt_init_data_exchange(adev); 2449 2450 r = amdgpu_ib_pool_init(adev); 2451 if (r) { 2452 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 2453 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r); 2454 goto init_failed; 2455 } 2456 2457 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ 2458 if (r) 2459 goto init_failed; 2460 2461 r = amdgpu_device_ip_hw_init_phase1(adev); 2462 if (r) 2463 goto init_failed; 2464 2465 r = amdgpu_device_fw_loading(adev); 2466 if (r) 2467 goto init_failed; 2468 2469 r = amdgpu_device_ip_hw_init_phase2(adev); 2470 if (r) 2471 goto init_failed; 2472 2473 /* 2474 * retired pages will be loaded from eeprom and reserved here, 2475 * it should be called after amdgpu_device_ip_hw_init_phase2 since 2476 * for some ASICs the RAS EEPROM code relies on SMU fully functioning 2477 * for I2C communication which only true at this point. 2478 * 2479 * amdgpu_ras_recovery_init may fail, but the upper only cares the 2480 * failure from bad gpu situation and stop amdgpu init process 2481 * accordingly. For other failed cases, it will still release all 2482 * the resource and print error message, rather than returning one 2483 * negative value to upper level. 
2484 * 2485 * Note: theoretically, this should be called before all vram allocations 2486 * to protect retired page from abusing 2487 */ 2488 r = amdgpu_ras_recovery_init(adev); 2489 if (r) 2490 goto init_failed; 2491 2492 /** 2493 * In case of XGMI grab extra reference for reset domain for this device 2494 */ 2495 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2496 if (amdgpu_xgmi_add_device(adev) == 0) { 2497 if (!amdgpu_sriov_vf(adev)) { 2498 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2499 2500 if (WARN_ON(!hive)) { 2501 r = -ENOENT; 2502 goto init_failed; 2503 } 2504 2505 if (!hive->reset_domain || 2506 !amdgpu_reset_get_reset_domain(hive->reset_domain)) { 2507 r = -ENOENT; 2508 amdgpu_put_xgmi_hive(hive); 2509 goto init_failed; 2510 } 2511 2512 /* Drop the early temporary reset domain we created for device */ 2513 amdgpu_reset_put_reset_domain(adev->reset_domain); 2514 adev->reset_domain = hive->reset_domain; 2515 amdgpu_put_xgmi_hive(hive); 2516 } 2517 } 2518 } 2519 2520 r = amdgpu_device_init_schedulers(adev); 2521 if (r) 2522 goto init_failed; 2523 2524 /* Don't init kfd if whole hive need to be reset during init */ 2525 if (!adev->gmc.xgmi.pending_reset) 2526 amdgpu_amdkfd_device_init(adev); 2527 2528 amdgpu_fru_get_product_info(adev); 2529 2530 init_failed: 2531 if (amdgpu_sriov_vf(adev)) 2532 amdgpu_virt_release_full_gpu(adev, true); 2533 2534 return r; 2535 } 2536 2537 /** 2538 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer 2539 * 2540 * @adev: amdgpu_device pointer 2541 * 2542 * Writes a reset magic value to the gart pointer in VRAM. The driver calls 2543 * this function before a GPU reset. If the value is retained after a 2544 * GPU reset, VRAM has not been lost. Some GPU resets may destry VRAM contents. 2545 */ 2546 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) 2547 { 2548 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); 2549 } 2550 2551 /** 2552 * amdgpu_device_check_vram_lost - check if vram is valid 2553 * 2554 * @adev: amdgpu_device pointer 2555 * 2556 * Checks the reset magic value written to the gart pointer in VRAM. 2557 * The driver calls this after a GPU reset to see if the contents of 2558 * VRAM is lost or now. 2559 * returns true if vram is lost, false if not. 2560 */ 2561 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) 2562 { 2563 if (memcmp(adev->gart.ptr, adev->reset_magic, 2564 AMDGPU_RESET_MAGIC_NUM)) 2565 return true; 2566 2567 if (!amdgpu_in_reset(adev)) 2568 return false; 2569 2570 /* 2571 * For all ASICs with baco/mode1 reset, the VRAM is 2572 * always assumed to be lost. 2573 */ 2574 switch (amdgpu_asic_reset_method(adev)) { 2575 case AMD_RESET_METHOD_BACO: 2576 case AMD_RESET_METHOD_MODE1: 2577 return true; 2578 default: 2579 return false; 2580 } 2581 } 2582 2583 /** 2584 * amdgpu_device_set_cg_state - set clockgating for amdgpu device 2585 * 2586 * @adev: amdgpu_device pointer 2587 * @state: clockgating state (gate or ungate) 2588 * 2589 * The list of all the hardware IPs that make up the asic is walked and the 2590 * set_clockgating_state callbacks are run. 2591 * Late initialization pass enabling clockgating for hardware IPs. 2592 * Fini or suspend, pass disabling clockgating for hardware IPs. 2593 * Returns 0 on success, negative error code on failure. 
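* For example, amdgpu_device_ip_late_init() calls this with AMD_CG_STATE_GATE once all blocks are up, while the early-fini and suspend paths call it with AMD_CG_STATE_UNGATE before tearing the hardware down.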
2594 */ 2595 2596 int amdgpu_device_set_cg_state(struct amdgpu_device *adev, 2597 enum amd_clockgating_state state) 2598 { 2599 int i, j, r; 2600 2601 if (amdgpu_emu_mode == 1) 2602 return 0; 2603 2604 for (j = 0; j < adev->num_ip_blocks; j++) { 2605 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2606 if (!adev->ip_blocks[i].status.late_initialized) 2607 continue; 2608 /* skip CG for GFX on S0ix */ 2609 if (adev->in_s0ix && 2610 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2611 continue; 2612 /* skip CG for VCE/UVD, it's handled specially */ 2613 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2614 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2615 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2616 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2617 adev->ip_blocks[i].version->funcs->set_clockgating_state) { 2618 /* enable clockgating to save power */ 2619 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, 2620 state); 2621 if (r) { 2622 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 2623 adev->ip_blocks[i].version->funcs->name, r); 2624 return r; 2625 } 2626 } 2627 } 2628 2629 return 0; 2630 } 2631 2632 int amdgpu_device_set_pg_state(struct amdgpu_device *adev, 2633 enum amd_powergating_state state) 2634 { 2635 int i, j, r; 2636 2637 if (amdgpu_emu_mode == 1) 2638 return 0; 2639 2640 for (j = 0; j < adev->num_ip_blocks; j++) { 2641 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2642 if (!adev->ip_blocks[i].status.late_initialized) 2643 continue; 2644 /* skip PG for GFX on S0ix */ 2645 if (adev->in_s0ix && 2646 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2647 continue; 2648 /* skip CG for VCE/UVD, it's handled specially */ 2649 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2650 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && 2651 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && 2652 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG && 2653 adev->ip_blocks[i].version->funcs->set_powergating_state) { 2654 /* enable powergating to save power */ 2655 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, 2656 state); 2657 if (r) { 2658 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", 2659 adev->ip_blocks[i].version->funcs->name, r); 2660 return r; 2661 } 2662 } 2663 } 2664 return 0; 2665 } 2666 2667 static int amdgpu_device_enable_mgpu_fan_boost(void) 2668 { 2669 struct amdgpu_gpu_instance *gpu_ins; 2670 struct amdgpu_device *adev; 2671 int i, ret = 0; 2672 2673 mutex_lock(&mgpu_info.mutex); 2674 2675 /* 2676 * MGPU fan boost feature should be enabled 2677 * only when there are two or more dGPUs in 2678 * the system 2679 */ 2680 if (mgpu_info.num_dgpu < 2) 2681 goto out; 2682 2683 for (i = 0; i < mgpu_info.num_dgpu; i++) { 2684 gpu_ins = &(mgpu_info.gpu_ins[i]); 2685 adev = gpu_ins->adev; 2686 if (!(adev->flags & AMD_IS_APU) && 2687 !gpu_ins->mgpu_fan_enabled) { 2688 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev); 2689 if (ret) 2690 break; 2691 2692 gpu_ins->mgpu_fan_enabled = 1; 2693 } 2694 } 2695 2696 out: 2697 mutex_unlock(&mgpu_info.mutex); 2698 2699 return ret; 2700 } 2701 2702 /** 2703 * amdgpu_device_ip_late_init - run late init for hardware IPs 2704 * 2705 * @adev: amdgpu_device pointer 2706 * 2707 * Late initialization pass for hardware IPs. 
The list of all the hardware 2708 * IPs that make up the asic is walked and the late_init callbacks are run. 2709 * late_init covers any special initialization that an IP requires 2710 * after all of the IP blocks have been initialized or something that needs to happen 2711 * late in the init process. 2712 * Returns 0 on success, negative error code on failure. 2713 */ 2714 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) 2715 { 2716 struct amdgpu_gpu_instance *gpu_instance; 2717 int i = 0, r; 2718 2719 for (i = 0; i < adev->num_ip_blocks; i++) { 2720 if (!adev->ip_blocks[i].status.hw) 2721 continue; 2722 if (adev->ip_blocks[i].version->funcs->late_init) { 2723 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 2724 if (r) { 2725 DRM_ERROR("late_init of IP block <%s> failed %d\n", 2726 adev->ip_blocks[i].version->funcs->name, r); 2727 return r; 2728 } 2729 } 2730 adev->ip_blocks[i].status.late_initialized = true; 2731 } 2732 2733 r = amdgpu_ras_late_init(adev); 2734 if (r) { 2735 DRM_ERROR("amdgpu_ras_late_init failed %d", r); 2736 return r; 2737 } 2738 2739 amdgpu_ras_set_error_query_ready(adev, true); 2740 2741 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 2742 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE); 2743 2744 amdgpu_device_fill_reset_magic(adev); 2745 2746 r = amdgpu_device_enable_mgpu_fan_boost(); 2747 if (r) 2748 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); 2749 2750 /* For passthrough configuration on arcturus and aldebaran, enable special handling of SBR */ 2751 if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)|| 2752 adev->asic_type == CHIP_ALDEBARAN )) 2753 amdgpu_dpm_handle_passthrough_sbr(adev, true); 2754 2755 if (adev->gmc.xgmi.num_physical_nodes > 1) { 2756 mutex_lock(&mgpu_info.mutex); 2757 2758 /* 2759 * Reset device p-state to low as this was booted with high. 2760 * 2761 * This should be performed only after all devices from the same 2762 * hive get initialized. 2763 * 2764 * However, the number of devices in a hive is not known in advance, 2765 * as it is counted one by one during device initialization. 2766 * 2767 * So we wait until all XGMI interlinked devices are initialized. 2768 * This may bring some delays as those devices may come from 2769 * different hives. But that should be OK.
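* Concretely, once mgpu_info.num_dgpu matches adev->gmc.xgmi.num_physical_nodes below, every registered non-APU instance is switched to AMDGPU_XGMI_PSTATE_MIN.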
2770 */ 2771 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { 2772 for (i = 0; i < mgpu_info.num_gpu; i++) { 2773 gpu_instance = &(mgpu_info.gpu_ins[i]); 2774 if (gpu_instance->adev->flags & AMD_IS_APU) 2775 continue; 2776 2777 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 2778 AMDGPU_XGMI_PSTATE_MIN); 2779 if (r) { 2780 DRM_ERROR("pstate setting failed (%d).\n", r); 2781 break; 2782 } 2783 } 2784 } 2785 2786 mutex_unlock(&mgpu_info.mutex); 2787 } 2788 2789 return 0; 2790 } 2791 2792 /** 2793 * amdgpu_device_smu_fini_early - smu hw_fini wrapper 2794 * 2795 * @adev: amdgpu_device pointer 2796 * 2797 * For ASICs need to disable SMC first 2798 */ 2799 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev) 2800 { 2801 int i, r; 2802 2803 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) 2804 return; 2805 2806 for (i = 0; i < adev->num_ip_blocks; i++) { 2807 if (!adev->ip_blocks[i].status.hw) 2808 continue; 2809 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 2810 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2811 /* XXX handle errors */ 2812 if (r) { 2813 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2814 adev->ip_blocks[i].version->funcs->name, r); 2815 } 2816 adev->ip_blocks[i].status.hw = false; 2817 break; 2818 } 2819 } 2820 } 2821 2822 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) 2823 { 2824 int i, r; 2825 2826 for (i = 0; i < adev->num_ip_blocks; i++) { 2827 if (!adev->ip_blocks[i].version->funcs->early_fini) 2828 continue; 2829 2830 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); 2831 if (r) { 2832 DRM_DEBUG("early_fini of IP block <%s> failed %d\n", 2833 adev->ip_blocks[i].version->funcs->name, r); 2834 } 2835 } 2836 2837 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2838 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2839 2840 amdgpu_amdkfd_suspend(adev, false); 2841 2842 /* Workaroud for ASICs need to disable SMC first */ 2843 amdgpu_device_smu_fini_early(adev); 2844 2845 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2846 if (!adev->ip_blocks[i].status.hw) 2847 continue; 2848 2849 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); 2850 /* XXX handle errors */ 2851 if (r) { 2852 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 2853 adev->ip_blocks[i].version->funcs->name, r); 2854 } 2855 2856 adev->ip_blocks[i].status.hw = false; 2857 } 2858 2859 if (amdgpu_sriov_vf(adev)) { 2860 if (amdgpu_virt_release_full_gpu(adev, false)) 2861 DRM_ERROR("failed to release exclusive mode on fini\n"); 2862 } 2863 2864 return 0; 2865 } 2866 2867 /** 2868 * amdgpu_device_ip_fini - run fini for hardware IPs 2869 * 2870 * @adev: amdgpu_device pointer 2871 * 2872 * Main teardown pass for hardware IPs. The list of all the hardware 2873 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks 2874 * are run. hw_fini tears down the hardware associated with each IP 2875 * and sw_fini tears down any software state associated with each IP. 2876 * Returns 0 on success, negative error code on failure. 
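* The list is walked in reverse so blocks are torn down in the opposite order to their initialization; when the GMC block is reached, the ucode BO, static CSA, writeback buffers, VRAM scratch and IB pool are released as well.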
2877 */ 2878 static int amdgpu_device_ip_fini(struct amdgpu_device *adev) 2879 { 2880 int i, r; 2881 2882 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) 2883 amdgpu_virt_release_ras_err_handler_data(adev); 2884 2885 if (adev->gmc.xgmi.num_physical_nodes > 1) 2886 amdgpu_xgmi_remove_device(adev); 2887 2888 amdgpu_amdkfd_device_fini_sw(adev); 2889 2890 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2891 if (!adev->ip_blocks[i].status.sw) 2892 continue; 2893 2894 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 2895 amdgpu_ucode_free_bo(adev); 2896 amdgpu_free_static_csa(&adev->virt.csa_obj); 2897 amdgpu_device_wb_fini(adev); 2898 amdgpu_device_vram_scratch_fini(adev); 2899 amdgpu_ib_pool_fini(adev); 2900 } 2901 2902 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); 2903 /* XXX handle errors */ 2904 if (r) { 2905 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 2906 adev->ip_blocks[i].version->funcs->name, r); 2907 } 2908 adev->ip_blocks[i].status.sw = false; 2909 adev->ip_blocks[i].status.valid = false; 2910 } 2911 2912 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2913 if (!adev->ip_blocks[i].status.late_initialized) 2914 continue; 2915 if (adev->ip_blocks[i].version->funcs->late_fini) 2916 adev->ip_blocks[i].version->funcs->late_fini((void *)adev); 2917 adev->ip_blocks[i].status.late_initialized = false; 2918 } 2919 2920 amdgpu_ras_fini(adev); 2921 2922 return 0; 2923 } 2924 2925 /** 2926 * amdgpu_device_delayed_init_work_handler - work handler for IB tests 2927 * 2928 * @work: work_struct. 2929 */ 2930 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work) 2931 { 2932 struct amdgpu_device *adev = 2933 container_of(work, struct amdgpu_device, delayed_init_work.work); 2934 int r; 2935 2936 r = amdgpu_ib_ring_tests(adev); 2937 if (r) 2938 DRM_ERROR("ib ring test failed (%d).\n", r); 2939 } 2940 2941 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 2942 { 2943 struct amdgpu_device *adev = 2944 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work); 2945 2946 WARN_ON_ONCE(adev->gfx.gfx_off_state); 2947 WARN_ON_ONCE(adev->gfx.gfx_off_req_count); 2948 2949 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true)) 2950 adev->gfx.gfx_off_state = true; 2951 } 2952 2953 /** 2954 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1) 2955 * 2956 * @adev: amdgpu_device pointer 2957 * 2958 * Main suspend function for hardware IPs. The list of all the hardware 2959 * IPs that make up the asic is walked, clockgating is disabled and the 2960 * suspend callbacks are run. suspend puts the hardware and software state 2961 * in each IP into a state suitable for suspend. 2962 * Returns 0 on success, negative error code on failure. 2963 */ 2964 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) 2965 { 2966 int i, r; 2967 2968 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2969 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2970 2971 /* 2972 * Per PMFW team's suggestion, driver needs to handle gfxoff 2973 * and df cstate features disablement for gpu reset(e.g. Mode1Reset) 2974 * scenario. Add the missing df cstate disablement here. 
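* The amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW) call below is that missing disablement; its failure is only warned about rather than treated as fatal.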
2975 */ 2976 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) 2977 dev_warn(adev->dev, "Failed to disallow df cstate"); 2978 2979 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2980 if (!adev->ip_blocks[i].status.valid) 2981 continue; 2982 2983 /* displays are handled separately */ 2984 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) 2985 continue; 2986 2987 /* XXX handle errors */ 2988 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2989 /* XXX handle errors */ 2990 if (r) { 2991 DRM_ERROR("suspend of IP block <%s> failed %d\n", 2992 adev->ip_blocks[i].version->funcs->name, r); 2993 return r; 2994 } 2995 2996 adev->ip_blocks[i].status.hw = false; 2997 } 2998 2999 return 0; 3000 } 3001 3002 /** 3003 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2) 3004 * 3005 * @adev: amdgpu_device pointer 3006 * 3007 * Main suspend function for hardware IPs. The list of all the hardware 3008 * IPs that make up the asic is walked, clockgating is disabled and the 3009 * suspend callbacks are run. suspend puts the hardware and software state 3010 * in each IP into a state suitable for suspend. 3011 * Returns 0 on success, negative error code on failure. 3012 */ 3013 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) 3014 { 3015 int i, r; 3016 3017 if (adev->in_s0ix) 3018 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); 3019 3020 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 3021 if (!adev->ip_blocks[i].status.valid) 3022 continue; 3023 /* displays are handled in phase1 */ 3024 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) 3025 continue; 3026 /* PSP lost connection when err_event_athub occurs */ 3027 if (amdgpu_ras_intr_triggered() && 3028 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 3029 adev->ip_blocks[i].status.hw = false; 3030 continue; 3031 } 3032 3033 /* skip unnecessary suspend if we do not initialize them yet */ 3034 if (adev->gmc.xgmi.pending_reset && 3035 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3036 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC || 3037 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3038 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) { 3039 adev->ip_blocks[i].status.hw = false; 3040 continue; 3041 } 3042 3043 /* skip suspend of gfx/mes and psp for S0ix 3044 * gfx is in gfxoff state, so on resume it will exit gfxoff just 3045 * like at runtime. PSP is also part of the always on hardware 3046 * so no need to suspend it. 
3047 */ 3048 if (adev->in_s0ix && 3049 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 3050 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 3051 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) 3052 continue; 3053 3054 /* XXX handle errors */ 3055 r = adev->ip_blocks[i].version->funcs->suspend(adev); 3056 /* XXX handle errors */ 3057 if (r) { 3058 DRM_ERROR("suspend of IP block <%s> failed %d\n", 3059 adev->ip_blocks[i].version->funcs->name, r); 3060 } 3061 adev->ip_blocks[i].status.hw = false; 3062 /* handle putting the SMC in the appropriate state */ 3063 if(!amdgpu_sriov_vf(adev)){ 3064 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3065 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 3066 if (r) { 3067 DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 3068 adev->mp1_state, r); 3069 return r; 3070 } 3071 } 3072 } 3073 } 3074 3075 return 0; 3076 } 3077 3078 /** 3079 * amdgpu_device_ip_suspend - run suspend for hardware IPs 3080 * 3081 * @adev: amdgpu_device pointer 3082 * 3083 * Main suspend function for hardware IPs. The list of all the hardware 3084 * IPs that make up the asic is walked, clockgating is disabled and the 3085 * suspend callbacks are run. suspend puts the hardware and software state 3086 * in each IP into a state suitable for suspend. 3087 * Returns 0 on success, negative error code on failure. 3088 */ 3089 int amdgpu_device_ip_suspend(struct amdgpu_device *adev) 3090 { 3091 int r; 3092 3093 if (amdgpu_sriov_vf(adev)) { 3094 amdgpu_virt_fini_data_exchange(adev); 3095 amdgpu_virt_request_full_gpu(adev, false); 3096 } 3097 3098 r = amdgpu_device_ip_suspend_phase1(adev); 3099 if (r) 3100 return r; 3101 r = amdgpu_device_ip_suspend_phase2(adev); 3102 3103 if (amdgpu_sriov_vf(adev)) 3104 amdgpu_virt_release_full_gpu(adev, false); 3105 3106 return r; 3107 } 3108 3109 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) 3110 { 3111 int i, r; 3112 3113 static enum amd_ip_block_type ip_order[] = { 3114 AMD_IP_BLOCK_TYPE_COMMON, 3115 AMD_IP_BLOCK_TYPE_GMC, 3116 AMD_IP_BLOCK_TYPE_PSP, 3117 AMD_IP_BLOCK_TYPE_IH, 3118 }; 3119 3120 for (i = 0; i < adev->num_ip_blocks; i++) { 3121 int j; 3122 struct amdgpu_ip_block *block; 3123 3124 block = &adev->ip_blocks[i]; 3125 block->status.hw = false; 3126 3127 for (j = 0; j < ARRAY_SIZE(ip_order); j++) { 3128 3129 if (block->version->type != ip_order[j] || 3130 !block->status.valid) 3131 continue; 3132 3133 r = block->version->funcs->hw_init(adev); 3134 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 3135 if (r) 3136 return r; 3137 block->status.hw = true; 3138 } 3139 } 3140 3141 return 0; 3142 } 3143 3144 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) 3145 { 3146 int i, r; 3147 3148 static enum amd_ip_block_type ip_order[] = { 3149 AMD_IP_BLOCK_TYPE_SMC, 3150 AMD_IP_BLOCK_TYPE_DCE, 3151 AMD_IP_BLOCK_TYPE_GFX, 3152 AMD_IP_BLOCK_TYPE_SDMA, 3153 AMD_IP_BLOCK_TYPE_UVD, 3154 AMD_IP_BLOCK_TYPE_VCE, 3155 AMD_IP_BLOCK_TYPE_VCN 3156 }; 3157 3158 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { 3159 int j; 3160 struct amdgpu_ip_block *block; 3161 3162 for (j = 0; j < adev->num_ip_blocks; j++) { 3163 block = &adev->ip_blocks[j]; 3164 3165 if (block->version->type != ip_order[i] || 3166 !block->status.valid || 3167 block->status.hw) 3168 continue; 3169 3170 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) 3171 r = block->version->funcs->resume(adev); 3172 else 3173 r = block->version->funcs->hw_init(adev); 
3174 3175 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 3176 if (r) 3177 return r; 3178 block->status.hw = true; 3179 } 3180 } 3181 3182 return 0; 3183 } 3184 3185 /** 3186 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs 3187 * 3188 * @adev: amdgpu_device pointer 3189 * 3190 * First resume function for hardware IPs. The list of all the hardware 3191 * IPs that make up the asic is walked and the resume callbacks are run for 3192 * COMMON, GMC, and IH. resume puts the hardware into a functional state 3193 * after a suspend and updates the software state as necessary. This 3194 * function is also used for restoring the GPU after a GPU reset. 3195 * Returns 0 on success, negative error code on failure. 3196 */ 3197 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) 3198 { 3199 int i, r; 3200 3201 for (i = 0; i < adev->num_ip_blocks; i++) { 3202 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3203 continue; 3204 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3205 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3206 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3207 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) { 3208 3209 r = adev->ip_blocks[i].version->funcs->resume(adev); 3210 if (r) { 3211 DRM_ERROR("resume of IP block <%s> failed %d\n", 3212 adev->ip_blocks[i].version->funcs->name, r); 3213 return r; 3214 } 3215 adev->ip_blocks[i].status.hw = true; 3216 } 3217 } 3218 3219 return 0; 3220 } 3221 3222 /** 3223 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs 3224 * 3225 * @adev: amdgpu_device pointer 3226 * 3227 * First resume function for hardware IPs. The list of all the hardware 3228 * IPs that make up the asic is walked and the resume callbacks are run for 3229 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a 3230 * functional state after a suspend and updates the software state as 3231 * necessary. This function is also used for restoring the GPU after a GPU 3232 * reset. 3233 * Returns 0 on success, negative error code on failure. 3234 */ 3235 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) 3236 { 3237 int i, r; 3238 3239 for (i = 0; i < adev->num_ip_blocks; i++) { 3240 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) 3241 continue; 3242 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3243 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3244 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3245 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) 3246 continue; 3247 r = adev->ip_blocks[i].version->funcs->resume(adev); 3248 if (r) { 3249 DRM_ERROR("resume of IP block <%s> failed %d\n", 3250 adev->ip_blocks[i].version->funcs->name, r); 3251 return r; 3252 } 3253 adev->ip_blocks[i].status.hw = true; 3254 3255 if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3256 /* disable gfxoff for IP resume. The gfxoff will be re-enabled in 3257 * amdgpu_device_resume() after IP resume. 3258 */ 3259 amdgpu_gfx_off_ctrl(adev, false); 3260 DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n"); 3261 } 3262 3263 } 3264 3265 return 0; 3266 } 3267 3268 /** 3269 * amdgpu_device_ip_resume - run resume for hardware IPs 3270 * 3271 * @adev: amdgpu_device pointer 3272 * 3273 * Main resume function for hardware IPs. 
The hardware IPs 3274 * are split into two resume functions because they are 3275 * also used in recovering from a GPU reset and some additional 3276 * steps need to be taken between them. In this case (S3/S4) they are 3277 * run sequentially. 3278 * Returns 0 on success, negative error code on failure. 3279 */ 3280 static int amdgpu_device_ip_resume(struct amdgpu_device *adev) 3281 { 3282 int r; 3283 3284 r = amdgpu_amdkfd_resume_iommu(adev); 3285 if (r) 3286 return r; 3287 3288 r = amdgpu_device_ip_resume_phase1(adev); 3289 if (r) 3290 return r; 3291 3292 r = amdgpu_device_fw_loading(adev); 3293 if (r) 3294 return r; 3295 3296 r = amdgpu_device_ip_resume_phase2(adev); 3297 3298 return r; 3299 } 3300 3301 /** 3302 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV 3303 * 3304 * @adev: amdgpu_device pointer 3305 * 3306 * Query the VBIOS data tables to determine if the board supports SR-IOV. 3307 */ 3308 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) 3309 { 3310 if (amdgpu_sriov_vf(adev)) { 3311 if (adev->is_atom_fw) { 3312 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) 3313 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3314 } else { 3315 if (amdgpu_atombios_has_gpu_virtualization_table(adev)) 3316 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; 3317 } 3318 3319 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)) 3320 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0); 3321 } 3322 } 3323 3324 /** 3325 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic 3326 * 3327 * @asic_type: AMD asic type 3328 * 3329 * Check if there is DC (new modesetting infrastructure) support for an asic. 3330 * Returns true if DC has support, false if not. 3331 */ 3332 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) 3333 { 3334 switch (asic_type) { 3335 #ifdef CONFIG_DRM_AMDGPU_SI 3336 case CHIP_HAINAN: 3337 #endif 3338 case CHIP_TOPAZ: 3339 /* chips with no display hardware */ 3340 return false; 3341 #if defined(CONFIG_DRM_AMD_DC) 3342 case CHIP_TAHITI: 3343 case CHIP_PITCAIRN: 3344 case CHIP_VERDE: 3345 case CHIP_OLAND: 3346 /* 3347 * We have systems in the wild with these ASICs that require 3348 * LVDS and VGA support which is not supported with DC. 3349 * 3350 * Fall back to the non-DC driver here by default so as not to 3351 * cause regressions. 3352 */ 3353 #if defined(CONFIG_DRM_AMD_DC_SI) 3354 return amdgpu_dc > 0; 3355 #else 3356 return false; 3357 #endif 3358 case CHIP_BONAIRE: 3359 case CHIP_KAVERI: 3360 case CHIP_KABINI: 3361 case CHIP_MULLINS: 3362 /* 3363 * We have systems in the wild with these ASICs that require 3364 * VGA support which is not supported with DC. 3365 * 3366 * Fall back to the non-DC driver here by default so as not to 3367 * cause regressions.
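* Hence the amdgpu_dc > 0 check below: DC is only used on these chips when the user explicitly opts in via the dc module parameter (e.g. amdgpu.dc=1).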
3368 */ 3369 return amdgpu_dc > 0; 3370 default: 3371 return amdgpu_dc != 0; 3372 #else 3373 default: 3374 if (amdgpu_dc > 0) 3375 DRM_INFO_ONCE("Display Core has been requested via kernel parameter " 3376 "but isn't supported by ASIC, ignoring\n"); 3377 return false; 3378 #endif 3379 } 3380 } 3381 3382 /** 3383 * amdgpu_device_has_dc_support - check if dc is supported 3384 * 3385 * @adev: amdgpu_device pointer 3386 * 3387 * Returns true for supported, false for not supported 3388 */ 3389 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) 3390 { 3391 if (amdgpu_sriov_vf(adev) || 3392 adev->enable_virtual_display || 3393 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) 3394 return false; 3395 3396 return amdgpu_device_asic_has_dc_support(adev->asic_type); 3397 } 3398 3399 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) 3400 { 3401 struct amdgpu_device *adev = 3402 container_of(__work, struct amdgpu_device, xgmi_reset_work); 3403 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 3404 3405 /* It's a bug to not have a hive within this function */ 3406 if (WARN_ON(!hive)) 3407 return; 3408 3409 /* 3410 * Use task barrier to synchronize all xgmi reset works across the 3411 * hive. task_barrier_enter and task_barrier_exit will block 3412 * until all the threads running the xgmi reset works reach 3413 * those points. task_barrier_full will do both blocks. 3414 */ 3415 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 3416 3417 task_barrier_enter(&hive->tb); 3418 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev)); 3419 3420 if (adev->asic_reset_res) 3421 goto fail; 3422 3423 task_barrier_exit(&hive->tb); 3424 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev)); 3425 3426 if (adev->asic_reset_res) 3427 goto fail; 3428 3429 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && 3430 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 3431 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); 3432 } else { 3433 3434 task_barrier_full(&hive->tb); 3435 adev->asic_reset_res = amdgpu_asic_reset(adev); 3436 } 3437 3438 fail: 3439 if (adev->asic_reset_res) 3440 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 3441 adev->asic_reset_res, adev_to_drm(adev)->unique); 3442 amdgpu_put_xgmi_hive(hive); 3443 } 3444 3445 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) 3446 { 3447 char *input = amdgpu_lockup_timeout; 3448 char *timeout_setting = NULL; 3449 int index = 0; 3450 long timeout; 3451 int ret = 0; 3452 3453 /* 3454 * By default timeout for non compute jobs is 10000 3455 * and 60000 for compute jobs. 3456 * In SR-IOV or passthrough mode, timeout for compute 3457 * jobs are 60000 by default. 3458 */ 3459 adev->gfx_timeout = msecs_to_jiffies(10000); 3460 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3461 if (amdgpu_sriov_vf(adev)) 3462 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
3463 msecs_to_jiffies(60000) : msecs_to_jiffies(10000); 3464 else 3465 adev->compute_timeout = msecs_to_jiffies(60000); 3466 3467 #ifdef notyet 3468 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3469 while ((timeout_setting = strsep(&input, ",")) && 3470 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { 3471 ret = kstrtol(timeout_setting, 0, &timeout); 3472 if (ret) 3473 return ret; 3474 3475 if (timeout == 0) { 3476 index++; 3477 continue; 3478 } else if (timeout < 0) { 3479 timeout = MAX_SCHEDULE_TIMEOUT; 3480 dev_warn(adev->dev, "lockup timeout disabled"); 3481 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); 3482 } else { 3483 timeout = msecs_to_jiffies(timeout); 3484 } 3485 3486 switch (index++) { 3487 case 0: 3488 adev->gfx_timeout = timeout; 3489 break; 3490 case 1: 3491 adev->compute_timeout = timeout; 3492 break; 3493 case 2: 3494 adev->sdma_timeout = timeout; 3495 break; 3496 case 3: 3497 adev->video_timeout = timeout; 3498 break; 3499 default: 3500 break; 3501 } 3502 } 3503 /* 3504 * There is only one value specified and 3505 * it should apply to all non-compute jobs. 3506 */ 3507 if (index == 1) { 3508 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; 3509 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) 3510 adev->compute_timeout = adev->gfx_timeout; 3511 } 3512 } 3513 #endif 3514 3515 return ret; 3516 } 3517 3518 /** 3519 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU 3520 * 3521 * @adev: amdgpu_device pointer 3522 * 3523 * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode 3524 */ 3525 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev) 3526 { 3527 #ifdef notyet 3528 struct iommu_domain *domain; 3529 3530 domain = iommu_get_domain_for_dev(adev->dev); 3531 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY) 3532 #endif 3533 adev->ram_is_direct_mapped = true; 3534 } 3535 3536 static const struct attribute *amdgpu_dev_attributes[] = { 3537 &dev_attr_product_name.attr, 3538 &dev_attr_product_number.attr, 3539 &dev_attr_serial_number.attr, 3540 &dev_attr_pcie_replay_count.attr, 3541 NULL 3542 }; 3543 3544 /** 3545 * amdgpu_device_init - initialize the driver 3546 * 3547 * @adev: amdgpu_device pointer 3548 * @flags: driver flags 3549 * 3550 * Initializes the driver info and hw (all asics). 3551 * Returns 0 for success or an error on failure. 3552 * Called at driver startup. 
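* On failure, control unwinds through the release_ras_con/failed labels at the end of the function, releasing the RAS context and then calling amdgpu_vf_error_trans_all() before the error code is returned.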
3553 */ 3554 int amdgpu_device_init(struct amdgpu_device *adev, 3555 uint32_t flags) 3556 { 3557 struct drm_device *ddev = adev_to_drm(adev); 3558 struct pci_dev *pdev = adev->pdev; 3559 int r, i; 3560 bool px = false; 3561 u32 max_MBps; 3562 3563 adev->shutdown = false; 3564 adev->flags = flags; 3565 3566 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) 3567 adev->asic_type = amdgpu_force_asic_type; 3568 else 3569 adev->asic_type = flags & AMD_ASIC_MASK; 3570 3571 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; 3572 if (amdgpu_emu_mode == 1) 3573 adev->usec_timeout *= 10; 3574 adev->gmc.gart_size = 512 * 1024 * 1024; 3575 adev->accel_working = false; 3576 adev->num_rings = 0; 3577 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub()); 3578 adev->mman.buffer_funcs = NULL; 3579 adev->mman.buffer_funcs_ring = NULL; 3580 adev->vm_manager.vm_pte_funcs = NULL; 3581 adev->vm_manager.vm_pte_num_scheds = 0; 3582 adev->gmc.gmc_funcs = NULL; 3583 adev->harvest_ip_mask = 0x0; 3584 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 3585 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 3586 3587 adev->smc_rreg = &amdgpu_invalid_rreg; 3588 adev->smc_wreg = &amdgpu_invalid_wreg; 3589 adev->pcie_rreg = &amdgpu_invalid_rreg; 3590 adev->pcie_wreg = &amdgpu_invalid_wreg; 3591 adev->pciep_rreg = &amdgpu_invalid_rreg; 3592 adev->pciep_wreg = &amdgpu_invalid_wreg; 3593 adev->pcie_rreg64 = &amdgpu_invalid_rreg64; 3594 adev->pcie_wreg64 = &amdgpu_invalid_wreg64; 3595 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; 3596 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 3597 adev->didt_rreg = &amdgpu_invalid_rreg; 3598 adev->didt_wreg = &amdgpu_invalid_wreg; 3599 adev->gc_cac_rreg = &amdgpu_invalid_rreg; 3600 adev->gc_cac_wreg = &amdgpu_invalid_wreg; 3601 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 3602 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 3603 3604 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 3605 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 3606 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 3607 3608 /* mutex initialization are all done here so we 3609 * can recall function without having locking issues */ 3610 rw_init(&adev->firmware.mutex, "agfw"); 3611 rw_init(&adev->pm.mutex, "agpm"); 3612 rw_init(&adev->gfx.gpu_clock_mutex, "gfxclk"); 3613 rw_init(&adev->srbm_mutex, "srbm"); 3614 rw_init(&adev->gfx.pipe_reserve_mutex, "pipers"); 3615 rw_init(&adev->gfx.gfx_off_mutex, "gfxoff"); 3616 rw_init(&adev->grbm_idx_mutex, "grbmidx"); 3617 rw_init(&adev->mn_lock, "agpumn"); 3618 rw_init(&adev->virt.vf_errors.lock, "vferr"); 3619 hash_init(adev->mn_hash); 3620 rw_init(&adev->psp.mutex, "agpsp"); 3621 rw_init(&adev->notifier_lock, "agnf"); 3622 rw_init(&adev->pm.stable_pstate_ctx_lock, "agps"); 3623 rw_init(&adev->benchmark_mutex, "agbm"); 3624 3625 amdgpu_device_init_apu_flags(adev); 3626 3627 r = amdgpu_device_check_arguments(adev); 3628 if (r) 3629 return r; 3630 3631 mtx_init(&adev->mmio_idx_lock, IPL_TTY); 3632 mtx_init(&adev->smc_idx_lock, IPL_TTY); 3633 mtx_init(&adev->pcie_idx_lock, IPL_TTY); 3634 mtx_init(&adev->uvd_ctx_idx_lock, IPL_TTY); 3635 mtx_init(&adev->didt_idx_lock, IPL_TTY); 3636 mtx_init(&adev->gc_cac_idx_lock, IPL_TTY); 3637 mtx_init(&adev->se_cac_idx_lock, IPL_TTY); 3638 mtx_init(&adev->audio_endpt_idx_lock, IPL_TTY); 3639 mtx_init(&adev->mm_stats.lock, IPL_NONE); 3640 3641 INIT_LIST_HEAD(&adev->shadow_list); 3642 rw_init(&adev->shadow_list_lock, 
"sdwlst"); 3643 3644 INIT_LIST_HEAD(&adev->reset_list); 3645 3646 INIT_LIST_HEAD(&adev->ras_list); 3647 3648 INIT_DELAYED_WORK(&adev->delayed_init_work, 3649 amdgpu_device_delayed_init_work_handler); 3650 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, 3651 amdgpu_device_delay_enable_gfx_off); 3652 3653 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func); 3654 3655 adev->gfx.gfx_off_req_count = 1; 3656 adev->gfx.gfx_off_residency = 0; 3657 adev->gfx.gfx_off_entrycount = 0; 3658 adev->pm.ac_power = power_supply_is_system_supplied() > 0; 3659 3660 atomic_set(&adev->throttling_logging_enabled, 1); 3661 /* 3662 * If throttling continues, logging will be performed every minute 3663 * to avoid log flooding. "-1" is subtracted since the thermal 3664 * throttling interrupt comes every second. Thus, the total logging 3665 * interval is 59 seconds(retelimited printk interval) + 1(waiting 3666 * for throttling interrupt) = 60 seconds. 3667 */ 3668 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1); 3669 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE); 3670 3671 #ifdef __linux__ 3672 /* Registers mapping */ 3673 /* TODO: block userspace mapping of io register */ 3674 if (adev->asic_type >= CHIP_BONAIRE) { 3675 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 3676 adev->rmmio_size = pci_resource_len(adev->pdev, 5); 3677 } else { 3678 adev->rmmio_base = pci_resource_start(adev->pdev, 2); 3679 adev->rmmio_size = pci_resource_len(adev->pdev, 2); 3680 } 3681 3682 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) 3683 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); 3684 3685 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); 3686 if (adev->rmmio == NULL) { 3687 return -ENOMEM; 3688 } 3689 #endif 3690 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 3691 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); 3692 3693 amdgpu_device_get_pcie_info(adev); 3694 3695 if (amdgpu_mcbp) 3696 DRM_INFO("MCBP is enabled\n"); 3697 3698 /* 3699 * Reset domain needs to be present early, before XGMI hive discovered 3700 * (if any) and intitialized to use reset sem and in_gpu reset flag 3701 * early on during init and before calling to RREG32. 
3702 */ 3703 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev"); 3704 if (!adev->reset_domain) 3705 return -ENOMEM; 3706 3707 /* detect hw virtualization here */ 3708 amdgpu_detect_virtualization(adev); 3709 3710 r = amdgpu_device_get_job_timeout_settings(adev); 3711 if (r) { 3712 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); 3713 return r; 3714 } 3715 3716 /* early init functions */ 3717 r = amdgpu_device_ip_early_init(adev); 3718 if (r) 3719 return r; 3720 3721 /* Get rid of things like offb */ 3722 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver); 3723 if (r) 3724 return r; 3725 3726 /* Enable TMZ based on IP_VERSION */ 3727 amdgpu_gmc_tmz_set(adev); 3728 3729 amdgpu_gmc_noretry_set(adev); 3730 /* Need to get xgmi info early to decide the reset behavior*/ 3731 if (adev->gmc.xgmi.supported) { 3732 r = adev->gfxhub.funcs->get_xgmi_info(adev); 3733 if (r) 3734 return r; 3735 } 3736 3737 /* enable PCIE atomic ops */ 3738 #ifdef notyet 3739 if (amdgpu_sriov_vf(adev)) 3740 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) 3741 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == 3742 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3743 else 3744 adev->have_atomics_support = 3745 !pci_enable_atomic_ops_to_root(adev->pdev, 3746 PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 3747 PCI_EXP_DEVCAP2_ATOMIC_COMP64); 3748 if (!adev->have_atomics_support) 3749 dev_info(adev->dev, "PCIE atomic ops is not supported\n"); 3750 #else 3751 adev->have_atomics_support = false; 3752 #endif 3753 3754 /* doorbell bar mapping and doorbell index init*/ 3755 amdgpu_device_doorbell_init(adev); 3756 3757 if (amdgpu_emu_mode == 1) { 3758 /* post the asic on emulation mode */ 3759 emu_soc_asic_init(adev); 3760 goto fence_driver_init; 3761 } 3762 3763 amdgpu_reset_init(adev); 3764 3765 /* detect if we are with an SRIOV vbios */ 3766 amdgpu_device_detect_sriov_bios(adev); 3767 3768 /* check if we need to reset the asic 3769 * E.g., driver was not cleanly unloaded previously, etc. 
3770 */ 3771 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { 3772 if (adev->gmc.xgmi.num_physical_nodes) { 3773 dev_info(adev->dev, "Pending hive reset.\n"); 3774 adev->gmc.xgmi.pending_reset = true; 3775 /* Only need to init necessary block for SMU to handle the reset */ 3776 for (i = 0; i < adev->num_ip_blocks; i++) { 3777 if (!adev->ip_blocks[i].status.valid) 3778 continue; 3779 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 3780 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 3781 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH || 3782 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) { 3783 DRM_DEBUG("IP %s disabled for hw_init.\n", 3784 adev->ip_blocks[i].version->funcs->name); 3785 adev->ip_blocks[i].status.hw = true; 3786 } 3787 } 3788 } else { 3789 r = amdgpu_asic_reset(adev); 3790 if (r) { 3791 dev_err(adev->dev, "asic reset on init failed\n"); 3792 goto failed; 3793 } 3794 } 3795 } 3796 3797 pci_enable_pcie_error_reporting(adev->pdev); 3798 3799 /* Post card if necessary */ 3800 if (amdgpu_device_need_post(adev)) { 3801 if (!adev->bios) { 3802 dev_err(adev->dev, "no vBIOS found\n"); 3803 r = -EINVAL; 3804 goto failed; 3805 } 3806 DRM_INFO("GPU posting now...\n"); 3807 r = amdgpu_device_asic_init(adev); 3808 if (r) { 3809 dev_err(adev->dev, "gpu post error!\n"); 3810 goto failed; 3811 } 3812 } 3813 3814 if (adev->is_atom_fw) { 3815 /* Initialize clocks */ 3816 r = amdgpu_atomfirmware_get_clock_info(adev); 3817 if (r) { 3818 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n"); 3819 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3820 goto failed; 3821 } 3822 } else { 3823 /* Initialize clocks */ 3824 r = amdgpu_atombios_get_clock_info(adev); 3825 if (r) { 3826 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 3827 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0); 3828 goto failed; 3829 } 3830 /* init i2c buses */ 3831 if (!amdgpu_device_has_dc_support(adev)) 3832 amdgpu_atombios_i2c_init(adev); 3833 } 3834 3835 fence_driver_init: 3836 /* Fence driver */ 3837 r = amdgpu_fence_driver_sw_init(adev); 3838 if (r) { 3839 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n"); 3840 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0); 3841 goto failed; 3842 } 3843 3844 /* init the mode config */ 3845 drm_mode_config_init(adev_to_drm(adev)); 3846 3847 r = amdgpu_device_ip_init(adev); 3848 if (r) { 3849 /* failed in exclusive mode due to timeout */ 3850 if (amdgpu_sriov_vf(adev) && 3851 !amdgpu_sriov_runtime(adev) && 3852 amdgpu_virt_mmio_blocked(adev) && 3853 !amdgpu_virt_wait_reset(adev)) { 3854 dev_err(adev->dev, "VF exclusive mode timeout\n"); 3855 /* Don't send request since VF is inactive. 
*/ 3856 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; 3857 adev->virt.ops = NULL; 3858 r = -EAGAIN; 3859 goto release_ras_con; 3860 } 3861 dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); 3862 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 3863 goto release_ras_con; 3864 } 3865 3866 amdgpu_fence_driver_hw_init(adev); 3867 3868 dev_info(adev->dev, 3869 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 3870 adev->gfx.config.max_shader_engines, 3871 adev->gfx.config.max_sh_per_se, 3872 adev->gfx.config.max_cu_per_sh, 3873 adev->gfx.cu_info.number); 3874 3875 #ifdef __OpenBSD__ 3876 { 3877 const char *chip_name; 3878 uint32_t version = adev->ip_versions[GC_HWIP][0]; 3879 int maj, min, rev; 3880 3881 switch (adev->asic_type) { 3882 case CHIP_RAVEN: 3883 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 3884 chip_name = "RAVEN2"; 3885 else if (adev->apu_flags & AMD_APU_IS_PICASSO) 3886 chip_name = "PICASSO"; 3887 else 3888 chip_name = "RAVEN"; 3889 break; 3890 case CHIP_RENOIR: 3891 if (adev->apu_flags & AMD_APU_IS_RENOIR) 3892 chip_name = "RENOIR"; 3893 else 3894 chip_name = "GREEN_SARDINE"; 3895 break; 3896 default: 3897 chip_name = amdgpu_asic_name[adev->asic_type]; 3898 } 3899 3900 printf("%s: %s", adev->self.dv_xname, chip_name); 3901 /* show graphics/compute ip block version, not set on < GFX9 */ 3902 if (version) { 3903 maj = IP_VERSION_MAJ(version); 3904 min = IP_VERSION_MIN(version); 3905 rev = IP_VERSION_REV(version); 3906 printf(" GC %d.%d.%d", maj, min, rev); 3907 } 3908 printf(" %d CU rev 0x%02x\n", adev->gfx.cu_info.number, adev->rev_id); 3909 } 3910 #endif 3911 3912 adev->accel_working = true; 3913 3914 amdgpu_vm_check_compute_bug(adev); 3915 3916 /* Initialize the buffer migration limit. */ 3917 if (amdgpu_moverate >= 0) 3918 max_MBps = amdgpu_moverate; 3919 else 3920 max_MBps = 8; /* Allow 8 MB/s. */ 3921 /* Get a log2 for easy divisions. */ 3922 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); 3923 3924 r = amdgpu_pm_sysfs_init(adev); 3925 if (r) { 3926 adev->pm_sysfs_en = false; 3927 DRM_ERROR("registering pm debugfs failed (%d).\n", r); 3928 } else 3929 adev->pm_sysfs_en = true; 3930 3931 r = amdgpu_ucode_sysfs_init(adev); 3932 if (r) { 3933 adev->ucode_sysfs_en = false; 3934 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 3935 } else 3936 adev->ucode_sysfs_en = true; 3937 3938 r = amdgpu_psp_sysfs_init(adev); 3939 if (r) { 3940 adev->psp_sysfs_en = false; 3941 if (!amdgpu_sriov_vf(adev)) 3942 DRM_ERROR("Creating psp sysfs failed\n"); 3943 } else 3944 adev->psp_sysfs_en = true; 3945 3946 /* 3947 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost. 3948 * Otherwise the mgpu fan boost feature will be skipped due to the 3949 * gpu instance is counted less. 3950 */ 3951 amdgpu_register_gpu_instance(adev); 3952 3953 /* enable clockgating, etc. after ib tests, etc. since some blocks require 3954 * explicit gating rather than handling it automatically. 3955 */ 3956 if (!adev->gmc.xgmi.pending_reset) { 3957 r = amdgpu_device_ip_late_init(adev); 3958 if (r) { 3959 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); 3960 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); 3961 goto release_ras_con; 3962 } 3963 /* must succeed. 
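/*
 * Sketch of why the migration limit is stored as a log2: 1 MB/s is exactly
 * one byte per microsecond, so later throttling math can use shifts instead
 * of 64-bit divisions.  The helper names here are illustrative, not driver
 * code.
 */
static s64 example_us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 example_bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	return bytes >> adev->mm_stats.log2_max_MBps;
}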
*/ 3964 amdgpu_ras_resume(adev); 3965 queue_delayed_work(system_wq, &adev->delayed_init_work, 3966 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3967 } 3968 3969 if (amdgpu_sriov_vf(adev)) 3970 flush_delayed_work(&adev->delayed_init_work); 3971 3972 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); 3973 if (r) 3974 dev_err(adev->dev, "Could not create amdgpu device attr\n"); 3975 3976 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 3977 r = amdgpu_pmu_init(adev); 3978 if (r) 3979 dev_err(adev->dev, "amdgpu_pmu_init failed\n"); 3980 3981 /* Have stored pci confspace at hand for restore in sudden PCI error */ 3982 if (amdgpu_device_cache_pci_state(adev->pdev)) 3983 pci_restore_state(pdev); 3984 3985 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ 3986 /* this will fail for cards that aren't VGA class devices, just 3987 * ignore it */ 3988 #ifdef notyet 3989 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3990 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode); 3991 #endif 3992 3993 if (amdgpu_device_supports_px(ddev)) { 3994 px = true; 3995 vga_switcheroo_register_client(adev->pdev, 3996 &amdgpu_switcheroo_ops, px); 3997 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 3998 } 3999 4000 if (adev->gmc.xgmi.pending_reset) 4001 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work, 4002 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4003 4004 amdgpu_device_check_iommu_direct_map(adev); 4005 4006 return 0; 4007 4008 release_ras_con: 4009 amdgpu_release_ras_context(adev); 4010 4011 failed: 4012 amdgpu_vf_error_trans_all(adev); 4013 4014 return r; 4015 } 4016 4017 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) 4018 { 4019 STUB(); 4020 #ifdef notyet 4021 /* Clear all CPU mappings pointing to this device */ 4022 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); 4023 #endif 4024 4025 /* Unmap all mapped bars - Doorbell, registers and VRAM */ 4026 amdgpu_device_doorbell_fini(adev); 4027 4028 #ifdef __linux__ 4029 iounmap(adev->rmmio); 4030 adev->rmmio = NULL; 4031 if (adev->mman.aper_base_kaddr) 4032 iounmap(adev->mman.aper_base_kaddr); 4033 adev->mman.aper_base_kaddr = NULL; 4034 #else 4035 if (adev->rmmio_size > 0) 4036 bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh, 4037 adev->rmmio_size); 4038 adev->rmmio_size = 0; 4039 adev->rmmio = NULL; 4040 if (adev->mman.aper_base_kaddr) 4041 bus_space_unmap(adev->memt, adev->mman.aper_bsh, 4042 adev->gmc.visible_vram_size); 4043 adev->mman.aper_base_kaddr = NULL; 4044 #endif 4045 4046 /* Memory manager related */ 4047 if (!adev->gmc.xgmi.connected_to_cpu) { 4048 #ifdef __linux__ 4049 arch_phys_wc_del(adev->gmc.vram_mtrr); 4050 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); 4051 #else 4052 drm_mtrr_del(0, adev->gmc.aper_base, adev->gmc.aper_size, DRM_MTRR_WC); 4053 #endif 4054 } 4055 } 4056 4057 /** 4058 * amdgpu_device_fini_hw - tear down the driver 4059 * 4060 * @adev: amdgpu_device pointer 4061 * 4062 * Tear down the driver info (all asics). 4063 * Called at driver shutdown. 
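/*
 * Sketch of the cache-and-restore pattern behind
 * amdgpu_device_cache_pci_state()/amdgpu_device_load_pci_state().  Only the
 * PCI core calls are real; the helper and variable names are illustrative
 * assumptions.
 */
static int example_cache_pci_state(struct pci_dev *pdev,
				   struct pci_saved_state **cache)
{
	int r = pci_save_state(pdev);

	if (r)
		return r;

	kfree(*cache);
	*cache = pci_store_saved_state(pdev);	/* kmalloc'ed config-space snapshot */

	return *cache ? 0 : -ENOMEM;
}

static void example_load_pci_state(struct pci_dev *pdev,
				   struct pci_saved_state *cache)
{
	if (cache && !pci_load_saved_state(pdev, cache))
		pci_restore_state(pdev);	/* write the snapshot back after an error */
}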
4064 */ 4065 void amdgpu_device_fini_hw(struct amdgpu_device *adev) 4066 { 4067 dev_info(adev->dev, "amdgpu: finishing device.\n"); 4068 flush_delayed_work(&adev->delayed_init_work); 4069 adev->shutdown = true; 4070 4071 /* make sure IB test finished before entering exclusive mode 4072 * to avoid preemption on IB test 4073 * */ 4074 if (amdgpu_sriov_vf(adev)) { 4075 amdgpu_virt_request_full_gpu(adev, false); 4076 amdgpu_virt_fini_data_exchange(adev); 4077 } 4078 4079 /* disable all interrupts */ 4080 amdgpu_irq_disable_all(adev); 4081 if (adev->mode_info.mode_config_initialized){ 4082 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) 4083 drm_helper_force_disable_all(adev_to_drm(adev)); 4084 else 4085 drm_atomic_helper_shutdown(adev_to_drm(adev)); 4086 } 4087 amdgpu_fence_driver_hw_fini(adev); 4088 4089 if (adev->mman.initialized) { 4090 flush_delayed_work(&adev->mman.bdev.wq); 4091 ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 4092 } 4093 4094 if (adev->pm_sysfs_en) 4095 amdgpu_pm_sysfs_fini(adev); 4096 if (adev->ucode_sysfs_en) 4097 amdgpu_ucode_sysfs_fini(adev); 4098 if (adev->psp_sysfs_en) 4099 amdgpu_psp_sysfs_fini(adev); 4100 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); 4101 4102 /* disable ras feature must before hw fini */ 4103 amdgpu_ras_pre_fini(adev); 4104 4105 amdgpu_device_ip_fini_early(adev); 4106 4107 amdgpu_irq_fini_hw(adev); 4108 4109 if (adev->mman.initialized) 4110 ttm_device_clear_dma_mappings(&adev->mman.bdev); 4111 4112 amdgpu_gart_dummy_page_fini(adev); 4113 4114 amdgpu_device_unmap_mmio(adev); 4115 4116 } 4117 4118 void amdgpu_device_fini_sw(struct amdgpu_device *adev) 4119 { 4120 int idx; 4121 4122 amdgpu_fence_driver_sw_fini(adev); 4123 amdgpu_device_ip_fini(adev); 4124 release_firmware(adev->firmware.gpu_info_fw); 4125 adev->firmware.gpu_info_fw = NULL; 4126 adev->accel_working = false; 4127 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true)); 4128 4129 amdgpu_reset_fini(adev); 4130 4131 /* free i2c buses */ 4132 if (!amdgpu_device_has_dc_support(adev)) 4133 amdgpu_i2c_fini(adev); 4134 4135 if (amdgpu_emu_mode != 1) 4136 amdgpu_atombios_fini(adev); 4137 4138 kfree(adev->bios); 4139 adev->bios = NULL; 4140 if (amdgpu_device_supports_px(adev_to_drm(adev))) { 4141 vga_switcheroo_unregister_client(adev->pdev); 4142 vga_switcheroo_fini_domain_pm_ops(adev->dev); 4143 } 4144 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 4145 vga_client_unregister(adev->pdev); 4146 4147 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 4148 #ifdef __linux__ 4149 iounmap(adev->rmmio); 4150 adev->rmmio = NULL; 4151 #else 4152 if (adev->rmmio_size > 0) 4153 bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh, 4154 adev->rmmio_size); 4155 adev->rmmio_size = 0; 4156 adev->rmmio = NULL; 4157 #endif 4158 amdgpu_device_doorbell_fini(adev); 4159 drm_dev_exit(idx); 4160 } 4161 4162 if (IS_ENABLED(CONFIG_PERF_EVENTS)) 4163 amdgpu_pmu_fini(adev); 4164 if (adev->mman.discovery_bin) 4165 amdgpu_discovery_fini(adev); 4166 4167 amdgpu_reset_put_reset_domain(adev->reset_domain); 4168 adev->reset_domain = NULL; 4169 4170 kfree(adev->pci_state); 4171 4172 } 4173 4174 /** 4175 * amdgpu_device_evict_resources - evict device resources 4176 * @adev: amdgpu device object 4177 * 4178 * Evicts all ttm device resources(vram BOs, gart table) from the lru list 4179 * of the vram memory type. Mainly used for evicting device resources 4180 * at suspend time. 
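/*
 * Sketch of the intended two-stage teardown (call sites assumed and
 * simplified): the hw half runs from the PCI remove path so DMA, interrupts
 * and MMIO are quiesced immediately, while the sw half waits until the last
 * DRM reference is dropped.
 */
static void example_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *ddev = pci_get_drvdata(pdev);

	drm_dev_unplug(ddev);
	amdgpu_device_fini_hw(drm_to_adev(ddev));	/* stop hw access right away */
}

static void example_drm_release(struct drm_device *ddev)
{
	amdgpu_device_fini_sw(drm_to_adev(ddev));	/* free remaining software state */
}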
4181 * 4182 */ 4183 static int amdgpu_device_evict_resources(struct amdgpu_device *adev) 4184 { 4185 int ret; 4186 4187 /* No need to evict vram on APUs for suspend to ram or s2idle */ 4188 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) 4189 return 0; 4190 4191 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 4192 if (ret) 4193 DRM_WARN("evicting device resources failed\n"); 4194 return ret; 4195 } 4196 4197 /* 4198 * Suspend & resume. 4199 */ 4200 /** 4201 * amdgpu_device_suspend - initiate device suspend 4202 * 4203 * @dev: drm dev pointer 4204 * @fbcon : notify the fbdev of suspend 4205 * 4206 * Puts the hw in the suspend state (all asics). 4207 * Returns 0 for success or an error on failure. 4208 * Called at driver suspend. 4209 */ 4210 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 4211 { 4212 struct amdgpu_device *adev = drm_to_adev(dev); 4213 int r = 0; 4214 4215 if (adev->shutdown) 4216 return 0; 4217 4218 #ifdef notyet 4219 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4220 return 0; 4221 #endif 4222 4223 adev->in_suspend = true; 4224 4225 if (amdgpu_sriov_vf(adev)) { 4226 amdgpu_virt_fini_data_exchange(adev); 4227 r = amdgpu_virt_request_full_gpu(adev, false); 4228 if (r) 4229 return r; 4230 } 4231 4232 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) 4233 DRM_WARN("smart shift update failed\n"); 4234 4235 drm_kms_helper_poll_disable(dev); 4236 4237 if (fbcon) 4238 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); 4239 4240 cancel_delayed_work_sync(&adev->delayed_init_work); 4241 4242 amdgpu_ras_suspend(adev); 4243 4244 amdgpu_device_ip_suspend_phase1(adev); 4245 4246 if (!adev->in_s0ix) 4247 amdgpu_amdkfd_suspend(adev, adev->in_runpm); 4248 4249 r = amdgpu_device_evict_resources(adev); 4250 if (r) 4251 return r; 4252 4253 amdgpu_fence_driver_hw_fini(adev); 4254 4255 amdgpu_device_ip_suspend_phase2(adev); 4256 4257 if (amdgpu_sriov_vf(adev)) 4258 amdgpu_virt_release_full_gpu(adev, false); 4259 4260 return 0; 4261 } 4262 4263 /** 4264 * amdgpu_device_resume - initiate device resume 4265 * 4266 * @dev: drm dev pointer 4267 * @fbcon : notify the fbdev of resume 4268 * 4269 * Bring the hw back to operating state (all asics). 4270 * Returns 0 for success or an error on failure. 4271 * Called at driver resume. 
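/*
 * Sketch, assuming the usual dev_pm_ops wiring in the PCI driver (the
 * function names here are illustrative): system sleep funnels into the two
 * entry points below with fbdev notification enabled.
 */
static int example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}

static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}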
4272 */ 4273 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 4274 { 4275 struct amdgpu_device *adev = drm_to_adev(dev); 4276 int r = 0; 4277 4278 if (amdgpu_sriov_vf(adev)) { 4279 r = amdgpu_virt_request_full_gpu(adev, true); 4280 if (r) 4281 return r; 4282 } 4283 4284 #ifdef notyet 4285 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 4286 return 0; 4287 #endif 4288 4289 if (adev->in_s0ix) 4290 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); 4291 4292 /* post card */ 4293 if (amdgpu_device_need_post(adev)) { 4294 r = amdgpu_device_asic_init(adev); 4295 if (r) 4296 dev_err(adev->dev, "amdgpu asic init failed\n"); 4297 } 4298 4299 r = amdgpu_device_ip_resume(adev); 4300 4301 /* no matter what r is, always need to properly release full GPU */ 4302 if (amdgpu_sriov_vf(adev)) { 4303 amdgpu_virt_init_data_exchange(adev); 4304 amdgpu_virt_release_full_gpu(adev, true); 4305 } 4306 4307 if (r) { 4308 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r); 4309 return r; 4310 } 4311 amdgpu_fence_driver_hw_init(adev); 4312 4313 r = amdgpu_device_ip_late_init(adev); 4314 if (r) 4315 return r; 4316 4317 queue_delayed_work(system_wq, &adev->delayed_init_work, 4318 msecs_to_jiffies(AMDGPU_RESUME_MS)); 4319 4320 if (!adev->in_s0ix) { 4321 r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 4322 if (r) 4323 return r; 4324 } 4325 4326 /* Make sure IB tests flushed */ 4327 flush_delayed_work(&adev->delayed_init_work); 4328 4329 if (adev->in_s0ix) { 4330 /* re-enable gfxoff after IP resume. This re-enables gfxoff after 4331 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2(). 4332 */ 4333 amdgpu_gfx_off_ctrl(adev, true); 4334 DRM_DEBUG("will enable gfxoff for the mission mode\n"); 4335 } 4336 if (fbcon) 4337 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); 4338 4339 drm_kms_helper_poll_enable(dev); 4340 4341 amdgpu_ras_resume(adev); 4342 4343 /* 4344 * Most of the connector probing functions try to acquire runtime pm 4345 * refs to ensure that the GPU is powered on when connector polling is 4346 * performed. Since we're calling this from a runtime PM callback, 4347 * trying to acquire rpm refs will cause us to deadlock. 4348 * 4349 * Since we're guaranteed to be holding the rpm lock, it's safe to 4350 * temporarily disable the rpm helpers so this doesn't deadlock us. 4351 */ 4352 #if defined(CONFIG_PM) && defined(__linux__) 4353 dev->dev->power.disable_depth++; 4354 #endif 4355 if (!amdgpu_device_has_dc_support(adev)) 4356 drm_helper_hpd_irq_event(dev); 4357 else 4358 drm_kms_helper_hotplug_event(dev); 4359 #if defined(CONFIG_PM) && defined(__linux__) 4360 dev->dev->power.disable_depth--; 4361 #endif 4362 adev->in_suspend = false; 4363 4364 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 4365 DRM_WARN("smart shift update failed\n"); 4366 4367 return 0; 4368 } 4369 4370 /** 4371 * amdgpu_device_ip_check_soft_reset - did soft reset succeed 4372 * 4373 * @adev: amdgpu_device pointer 4374 * 4375 * The list of all the hardware IPs that make up the asic is walked and 4376 * the check_soft_reset callbacks are run. check_soft_reset determines 4377 * if the asic is still hung or not. 4378 * Returns true if any of the IPs are still in a hung state, false if not. 
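/*
 * Sketch: amdgpu_gfx_off_ctrl() is reference counted, so code that must
 * touch GFX registers outside of a scheduled job brackets the access as
 * below.  The helper name and the register argument are illustrative.
 */
static u32 example_read_gfx_reg(struct amdgpu_device *adev, u32 reg)
{
	u32 val;

	amdgpu_gfx_off_ctrl(adev, false);	/* hold GFX out of GFXOFF */
	val = RREG32(reg);
	amdgpu_gfx_off_ctrl(adev, true);	/* drop our reference */

	return val;
}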
4379 */ 4380 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) 4381 { 4382 int i; 4383 bool asic_hang = false; 4384 4385 if (amdgpu_sriov_vf(adev)) 4386 return true; 4387 4388 if (amdgpu_asic_need_full_reset(adev)) 4389 return true; 4390 4391 for (i = 0; i < adev->num_ip_blocks; i++) { 4392 if (!adev->ip_blocks[i].status.valid) 4393 continue; 4394 if (adev->ip_blocks[i].version->funcs->check_soft_reset) 4395 adev->ip_blocks[i].status.hang = 4396 adev->ip_blocks[i].version->funcs->check_soft_reset(adev); 4397 if (adev->ip_blocks[i].status.hang) { 4398 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); 4399 asic_hang = true; 4400 } 4401 } 4402 return asic_hang; 4403 } 4404 4405 /** 4406 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset 4407 * 4408 * @adev: amdgpu_device pointer 4409 * 4410 * The list of all the hardware IPs that make up the asic is walked and the 4411 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset 4412 * handles any IP specific hardware or software state changes that are 4413 * necessary for a soft reset to succeed. 4414 * Returns 0 on success, negative error code on failure. 4415 */ 4416 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) 4417 { 4418 int i, r = 0; 4419 4420 for (i = 0; i < adev->num_ip_blocks; i++) { 4421 if (!adev->ip_blocks[i].status.valid) 4422 continue; 4423 if (adev->ip_blocks[i].status.hang && 4424 adev->ip_blocks[i].version->funcs->pre_soft_reset) { 4425 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); 4426 if (r) 4427 return r; 4428 } 4429 } 4430 4431 return 0; 4432 } 4433 4434 /** 4435 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed 4436 * 4437 * @adev: amdgpu_device pointer 4438 * 4439 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu 4440 * reset is necessary to recover. 4441 * Returns true if a full asic reset is required, false if not. 4442 */ 4443 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) 4444 { 4445 int i; 4446 4447 if (amdgpu_asic_need_full_reset(adev)) 4448 return true; 4449 4450 for (i = 0; i < adev->num_ip_blocks; i++) { 4451 if (!adev->ip_blocks[i].status.valid) 4452 continue; 4453 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || 4454 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || 4455 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || 4456 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) || 4457 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { 4458 if (adev->ip_blocks[i].status.hang) { 4459 dev_info(adev->dev, "Some block need full reset!\n"); 4460 return true; 4461 } 4462 } 4463 } 4464 return false; 4465 } 4466 4467 /** 4468 * amdgpu_device_ip_soft_reset - do a soft reset 4469 * 4470 * @adev: amdgpu_device pointer 4471 * 4472 * The list of all the hardware IPs that make up the asic is walked and the 4473 * soft_reset callbacks are run if the block is hung. soft_reset handles any 4474 * IP specific hardware or software state changes that are necessary to soft 4475 * reset the IP. 4476 * Returns 0 on success, negative error code on failure. 
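/*
 * Sketch of how an IP block plugs into the soft-reset walks above.  The
 * callback names come from struct amd_ip_funcs as used in this file; the
 * "example" identifiers and the trivial bodies are assumptions.  The handle
 * passed in is the amdgpu_device, as in the calls above.
 */
static bool example_ip_check_soft_reset(void *handle)
{
	/* placeholder: read the block's status registers and report a hang */
	return false;
}

static int example_ip_soft_reset(void *handle)
{
	/* placeholder: toggle the block's reset bits and poll for completion */
	return 0;
}

static const struct amd_ip_funcs example_ip_funcs = {
	.name = "example",
	.check_soft_reset = example_ip_check_soft_reset,
	.soft_reset = example_ip_soft_reset,
	/* .pre_soft_reset/.post_soft_reset may stay NULL; the walks above
	 * only call them when present */
};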
4477 */ 4478 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) 4479 { 4480 int i, r = 0; 4481 4482 for (i = 0; i < adev->num_ip_blocks; i++) { 4483 if (!adev->ip_blocks[i].status.valid) 4484 continue; 4485 if (adev->ip_blocks[i].status.hang && 4486 adev->ip_blocks[i].version->funcs->soft_reset) { 4487 r = adev->ip_blocks[i].version->funcs->soft_reset(adev); 4488 if (r) 4489 return r; 4490 } 4491 } 4492 4493 return 0; 4494 } 4495 4496 /** 4497 * amdgpu_device_ip_post_soft_reset - clean up from soft reset 4498 * 4499 * @adev: amdgpu_device pointer 4500 * 4501 * The list of all the hardware IPs that make up the asic is walked and the 4502 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset 4503 * handles any IP specific hardware or software state changes that are 4504 * necessary after the IP has been soft reset. 4505 * Returns 0 on success, negative error code on failure. 4506 */ 4507 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) 4508 { 4509 int i, r = 0; 4510 4511 for (i = 0; i < adev->num_ip_blocks; i++) { 4512 if (!adev->ip_blocks[i].status.valid) 4513 continue; 4514 if (adev->ip_blocks[i].status.hang && 4515 adev->ip_blocks[i].version->funcs->post_soft_reset) 4516 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); 4517 if (r) 4518 return r; 4519 } 4520 4521 return 0; 4522 } 4523 4524 /** 4525 * amdgpu_device_recover_vram - Recover some VRAM contents 4526 * 4527 * @adev: amdgpu_device pointer 4528 * 4529 * Restores the contents of VRAM buffers from the shadows in GTT. Used to 4530 * restore things like GPUVM page tables after a GPU reset where 4531 * the contents of VRAM might be lost. 4532 * 4533 * Returns: 4534 * 0 on success, negative error code on failure. 4535 */ 4536 static int amdgpu_device_recover_vram(struct amdgpu_device *adev) 4537 { 4538 struct dma_fence *fence = NULL, *next = NULL; 4539 struct amdgpu_bo *shadow; 4540 struct amdgpu_bo_vm *vmbo; 4541 long r = 1, tmo; 4542 4543 if (amdgpu_sriov_runtime(adev)) 4544 tmo = msecs_to_jiffies(8000); 4545 else 4546 tmo = msecs_to_jiffies(100); 4547 4548 dev_info(adev->dev, "recover vram bo from shadow start\n"); 4549 mutex_lock(&adev->shadow_list_lock); 4550 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { 4551 shadow = &vmbo->bo; 4552 /* No need to recover an evicted BO */ 4553 if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4554 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || 4555 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) 4556 continue; 4557 4558 r = amdgpu_bo_restore_shadow(shadow, &next); 4559 if (r) 4560 break; 4561 4562 if (fence) { 4563 tmo = dma_fence_wait_timeout(fence, false, tmo); 4564 dma_fence_put(fence); 4565 fence = next; 4566 if (tmo == 0) { 4567 r = -ETIMEDOUT; 4568 break; 4569 } else if (tmo < 0) { 4570 r = tmo; 4571 break; 4572 } 4573 } else { 4574 fence = next; 4575 } 4576 } 4577 mutex_unlock(&adev->shadow_list_lock); 4578 4579 if (fence) 4580 tmo = dma_fence_wait_timeout(fence, false, tmo); 4581 dma_fence_put(fence); 4582 4583 if (r < 0 || tmo <= 0) { 4584 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo); 4585 return -EIO; 4586 } 4587 4588 dev_info(adev->dev, "recover vram bo from shadow done\n"); 4589 return 0; 4590 } 4591 4592 4593 /** 4594 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf 4595 * 4596 * @adev: amdgpu_device pointer 4597 * @from_hypervisor: request from hypervisor 4598 * 4599 * do VF FLR and reinitialize Asic 4600 * return 0 means succeeded 
otherwise failed 4601 */ 4602 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, 4603 bool from_hypervisor) 4604 { 4605 int r; 4606 struct amdgpu_hive_info *hive = NULL; 4607 int retry_limit = 0; 4608 4609 retry: 4610 amdgpu_amdkfd_pre_reset(adev); 4611 4612 if (from_hypervisor) 4613 r = amdgpu_virt_request_full_gpu(adev, true); 4614 else 4615 r = amdgpu_virt_reset_gpu(adev); 4616 if (r) 4617 return r; 4618 4619 /* Resume IP prior to SMC */ 4620 r = amdgpu_device_ip_reinit_early_sriov(adev); 4621 if (r) 4622 goto error; 4623 4624 amdgpu_virt_init_data_exchange(adev); 4625 4626 r = amdgpu_device_fw_loading(adev); 4627 if (r) 4628 return r; 4629 4630 /* now we are okay to resume SMC/CP/SDMA */ 4631 r = amdgpu_device_ip_reinit_late_sriov(adev); 4632 if (r) 4633 goto error; 4634 4635 hive = amdgpu_get_xgmi_hive(adev); 4636 /* Update PSP FW topology after reset */ 4637 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) 4638 r = amdgpu_xgmi_update_topology(hive, adev); 4639 4640 if (hive) 4641 amdgpu_put_xgmi_hive(hive); 4642 4643 if (!r) { 4644 amdgpu_irq_gpu_reset_resume_helper(adev); 4645 r = amdgpu_ib_ring_tests(adev); 4646 4647 amdgpu_amdkfd_post_reset(adev); 4648 } 4649 4650 error: 4651 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 4652 amdgpu_inc_vram_lost(adev); 4653 r = amdgpu_device_recover_vram(adev); 4654 } 4655 amdgpu_virt_release_full_gpu(adev, true); 4656 4657 if (AMDGPU_RETRY_SRIOV_RESET(r)) { 4658 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) { 4659 retry_limit++; 4660 goto retry; 4661 } else 4662 DRM_ERROR("GPU reset retry is beyond the retry limit\n"); 4663 } 4664 4665 return r; 4666 } 4667 4668 /** 4669 * amdgpu_device_has_job_running - check if there is any job in mirror list 4670 * 4671 * @adev: amdgpu_device pointer 4672 * 4673 * check if there is any job in mirror list 4674 */ 4675 bool amdgpu_device_has_job_running(struct amdgpu_device *adev) 4676 { 4677 int i; 4678 struct drm_sched_job *job; 4679 4680 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4681 struct amdgpu_ring *ring = adev->rings[i]; 4682 4683 if (!ring || !ring->sched.thread) 4684 continue; 4685 4686 spin_lock(&ring->sched.job_list_lock); 4687 job = list_first_entry_or_null(&ring->sched.pending_list, 4688 struct drm_sched_job, list); 4689 spin_unlock(&ring->sched.job_list_lock); 4690 if (job) 4691 return true; 4692 } 4693 return false; 4694 } 4695 4696 /** 4697 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery 4698 * 4699 * @adev: amdgpu_device pointer 4700 * 4701 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover 4702 * a hung GPU. 
4703 */ 4704 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) 4705 { 4706 4707 if (amdgpu_gpu_recovery == 0) 4708 goto disabled; 4709 4710 if (!amdgpu_device_ip_check_soft_reset(adev)) { 4711 dev_info(adev->dev,"Timeout, but no hardware hang detected.\n"); 4712 return false; 4713 } 4714 4715 if (amdgpu_sriov_vf(adev)) 4716 return true; 4717 4718 if (amdgpu_gpu_recovery == -1) { 4719 switch (adev->asic_type) { 4720 #ifdef CONFIG_DRM_AMDGPU_SI 4721 case CHIP_VERDE: 4722 case CHIP_TAHITI: 4723 case CHIP_PITCAIRN: 4724 case CHIP_OLAND: 4725 case CHIP_HAINAN: 4726 #endif 4727 #ifdef CONFIG_DRM_AMDGPU_CIK 4728 case CHIP_KAVERI: 4729 case CHIP_KABINI: 4730 case CHIP_MULLINS: 4731 #endif 4732 case CHIP_CARRIZO: 4733 case CHIP_STONEY: 4734 case CHIP_CYAN_SKILLFISH: 4735 goto disabled; 4736 default: 4737 break; 4738 } 4739 } 4740 4741 return true; 4742 4743 disabled: 4744 dev_info(adev->dev, "GPU recovery disabled.\n"); 4745 return false; 4746 } 4747 4748 int amdgpu_device_mode1_reset(struct amdgpu_device *adev) 4749 { 4750 u32 i; 4751 int ret = 0; 4752 4753 amdgpu_atombios_scratch_regs_engine_hung(adev, true); 4754 4755 dev_info(adev->dev, "GPU mode1 reset\n"); 4756 4757 /* disable BM */ 4758 pci_clear_master(adev->pdev); 4759 4760 amdgpu_device_cache_pci_state(adev->pdev); 4761 4762 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 4763 dev_info(adev->dev, "GPU smu mode1 reset\n"); 4764 ret = amdgpu_dpm_mode1_reset(adev); 4765 } else { 4766 dev_info(adev->dev, "GPU psp mode1 reset\n"); 4767 ret = psp_gpu_reset(adev); 4768 } 4769 4770 if (ret) 4771 dev_err(adev->dev, "GPU mode1 reset failed\n"); 4772 4773 amdgpu_device_load_pci_state(adev->pdev); 4774 4775 /* wait for asic to come out of reset */ 4776 for (i = 0; i < adev->usec_timeout; i++) { 4777 u32 memsize = adev->nbio.funcs->get_memsize(adev); 4778 4779 if (memsize != 0xffffffff) 4780 break; 4781 udelay(1); 4782 } 4783 4784 amdgpu_atombios_scratch_regs_engine_hung(adev, false); 4785 return ret; 4786 } 4787 4788 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, 4789 struct amdgpu_reset_context *reset_context) 4790 { 4791 int i, r = 0; 4792 struct amdgpu_job *job = NULL; 4793 bool need_full_reset = 4794 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4795 4796 if (reset_context->reset_req_dev == adev) 4797 job = reset_context->job; 4798 4799 if (amdgpu_sriov_vf(adev)) { 4800 /* stop the data exchange thread */ 4801 amdgpu_virt_fini_data_exchange(adev); 4802 } 4803 4804 amdgpu_fence_driver_isr_toggle(adev, true); 4805 4806 /* block all schedulers and reset given job's ring */ 4807 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 4808 struct amdgpu_ring *ring = adev->rings[i]; 4809 4810 if (!ring || !ring->sched.thread) 4811 continue; 4812 4813 /*clear job fence from fence drv to avoid force_completion 4814 *leave NULL and vm flush fence in fence drv */ 4815 amdgpu_fence_driver_clear_job_fences(ring); 4816 4817 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ 4818 amdgpu_fence_driver_force_completion(ring); 4819 } 4820 4821 amdgpu_fence_driver_isr_toggle(adev, false); 4822 4823 if (job && job->vm) 4824 drm_sched_increase_karma(&job->base); 4825 4826 r = amdgpu_reset_prepare_hwcontext(adev, reset_context); 4827 /* If reset handler not implemented, continue; otherwise return */ 4828 if (r == -ENOSYS) 4829 r = 0; 4830 else 4831 return r; 4832 4833 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */ 4834 if (!amdgpu_sriov_vf(adev)) { 4835 4836 if (!need_full_reset) 4837 
need_full_reset = amdgpu_device_ip_need_full_reset(adev); 4838 4839 if (!need_full_reset && amdgpu_gpu_recovery) { 4840 amdgpu_device_ip_pre_soft_reset(adev); 4841 r = amdgpu_device_ip_soft_reset(adev); 4842 amdgpu_device_ip_post_soft_reset(adev); 4843 if (r || amdgpu_device_ip_check_soft_reset(adev)) { 4844 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n"); 4845 need_full_reset = true; 4846 } 4847 } 4848 4849 if (need_full_reset) 4850 r = amdgpu_device_ip_suspend(adev); 4851 if (need_full_reset) 4852 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4853 else 4854 clear_bit(AMDGPU_NEED_FULL_RESET, 4855 &reset_context->flags); 4856 } 4857 4858 return r; 4859 } 4860 4861 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev) 4862 { 4863 int i; 4864 4865 lockdep_assert_held(&adev->reset_domain->sem); 4866 4867 for (i = 0; i < adev->num_regs; i++) { 4868 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]); 4869 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i], 4870 adev->reset_dump_reg_value[i]); 4871 } 4872 4873 return 0; 4874 } 4875 4876 #ifdef CONFIG_DEV_COREDUMP 4877 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset, 4878 size_t count, void *data, size_t datalen) 4879 { 4880 struct drm_printer p; 4881 struct amdgpu_device *adev = data; 4882 struct drm_print_iterator iter; 4883 int i; 4884 4885 iter.data = buffer; 4886 iter.offset = 0; 4887 iter.start = offset; 4888 iter.remain = count; 4889 4890 p = drm_coredump_printer(&iter); 4891 4892 drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); 4893 drm_printf(&p, "kernel: " UTS_RELEASE "\n"); 4894 drm_printf(&p, "module: " KBUILD_MODNAME "\n"); 4895 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec); 4896 if (adev->reset_task_info.pid) 4897 drm_printf(&p, "process_name: %s PID: %d\n", 4898 adev->reset_task_info.process_name, 4899 adev->reset_task_info.pid); 4900 4901 if (adev->reset_vram_lost) 4902 drm_printf(&p, "VRAM is lost due to GPU reset!\n"); 4903 if (adev->num_regs) { 4904 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); 4905 4906 for (i = 0; i < adev->num_regs; i++) 4907 drm_printf(&p, "0x%08x: 0x%08x\n", 4908 adev->reset_dump_reg_list[i], 4909 adev->reset_dump_reg_value[i]); 4910 } 4911 4912 return count - iter.remain; 4913 } 4914 4915 static void amdgpu_devcoredump_free(void *data) 4916 { 4917 } 4918 4919 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev) 4920 { 4921 struct drm_device *dev = adev_to_drm(adev); 4922 4923 ktime_get_ts64(&adev->reset_time); 4924 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL, 4925 amdgpu_devcoredump_read, amdgpu_devcoredump_free); 4926 } 4927 #endif 4928 4929 int amdgpu_do_asic_reset(struct list_head *device_list_handle, 4930 struct amdgpu_reset_context *reset_context) 4931 { 4932 struct amdgpu_device *tmp_adev = NULL; 4933 bool need_full_reset, skip_hw_reset, vram_lost = false; 4934 int r = 0; 4935 bool gpu_reset_for_dev_remove = 0; 4936 4937 /* Try reset handler method first */ 4938 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 4939 reset_list); 4940 amdgpu_reset_reg_dumps(tmp_adev); 4941 4942 reset_context->reset_device_list = device_list_handle; 4943 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); 4944 /* If reset handler not implemented, continue; otherwise return */ 4945 if (r == -ENOSYS) 4946 r = 0; 4947 else 4948 return r; 4949 4950 /* Reset handler not implemented, use the default method */ 4951 
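/*
 * Sketch of the userspace side: the dump registered above can be pulled
 * through the standard devcoredump class device.  The instance number in
 * the path is illustrative, and writing to the data file releases the dump.
 */
#include <stdio.h>

int main(void)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen("/sys/class/devcoredump/devcd1/data", "r");

	if (!f)
		return 1;
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);

	return 0;
}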
need_full_reset = 4952 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4953 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags); 4954 4955 gpu_reset_for_dev_remove = 4956 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && 4957 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 4958 4959 /* 4960 * ASIC reset has to be done on all XGMI hive nodes ASAP 4961 * to allow proper links negotiation in FW (within 1 sec) 4962 */ 4963 if (!skip_hw_reset && need_full_reset) { 4964 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4965 /* For XGMI run all resets in parallel to speed up the process */ 4966 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 4967 tmp_adev->gmc.xgmi.pending_reset = false; 4968 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) 4969 r = -EALREADY; 4970 } else 4971 r = amdgpu_asic_reset(tmp_adev); 4972 4973 if (r) { 4974 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s", 4975 r, adev_to_drm(tmp_adev)->unique); 4976 break; 4977 } 4978 } 4979 4980 /* For XGMI wait for all resets to complete before proceed */ 4981 if (!r) { 4982 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4983 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 4984 flush_work(&tmp_adev->xgmi_reset_work); 4985 r = tmp_adev->asic_reset_res; 4986 if (r) 4987 break; 4988 } 4989 } 4990 } 4991 } 4992 4993 if (!r && amdgpu_ras_intr_triggered()) { 4994 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 4995 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops && 4996 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) 4997 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev); 4998 } 4999 5000 amdgpu_ras_intr_cleared(); 5001 } 5002 5003 /* Since the mode1 reset affects base ip blocks, the 5004 * phase1 ip blocks need to be resumed. Otherwise there 5005 * will be a BIOS signature error and the psp bootloader 5006 * can't load kdb on the next amdgpu install. 5007 */ 5008 if (gpu_reset_for_dev_remove) { 5009 list_for_each_entry(tmp_adev, device_list_handle, reset_list) 5010 amdgpu_device_ip_resume_phase1(tmp_adev); 5011 5012 goto end; 5013 } 5014 5015 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5016 if (need_full_reset) { 5017 /* post card */ 5018 r = amdgpu_device_asic_init(tmp_adev); 5019 if (r) { 5020 dev_warn(tmp_adev->dev, "asic atom init failed!"); 5021 } else { 5022 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); 5023 r = amdgpu_amdkfd_resume_iommu(tmp_adev); 5024 if (r) 5025 goto out; 5026 5027 r = amdgpu_device_ip_resume_phase1(tmp_adev); 5028 if (r) 5029 goto out; 5030 5031 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 5032 #ifdef CONFIG_DEV_COREDUMP 5033 tmp_adev->reset_vram_lost = vram_lost; 5034 memset(&tmp_adev->reset_task_info, 0, 5035 sizeof(tmp_adev->reset_task_info)); 5036 if (reset_context->job && reset_context->job->vm) 5037 tmp_adev->reset_task_info = 5038 reset_context->job->vm->task_info; 5039 amdgpu_reset_capture_coredumpm(tmp_adev); 5040 #endif 5041 if (vram_lost) { 5042 DRM_INFO("VRAM is lost due to GPU reset!\n"); 5043 amdgpu_inc_vram_lost(tmp_adev); 5044 } 5045 5046 r = amdgpu_device_fw_loading(tmp_adev); 5047 if (r) 5048 return r; 5049 5050 r = amdgpu_device_ip_resume_phase2(tmp_adev); 5051 if (r) 5052 goto out; 5053 5054 if (vram_lost) 5055 amdgpu_device_fill_reset_magic(tmp_adev); 5056 5057 /* 5058 * Add this ASIC as tracked as reset was already 5059 * complete successfully. 
5060 */ 5061 amdgpu_register_gpu_instance(tmp_adev); 5062 5063 if (!reset_context->hive && 5064 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 5065 amdgpu_xgmi_add_device(tmp_adev); 5066 5067 r = amdgpu_device_ip_late_init(tmp_adev); 5068 if (r) 5069 goto out; 5070 5071 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); 5072 5073 /* 5074 * The GPU enters a bad state once the number of faulty pages 5075 * retired by ECC reaches the threshold, and RAS 5076 * recovery is scheduled next. So add one check 5077 * here to abort recovery if the bad page threshold has 5078 * indeed been exceeded, and remind the user to 5079 * retire this GPU or set a bigger 5080 * bad_page_threshold value to fix this when 5081 * probing the driver again. 5082 */ 5083 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) { 5084 /* must succeed. */ 5085 amdgpu_ras_resume(tmp_adev); 5086 } else { 5087 r = -EINVAL; 5088 goto out; 5089 } 5090 5091 /* Update PSP FW topology after reset */ 5092 if (reset_context->hive && 5093 tmp_adev->gmc.xgmi.num_physical_nodes > 1) 5094 r = amdgpu_xgmi_update_topology( 5095 reset_context->hive, tmp_adev); 5096 } 5097 } 5098 5099 out: 5100 if (!r) { 5101 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); 5102 r = amdgpu_ib_ring_tests(tmp_adev); 5103 if (r) { 5104 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r); 5105 need_full_reset = true; 5106 r = -EAGAIN; 5107 goto end; 5108 } 5109 } 5110 5111 if (!r) 5112 r = amdgpu_device_recover_vram(tmp_adev); 5113 else 5114 tmp_adev->asic_reset_res = r; 5115 } 5116 5117 end: 5118 if (need_full_reset) 5119 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5120 else 5121 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5122 return r; 5123 } 5124 5125 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev) 5126 { 5127 5128 switch (amdgpu_asic_reset_method(adev)) { 5129 case AMD_RESET_METHOD_MODE1: 5130 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; 5131 break; 5132 case AMD_RESET_METHOD_MODE2: 5133 adev->mp1_state = PP_MP1_STATE_RESET; 5134 break; 5135 default: 5136 adev->mp1_state = PP_MP1_STATE_NONE; 5137 break; 5138 } 5139 5140 5141 } 5142 5143 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev) 5144 { 5145 amdgpu_vf_error_trans_all(adev); 5146 adev->mp1_state = PP_MP1_STATE_NONE; 5147 } 5148 5149 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev) 5150 { 5151 STUB(); 5152 #ifdef notyet 5153 struct pci_dev *p = NULL; 5154 5155 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 5156 adev->pdev->bus->number, 1); 5157 if (p) { 5158 pm_runtime_enable(&(p->dev)); 5159 pm_runtime_resume(&(p->dev)); 5160 } 5161 #endif 5162 } 5163 5164 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) 5165 { 5166 enum amd_reset_method reset_method; 5167 struct pci_dev *p = NULL; 5168 u64 expires; 5169 5170 /* 5171 * For now, only BACO and mode1 reset are confirmed 5172 * to suffer the audio issue if the audio device is not properly suspended.
5173 */ 5174 reset_method = amdgpu_asic_reset_method(adev); 5175 if ((reset_method != AMD_RESET_METHOD_BACO) && 5176 (reset_method != AMD_RESET_METHOD_MODE1)) 5177 return -EINVAL; 5178 5179 STUB(); 5180 return -ENOSYS; 5181 #ifdef notyet 5182 5183 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 5184 adev->pdev->bus->number, 1); 5185 if (!p) 5186 return -ENODEV; 5187 5188 expires = pm_runtime_autosuspend_expiration(&(p->dev)); 5189 if (!expires) 5190 /* 5191 * If we cannot get the audio device autosuspend delay, 5192 * a fixed 4S interval will be used. Considering 3S is 5193 * the audio controller default autosuspend delay setting. 5194 * 4S used here is guaranteed to cover that. 5195 */ 5196 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL; 5197 5198 while (!pm_runtime_status_suspended(&(p->dev))) { 5199 if (!pm_runtime_suspend(&(p->dev))) 5200 break; 5201 5202 if (expires < ktime_get_mono_fast_ns()) { 5203 dev_warn(adev->dev, "failed to suspend display audio\n"); 5204 pci_dev_put(p); 5205 /* TODO: abort the succeeding gpu reset? */ 5206 return -ETIMEDOUT; 5207 } 5208 } 5209 5210 pm_runtime_disable(&(p->dev)); 5211 5212 pci_dev_put(p); 5213 return 0; 5214 #endif 5215 } 5216 5217 static void amdgpu_device_recheck_guilty_jobs( 5218 struct amdgpu_device *adev, struct list_head *device_list_handle, 5219 struct amdgpu_reset_context *reset_context) 5220 { 5221 int i, r = 0; 5222 5223 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5224 struct amdgpu_ring *ring = adev->rings[i]; 5225 int ret = 0; 5226 struct drm_sched_job *s_job; 5227 5228 if (!ring || !ring->sched.thread) 5229 continue; 5230 5231 s_job = list_first_entry_or_null(&ring->sched.pending_list, 5232 struct drm_sched_job, list); 5233 if (s_job == NULL) 5234 continue; 5235 5236 /* clear job's guilty and depend the folowing step to decide the real one */ 5237 drm_sched_reset_karma(s_job); 5238 drm_sched_resubmit_jobs_ext(&ring->sched, 1); 5239 5240 if (!s_job->s_fence->parent) { 5241 DRM_WARN("Failed to get a HW fence for job!"); 5242 continue; 5243 } 5244 5245 ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout); 5246 if (ret == 0) { /* timeout */ 5247 DRM_ERROR("Found the real bad job! 
ring:%s, job_id:%llx\n", 5248 ring->sched.name, s_job->id); 5249 5250 5251 amdgpu_fence_driver_isr_toggle(adev, true); 5252 5253 /* Clear this failed job from fence array */ 5254 amdgpu_fence_driver_clear_job_fences(ring); 5255 5256 amdgpu_fence_driver_isr_toggle(adev, false); 5257 5258 /* Since the job won't signal and we go for 5259 * another resubmit drop this parent pointer 5260 */ 5261 dma_fence_put(s_job->s_fence->parent); 5262 s_job->s_fence->parent = NULL; 5263 5264 /* set guilty */ 5265 drm_sched_increase_karma(s_job); 5266 amdgpu_reset_prepare_hwcontext(adev, reset_context); 5267 retry: 5268 /* do hw reset */ 5269 if (amdgpu_sriov_vf(adev)) { 5270 amdgpu_virt_fini_data_exchange(adev); 5271 r = amdgpu_device_reset_sriov(adev, false); 5272 if (r) 5273 adev->asic_reset_res = r; 5274 } else { 5275 clear_bit(AMDGPU_SKIP_HW_RESET, 5276 &reset_context->flags); 5277 r = amdgpu_do_asic_reset(device_list_handle, 5278 reset_context); 5279 if (r && r == -EAGAIN) 5280 goto retry; 5281 } 5282 5283 /* 5284 * add reset counter so that the following 5285 * resubmitted job could flush vmid 5286 */ 5287 atomic_inc(&adev->gpu_reset_counter); 5288 continue; 5289 } 5290 5291 /* got the hw fence, signal finished fence */ 5292 atomic_dec(ring->sched.score); 5293 dma_fence_get(&s_job->s_fence->finished); 5294 dma_fence_signal(&s_job->s_fence->finished); 5295 dma_fence_put(&s_job->s_fence->finished); 5296 5297 /* remove node from list and free the job */ 5298 spin_lock(&ring->sched.job_list_lock); 5299 list_del_init(&s_job->list); 5300 spin_unlock(&ring->sched.job_list_lock); 5301 ring->sched.ops->free_job(s_job); 5302 } 5303 } 5304 5305 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) 5306 { 5307 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5308 5309 #if defined(CONFIG_DEBUG_FS) 5310 if (!amdgpu_sriov_vf(adev)) 5311 cancel_work(&adev->reset_work); 5312 #endif 5313 5314 if (adev->kfd.dev) 5315 cancel_work(&adev->kfd.reset_work); 5316 5317 if (amdgpu_sriov_vf(adev)) 5318 cancel_work(&adev->virt.flr_work); 5319 5320 if (con && adev->ras_enabled) 5321 cancel_work(&con->recovery_work); 5322 5323 } 5324 5325 5326 /** 5327 * amdgpu_device_gpu_recover - reset the asic and recover scheduler 5328 * 5329 * @adev: amdgpu_device pointer 5330 * @job: which job trigger hang 5331 * 5332 * Attempt to reset the GPU if it has hung (all asics). 5333 * Attempt to do soft-reset or full-reset and reinitialize Asic 5334 * Returns 0 for success or an error on failure. 5335 */ 5336 5337 int amdgpu_device_gpu_recover(struct amdgpu_device *adev, 5338 struct amdgpu_job *job, 5339 struct amdgpu_reset_context *reset_context) 5340 { 5341 struct list_head device_list, *device_list_handle = NULL; 5342 bool job_signaled = false; 5343 struct amdgpu_hive_info *hive = NULL; 5344 struct amdgpu_device *tmp_adev = NULL; 5345 int i, r = 0; 5346 bool need_emergency_restart = false; 5347 bool audio_suspended = false; 5348 int tmp_vram_lost_counter; 5349 bool gpu_reset_for_dev_remove = false; 5350 5351 gpu_reset_for_dev_remove = 5352 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) && 5353 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags); 5354 5355 /* 5356 * Special case: RAS triggered and full reset isn't supported 5357 */ 5358 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); 5359 5360 /* 5361 * Flush RAM to disk so that after reboot 5362 * the user can read log and see why the system rebooted. 
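/*
 * Sketch of a typical caller of amdgpu_device_gpu_recover(), for example a
 * job timeout handler.  Only reset_context fields that appear in this file
 * are populated; the wrapper name is illustrative.
 */
static void example_trigger_recovery(struct amdgpu_device *adev,
				     struct amdgpu_job *job)
{
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.reset_req_dev = adev;
	reset_context.job = job;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, job, &reset_context);
}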
5363 */ 5364 if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) { 5365 DRM_WARN("Emergency reboot."); 5366 5367 #ifdef notyet 5368 ksys_sync_helper(); 5369 emergency_restart(); 5370 #else 5371 panic("emergency_restart"); 5372 #endif 5373 } 5374 5375 dev_info(adev->dev, "GPU %s begin!\n", 5376 need_emergency_restart ? "jobs stop":"reset"); 5377 5378 if (!amdgpu_sriov_vf(adev)) 5379 hive = amdgpu_get_xgmi_hive(adev); 5380 if (hive) 5381 mutex_lock(&hive->hive_lock); 5382 5383 reset_context->job = job; 5384 reset_context->hive = hive; 5385 /* 5386 * Build list of devices to reset. 5387 * In case we are in XGMI hive mode, resort the device list 5388 * to put adev in the 1st position. 5389 */ 5390 INIT_LIST_HEAD(&device_list); 5391 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) { 5392 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { 5393 list_add_tail(&tmp_adev->reset_list, &device_list); 5394 if (gpu_reset_for_dev_remove && adev->shutdown) 5395 tmp_adev->shutdown = true; 5396 } 5397 if (!list_is_first(&adev->reset_list, &device_list)) 5398 list_rotate_to_front(&adev->reset_list, &device_list); 5399 device_list_handle = &device_list; 5400 } else { 5401 list_add_tail(&adev->reset_list, &device_list); 5402 device_list_handle = &device_list; 5403 } 5404 5405 /* We need to lock reset domain only once both for XGMI and single device */ 5406 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5407 reset_list); 5408 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain); 5409 5410 /* block all schedulers and reset given job's ring */ 5411 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5412 5413 amdgpu_device_set_mp1_state(tmp_adev); 5414 5415 /* 5416 * Try to put the audio codec into suspend state 5417 * before gpu reset started. 5418 * 5419 * Due to the power domain of the graphics device 5420 * is shared with AZ power domain. Without this, 5421 * we may change the audio hardware from behind 5422 * the audio driver's back. That will trigger 5423 * some audio codec errors. 5424 */ 5425 if (!amdgpu_device_suspend_display_audio(tmp_adev)) 5426 audio_suspended = true; 5427 5428 amdgpu_ras_set_error_query_ready(tmp_adev, false); 5429 5430 cancel_delayed_work_sync(&tmp_adev->delayed_init_work); 5431 5432 if (!amdgpu_sriov_vf(tmp_adev)) 5433 amdgpu_amdkfd_pre_reset(tmp_adev); 5434 5435 /* 5436 * Mark these ASICs to be reseted as untracked first 5437 * And add them back after reset completed 5438 */ 5439 amdgpu_unregister_gpu_instance(tmp_adev); 5440 5441 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true); 5442 5443 /* disable ras on ALL IPs */ 5444 if (!need_emergency_restart && 5445 amdgpu_device_ip_need_full_reset(tmp_adev)) 5446 amdgpu_ras_suspend(tmp_adev); 5447 5448 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5449 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5450 5451 if (!ring || !ring->sched.thread) 5452 continue; 5453 5454 drm_sched_stop(&ring->sched, job ? &job->base : NULL); 5455 5456 if (need_emergency_restart) 5457 amdgpu_job_stop_all_jobs_on_sched(&ring->sched); 5458 } 5459 atomic_inc(&tmp_adev->gpu_reset_counter); 5460 } 5461 5462 if (need_emergency_restart) 5463 goto skip_sched_resume; 5464 5465 /* 5466 * Must check guilty signal here since after this point all old 5467 * HW fences are force signaled. 
5468 * 5469 * job->base holds a reference to parent fence 5470 */ 5471 if (job && dma_fence_is_signaled(&job->hw_fence)) { 5472 job_signaled = true; 5473 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); 5474 goto skip_hw_reset; 5475 } 5476 5477 retry: /* Rest of adevs pre asic reset from XGMI hive. */ 5478 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5479 if (gpu_reset_for_dev_remove) { 5480 /* Workaroud for ASICs need to disable SMC first */ 5481 amdgpu_device_smu_fini_early(tmp_adev); 5482 } 5483 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context); 5484 /*TODO Should we stop ?*/ 5485 if (r) { 5486 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ", 5487 r, adev_to_drm(tmp_adev)->unique); 5488 tmp_adev->asic_reset_res = r; 5489 } 5490 5491 /* 5492 * Drop all pending non scheduler resets. Scheduler resets 5493 * were already dropped during drm_sched_stop 5494 */ 5495 amdgpu_device_stop_pending_resets(tmp_adev); 5496 } 5497 5498 tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); 5499 /* Actual ASIC resets if needed.*/ 5500 /* Host driver will handle XGMI hive reset for SRIOV */ 5501 if (amdgpu_sriov_vf(adev)) { 5502 r = amdgpu_device_reset_sriov(adev, job ? false : true); 5503 if (r) 5504 adev->asic_reset_res = r; 5505 5506 /* Aldebaran supports ras in SRIOV, so need resume ras during reset */ 5507 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 5508 amdgpu_ras_resume(adev); 5509 } else { 5510 r = amdgpu_do_asic_reset(device_list_handle, reset_context); 5511 if (r && r == -EAGAIN) 5512 goto retry; 5513 5514 if (!r && gpu_reset_for_dev_remove) 5515 goto recover_end; 5516 } 5517 5518 skip_hw_reset: 5519 5520 /* Post ASIC reset for all devs .*/ 5521 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5522 5523 /* 5524 * Sometimes a later bad compute job can block a good gfx job as gfx 5525 * and compute ring share internal GC HW mutually. We add an additional 5526 * guilty jobs recheck step to find the real guilty job, it synchronously 5527 * submits and pends for the first job being signaled. If it gets timeout, 5528 * we identify it as a real guilty job. 5529 */ 5530 if (amdgpu_gpu_recovery == 2 && 5531 !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter))) 5532 amdgpu_device_recheck_guilty_jobs( 5533 tmp_adev, device_list_handle, reset_context); 5534 5535 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 5536 struct amdgpu_ring *ring = tmp_adev->rings[i]; 5537 5538 if (!ring || !ring->sched.thread) 5539 continue; 5540 5541 /* No point to resubmit jobs if we didn't HW reset*/ 5542 if (!tmp_adev->asic_reset_res && !job_signaled) 5543 drm_sched_resubmit_jobs(&ring->sched); 5544 5545 drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); 5546 } 5547 5548 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) 5549 amdgpu_mes_self_test(tmp_adev); 5550 5551 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { 5552 drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); 5553 } 5554 5555 if (tmp_adev->asic_reset_res) 5556 r = tmp_adev->asic_reset_res; 5557 5558 tmp_adev->asic_reset_res = 0; 5559 5560 if (r) { 5561 /* bad news, how to tell it to userspace ? 
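/*
 * Sketch of the userspace side of "how to tell it to userspace": contexts
 * can query their reset state through libdrm_amdgpu.  This assumes the
 * libdrm_amdgpu API; header include paths depend on the libdrm install and
 * the flag handling below is illustrative, not taken from this file.
 */
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int example_context_was_reset(amdgpu_context_handle ctx)
{
	uint64_t flags = 0;

	if (amdgpu_cs_query_reset_state2(ctx, &flags))
		return 0;	/* query failed, assume no reset */

	return !!(flags & AMDGPU_CTX_QUERY2_FLAGS_RESET);
}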
*/ 5562 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); 5563 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); 5564 } else { 5565 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); 5566 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) 5567 DRM_WARN("smart shift update failed\n"); 5568 } 5569 } 5570 5571 skip_sched_resume: 5572 list_for_each_entry(tmp_adev, device_list_handle, reset_list) { 5573 /* unlock kfd: SRIOV would do it separately */ 5574 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) 5575 amdgpu_amdkfd_post_reset(tmp_adev); 5576 5577 /* kfd_post_reset will do nothing if the kfd device is not initialized, 5578 * so bring up kfd here if it was not initialized before 5579 */ 5580 if (!adev->kfd.init_complete) 5581 amdgpu_amdkfd_device_init(adev); 5582 5583 if (audio_suspended) 5584 amdgpu_device_resume_display_audio(tmp_adev); 5585 5586 amdgpu_device_unset_mp1_state(tmp_adev); 5587 } 5588 5589 recover_end: 5590 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5591 reset_list); 5592 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); 5593 5594 if (hive) { 5595 mutex_unlock(&hive->hive_lock); 5596 amdgpu_put_xgmi_hive(hive); 5597 } 5598 5599 if (r) 5600 dev_info(adev->dev, "GPU reset end with ret = %d\n", r); 5601 5602 atomic_set(&adev->reset_domain->reset_res, r); 5603 return r; 5604 } 5605 5606 /** 5607 * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIE slot 5608 * 5609 * @adev: amdgpu_device pointer 5610 * 5611 * Fetches and stores in the driver the PCIE capabilities (gen speed 5612 * and lanes) of the slot the device is in. Handles APUs and 5613 * virtualized environments where PCIE config space may not be available.
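/*
 * Sketch of the PCI core helper this function builds on, shown in
 * isolation.  The mapping of link speeds to "gen" numbers below is an
 * illustrative convention, not driver code.
 */
static int example_pcie_gen(struct pci_dev *pdev)
{
	switch (pcie_get_speed_cap(pdev)) {
	case PCIE_SPEED_32_0GT: return 5;
	case PCIE_SPEED_16_0GT: return 4;
	case PCIE_SPEED_8_0GT:  return 3;
	case PCIE_SPEED_5_0GT:  return 2;
	case PCIE_SPEED_2_5GT:  return 1;
	default:                return 0;	/* unknown */
	}
}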
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);

	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}

/**
 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
 *
 * @adev: amdgpu_device pointer
 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
 *
 * Return true if @peer_adev can access (DMA) @adev through the PCIe
 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
 * @peer_adev.
 */
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev)
{
#ifdef CONFIG_HSA_AMD_P2P
	uint64_t address_mask = peer_adev->dev->dma_mask ?
		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
	resource_size_t aper_limit =
		adev->gmc.aper_base + adev->gmc.aper_size - 1;
	bool p2p_access =
		!adev->gmc.xgmi.connected_to_cpu &&
		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);

	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
		!(adev->gmc.aper_base & address_mask ||
		  aper_limit & address_mask));
#else
	return false;
#endif
}
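
/*
 * BACO ("Bus Active, Chip Off") helpers: the two functions below ask the DPM
 * code to power the chip down/up while the bus interface stays active. When
 * RAS is enabled, doorbell interrupts are disabled on entry and re-enabled on
 * exit. Both return 0 on success, -ENOTSUPP if the device does not support
 * BACO, or a negative error code from the DPM call.
 */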
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;

	if (ras && adev->ras_enabled &&
	    adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	if (amdgpu_passthrough(adev) &&
	    adev->nbio.funcs->clear_doorbell_interrupt)
		adev->nbio.funcs->clear_doorbell_interrupt(adev);

	return 0;
}

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	STUB();
	return 0;
#ifdef notyet
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adev->pci_channel_state = state;

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
		 * Locking adev->reset_domain->sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
		amdgpu_device_lock_reset_domain(adev->reset_domain);
		amdgpu_device_set_mp1_state(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
#endif
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This is called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	STUB();
	return PCI_ERS_RESULT_RECOVERED;
#ifdef notyet
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	struct amdgpu_reset_context reset_context;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	memset(&reset_context, 0, sizeof(reset_context));

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	drm_msleep(500);

	/* Restore PCI config space */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);

	adev->no_hw_access = true;
	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
	adev->no_hw_access = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(&device_list, &reset_context);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unset_mp1_state(adev);
		amdgpu_device_unlock_reset_domain(adev->reset_domain);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
#endif
}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's OK to
 * resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	STUB();
#ifdef notyet
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	/* Only continue execution for the case of pci_channel_io_frozen */
	if (adev->pci_channel_state != pci_channel_io_frozen)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unset_mp1_state(adev);
	amdgpu_device_unlock_reset_domain(adev->reset_domain);
#endif
}
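
/*
 * PCI config space save/restore helpers used around GPU reset:
 * amdgpu_device_cache_pci_state() snapshots the config space and
 * amdgpu_device_load_pci_state() restores that snapshot. Neither is hooked up
 * here yet (the real implementations sit under "notyet"); both currently
 * return false.
 */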
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	return false;
#ifdef notyet
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
#endif
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	STUB();
	return false;
#ifdef notyet
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
#endif
}
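
/*
 * HDP (Host Data Path) flush/invalidate helpers. CPU accesses to VRAM go
 * through the HDP block, so CPU writes may need an explicit HDP flush before
 * the GPU reads them (and an HDP invalidate in the opposite direction). The
 * helpers below skip the operation where it is not needed: APUs on bare metal
 * and devices whose XGMI fabric is connected to the CPU.
 */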
void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	if (ring && ring->funcs->emit_hdp_flush)
		amdgpu_ring_emit_hdp_flush(ring);
	else
		amdgpu_asic_flush_hdp(adev, ring);
}

void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
		struct amdgpu_ring *ring)
{
#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
		return;
#endif
	if (adev->gmc.xgmi.connected_to_cpu)
		return;

	amdgpu_asic_invalidate_hdp(adev, ring);
}

int amdgpu_in_reset(struct amdgpu_device *adev)
{
	return atomic_read(&adev->reset_domain->in_gpu_reset);
}

/**
 * amdgpu_device_halt() - bring hardware to some kind of halt state
 *
 * @adev: amdgpu_device pointer
 *
 * Bring hardware to some kind of halt state so that no one can touch it
 * any more. It helps to maintain error context when an error occurs.
 * Compared to a simple hang, the system will stay stable, at least for SSH
 * access. It should then be trivial to inspect the hardware state and
 * see what's going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc),
 *    clears all CPU mappings to the device and disallows remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in flight DMA operations
 */
void amdgpu_device_halt(struct amdgpu_device *adev)
{
	struct pci_dev *pdev = adev->pdev;
	struct drm_device *ddev = adev_to_drm(adev);

	drm_dev_unplug(ddev);

	amdgpu_irq_disable_all(adev);

	amdgpu_fence_driver_hw_fini(adev);

	adev->no_hw_access = true;

	amdgpu_device_unmap_mmio(adev);

	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}
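
/*
 * Indirect access to PCIe port registers: the register offset (reg * 4) is
 * written to the NBIO-provided index register and the value is then read from
 * or written to the matching data register. The (void)RREG32() read-backs
 * make sure the preceding write has landed before the next access, and
 * pcie_idx_lock serializes users of the index/data pair.
 */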
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg * 4);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_switch_gang - switch to a new gang
 * @adev: amdgpu_device pointer
 * @gang: the gang to switch to
 *
 * Try to switch to a new gang.
 * Returns: NULL if we switched to the new gang, or a reference to the current
 * gang leader if the switch can't be done yet.
 */
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang)
{
	struct dma_fence *old = NULL;

	do {
		dma_fence_put(old);
		rcu_read_lock();
		old = dma_fence_get_rcu_safe(&adev->gang_submit);
		rcu_read_unlock();

		/* Nothing to do if the requested gang is already current */
		if (old == gang)
			break;

		/* The current gang leader hasn't finished yet, return it */
		if (!dma_fence_is_signaled(old))
			return old;

		/* Retry if somebody else installed another gang in the meantime */
	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
			 old, gang) != old);

	dma_fence_put(old);
	return NULL;
}

bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery */
		if (!adev->ip_versions[DCE_HWIP][0] ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}