1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 */ 25 26 #include <linux/kthread.h> 27 #include <linux/pci.h> 28 #include <linux/uaccess.h> 29 #include <linux/pm_runtime.h> 30 31 #include "amdgpu.h" 32 #include "amdgpu_pm.h" 33 #include "amdgpu_dm_debugfs.h" 34 #include "amdgpu_ras.h" 35 #include "amdgpu_rap.h" 36 #include "amdgpu_securedisplay.h" 37 #include "amdgpu_fw_attestation.h" 38 #include "amdgpu_umr.h" 39 40 #include "amdgpu_reset.h" 41 #include "amdgpu_psp_ta.h" 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 /** 46 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes 47 * 48 * @read: True if reading 49 * @f: open file handle 50 * @buf: User buffer to write/read to 51 * @size: Number of bytes to write/read 52 * @pos: Offset to seek to 53 * 54 * This debugfs entry has special meaning on the offset being sought. 55 * Various bits have different meanings: 56 * 57 * Bit 62: Indicates a GRBM bank switch is needed 58 * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is 59 * zero) 60 * Bits 24..33: The SE or ME selector if needed 61 * Bits 34..43: The SH (or SA) or PIPE selector if needed 62 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed 63 * 64 * Bit 23: Indicates that the PM power gating lock should be held 65 * This is necessary to read registers that might be 66 * unreliable during a power gating transistion. 67 * 68 * The lower bits are the BYTE offset of the register to read. This 69 * allows reading multiple registers in a single call and having 70 * the returned size reflect that. 71 */ 72 static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, 73 char __user *buf, size_t size, loff_t *pos) 74 { 75 struct amdgpu_device *adev = file_inode(f)->i_private; 76 ssize_t result = 0; 77 int r; 78 bool pm_pg_lock, use_bank, use_ring; 79 unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid; 80 81 pm_pg_lock = use_bank = use_ring = false; 82 instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0; 83 84 if (size & 0x3 || *pos & 0x3 || 85 ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61)))) 86 return -EINVAL; 87 88 /* are we reading registers for which a PG lock is necessary? */ 89 pm_pg_lock = (*pos >> 23) & 1; 90 91 if (*pos & (1ULL << 62)) { 92 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; 93 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; 94 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; 95 96 if (se_bank == 0x3FF) 97 se_bank = 0xFFFFFFFF; 98 if (sh_bank == 0x3FF) 99 sh_bank = 0xFFFFFFFF; 100 if (instance_bank == 0x3FF) 101 instance_bank = 0xFFFFFFFF; 102 use_bank = true; 103 } else if (*pos & (1ULL << 61)) { 104 105 me = (*pos & GENMASK_ULL(33, 24)) >> 24; 106 pipe = (*pos & GENMASK_ULL(43, 34)) >> 34; 107 queue = (*pos & GENMASK_ULL(53, 44)) >> 44; 108 vmid = (*pos & GENMASK_ULL(58, 54)) >> 54; 109 110 use_ring = true; 111 } else { 112 use_bank = use_ring = false; 113 } 114 115 *pos &= (1UL << 22) - 1; 116 117 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 118 if (r < 0) { 119 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 120 return r; 121 } 122 123 r = amdgpu_virt_enable_access_debugfs(adev); 124 if (r < 0) { 125 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 126 return r; 127 } 128 129 if (use_bank) { 130 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || 131 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { 132 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 133 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 134 amdgpu_virt_disable_access_debugfs(adev); 135 return -EINVAL; 136 } 137 mutex_lock(&adev->grbm_idx_mutex); 138 amdgpu_gfx_select_se_sh(adev, se_bank, 139 sh_bank, instance_bank); 140 } else if (use_ring) { 141 mutex_lock(&adev->srbm_mutex); 142 amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid); 143 } 144 145 if (pm_pg_lock) 146 mutex_lock(&adev->pm.mutex); 147 148 while (size) { 149 uint32_t value; 150 151 if (read) { 152 value = RREG32(*pos >> 2); 153 r = put_user(value, (uint32_t *)buf); 154 } else { 155 r = get_user(value, (uint32_t *)buf); 156 if (!r) 157 amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value); 158 } 159 if (r) { 160 result = r; 161 goto end; 162 } 163 164 result += 4; 165 buf += 4; 166 *pos += 4; 167 size -= 4; 168 } 169 170 end: 171 if (use_bank) { 172 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 173 mutex_unlock(&adev->grbm_idx_mutex); 174 } else if (use_ring) { 175 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); 176 mutex_unlock(&adev->srbm_mutex); 177 } 178 179 if (pm_pg_lock) 180 mutex_unlock(&adev->pm.mutex); 181 182 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 183 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 184 185 amdgpu_virt_disable_access_debugfs(adev); 186 return result; 187 } 188 189 /* 190 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers 191 */ 192 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, 193 size_t size, loff_t *pos) 194 { 195 return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos); 196 } 197 198 /* 199 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers 200 */ 201 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, 202 size_t size, loff_t *pos) 203 { 204 return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); 205 } 206 207 static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) 208 { 209 struct amdgpu_debugfs_regs2_data *rd; 210 211 rd = kzalloc(sizeof *rd, GFP_KERNEL); 212 if (!rd) 213 return -ENOMEM; 214 rd->adev = file_inode(file)->i_private; 215 file->private_data = rd; 216 mutex_init(&rd->lock); 217 218 return 0; 219 } 220 221 static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) 222 { 223 struct amdgpu_debugfs_regs2_data *rd = file->private_data; 224 mutex_destroy(&rd->lock); 225 kfree(file->private_data); 226 return 0; 227 } 228 229 static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en) 230 { 231 struct amdgpu_debugfs_regs2_data *rd = f->private_data; 232 struct amdgpu_device *adev = rd->adev; 233 ssize_t result = 0; 234 int r; 235 uint32_t value; 236 237 if (size & 0x3 || offset & 0x3) 238 return -EINVAL; 239 240 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 241 if (r < 0) { 242 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 243 return r; 244 } 245 246 r = amdgpu_virt_enable_access_debugfs(adev); 247 if (r < 0) { 248 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 249 return r; 250 } 251 252 mutex_lock(&rd->lock); 253 254 if (rd->id.use_grbm) { 255 if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || 256 (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { 257 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 258 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 259 amdgpu_virt_disable_access_debugfs(adev); 260 mutex_unlock(&rd->lock); 261 return -EINVAL; 262 } 263 mutex_lock(&adev->grbm_idx_mutex); 264 amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, 265 rd->id.grbm.sh, 266 rd->id.grbm.instance); 267 } 268 269 if (rd->id.use_srbm) { 270 mutex_lock(&adev->srbm_mutex); 271 amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, 272 rd->id.srbm.queue, rd->id.srbm.vmid); 273 } 274 275 if (rd->id.pg_lock) 276 mutex_lock(&adev->pm.mutex); 277 278 while (size) { 279 if (!write_en) { 280 value = RREG32(offset >> 2); 281 r = put_user(value, (uint32_t *)buf); 282 } else { 283 r = get_user(value, (uint32_t *)buf); 284 if (!r) 285 amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value); 286 } 287 if (r) { 288 result = r; 289 goto end; 290 } 291 offset += 4; 292 size -= 4; 293 result += 4; 294 buf += 4; 295 } 296 end: 297 if (rd->id.use_grbm) { 298 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 299 mutex_unlock(&adev->grbm_idx_mutex); 300 } 301 302 if (rd->id.use_srbm) { 303 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); 304 mutex_unlock(&adev->srbm_mutex); 305 } 306 307 if (rd->id.pg_lock) 308 mutex_unlock(&adev->pm.mutex); 309 310 mutex_unlock(&rd->lock); 311 312 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 313 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 314 315 amdgpu_virt_disable_access_debugfs(adev); 316 return result; 317 } 318 319 static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) 320 { 321 struct amdgpu_debugfs_regs2_data *rd = f->private_data; 322 int r; 323 324 switch (cmd) { 325 case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: 326 mutex_lock(&rd->lock); 327 r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id); 328 mutex_unlock(&rd->lock); 329 return r ? -EINVAL : 0; 330 default: 331 return -EINVAL; 332 } 333 return 0; 334 } 335 336 static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) 337 { 338 return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0); 339 } 340 341 static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) 342 { 343 return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); 344 } 345 346 347 /** 348 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register 349 * 350 * @f: open file handle 351 * @buf: User buffer to store read data in 352 * @size: Number of bytes to read 353 * @pos: Offset to seek to 354 * 355 * The lower bits are the BYTE offset of the register to read. This 356 * allows reading multiple registers in a single call and having 357 * the returned size reflect that. 358 */ 359 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, 360 size_t size, loff_t *pos) 361 { 362 struct amdgpu_device *adev = file_inode(f)->i_private; 363 ssize_t result = 0; 364 int r; 365 366 if (size & 0x3 || *pos & 0x3) 367 return -EINVAL; 368 369 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 370 if (r < 0) { 371 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 372 return r; 373 } 374 375 r = amdgpu_virt_enable_access_debugfs(adev); 376 if (r < 0) { 377 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 378 return r; 379 } 380 381 while (size) { 382 uint32_t value; 383 384 value = RREG32_PCIE(*pos); 385 r = put_user(value, (uint32_t *)buf); 386 if (r) 387 goto out; 388 389 result += 4; 390 buf += 4; 391 *pos += 4; 392 size -= 4; 393 } 394 395 r = result; 396 out: 397 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 398 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 399 amdgpu_virt_disable_access_debugfs(adev); 400 return r; 401 } 402 403 /** 404 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register 405 * 406 * @f: open file handle 407 * @buf: User buffer to write data from 408 * @size: Number of bytes to write 409 * @pos: Offset to seek to 410 * 411 * The lower bits are the BYTE offset of the register to write. This 412 * allows writing multiple registers in a single call and having 413 * the returned size reflect that. 414 */ 415 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, 416 size_t size, loff_t *pos) 417 { 418 struct amdgpu_device *adev = file_inode(f)->i_private; 419 ssize_t result = 0; 420 int r; 421 422 if (size & 0x3 || *pos & 0x3) 423 return -EINVAL; 424 425 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 426 if (r < 0) { 427 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 428 return r; 429 } 430 431 r = amdgpu_virt_enable_access_debugfs(adev); 432 if (r < 0) { 433 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 434 return r; 435 } 436 437 while (size) { 438 uint32_t value; 439 440 r = get_user(value, (uint32_t *)buf); 441 if (r) 442 goto out; 443 444 WREG32_PCIE(*pos, value); 445 446 result += 4; 447 buf += 4; 448 *pos += 4; 449 size -= 4; 450 } 451 452 r = result; 453 out: 454 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 455 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 456 amdgpu_virt_disable_access_debugfs(adev); 457 return r; 458 } 459 460 /** 461 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register 462 * 463 * @f: open file handle 464 * @buf: User buffer to store read data in 465 * @size: Number of bytes to read 466 * @pos: Offset to seek to 467 * 468 * The lower bits are the BYTE offset of the register to read. This 469 * allows reading multiple registers in a single call and having 470 * the returned size reflect that. 471 */ 472 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, 473 size_t size, loff_t *pos) 474 { 475 struct amdgpu_device *adev = file_inode(f)->i_private; 476 ssize_t result = 0; 477 int r; 478 479 if (size & 0x3 || *pos & 0x3) 480 return -EINVAL; 481 482 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 483 if (r < 0) { 484 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 485 return r; 486 } 487 488 r = amdgpu_virt_enable_access_debugfs(adev); 489 if (r < 0) { 490 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 491 return r; 492 } 493 494 while (size) { 495 uint32_t value; 496 497 value = RREG32_DIDT(*pos >> 2); 498 r = put_user(value, (uint32_t *)buf); 499 if (r) 500 goto out; 501 502 result += 4; 503 buf += 4; 504 *pos += 4; 505 size -= 4; 506 } 507 508 r = result; 509 out: 510 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 511 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 512 amdgpu_virt_disable_access_debugfs(adev); 513 return r; 514 } 515 516 /** 517 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register 518 * 519 * @f: open file handle 520 * @buf: User buffer to write data from 521 * @size: Number of bytes to write 522 * @pos: Offset to seek to 523 * 524 * The lower bits are the BYTE offset of the register to write. This 525 * allows writing multiple registers in a single call and having 526 * the returned size reflect that. 527 */ 528 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, 529 size_t size, loff_t *pos) 530 { 531 struct amdgpu_device *adev = file_inode(f)->i_private; 532 ssize_t result = 0; 533 int r; 534 535 if (size & 0x3 || *pos & 0x3) 536 return -EINVAL; 537 538 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 539 if (r < 0) { 540 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 541 return r; 542 } 543 544 r = amdgpu_virt_enable_access_debugfs(adev); 545 if (r < 0) { 546 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 547 return r; 548 } 549 550 while (size) { 551 uint32_t value; 552 553 r = get_user(value, (uint32_t *)buf); 554 if (r) 555 goto out; 556 557 WREG32_DIDT(*pos >> 2, value); 558 559 result += 4; 560 buf += 4; 561 *pos += 4; 562 size -= 4; 563 } 564 565 r = result; 566 out: 567 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 568 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 569 amdgpu_virt_disable_access_debugfs(adev); 570 return r; 571 } 572 573 /** 574 * amdgpu_debugfs_regs_smc_read - Read from a SMC register 575 * 576 * @f: open file handle 577 * @buf: User buffer to store read data in 578 * @size: Number of bytes to read 579 * @pos: Offset to seek to 580 * 581 * The lower bits are the BYTE offset of the register to read. This 582 * allows reading multiple registers in a single call and having 583 * the returned size reflect that. 584 */ 585 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, 586 size_t size, loff_t *pos) 587 { 588 struct amdgpu_device *adev = file_inode(f)->i_private; 589 ssize_t result = 0; 590 int r; 591 592 if (!adev->smc_rreg) 593 return -EPERM; 594 595 if (size & 0x3 || *pos & 0x3) 596 return -EINVAL; 597 598 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 599 if (r < 0) { 600 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 601 return r; 602 } 603 604 r = amdgpu_virt_enable_access_debugfs(adev); 605 if (r < 0) { 606 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 607 return r; 608 } 609 610 while (size) { 611 uint32_t value; 612 613 value = RREG32_SMC(*pos); 614 r = put_user(value, (uint32_t *)buf); 615 if (r) 616 goto out; 617 618 result += 4; 619 buf += 4; 620 *pos += 4; 621 size -= 4; 622 } 623 624 r = result; 625 out: 626 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 627 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 628 amdgpu_virt_disable_access_debugfs(adev); 629 return r; 630 } 631 632 /** 633 * amdgpu_debugfs_regs_smc_write - Write to a SMC register 634 * 635 * @f: open file handle 636 * @buf: User buffer to write data from 637 * @size: Number of bytes to write 638 * @pos: Offset to seek to 639 * 640 * The lower bits are the BYTE offset of the register to write. This 641 * allows writing multiple registers in a single call and having 642 * the returned size reflect that. 643 */ 644 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, 645 size_t size, loff_t *pos) 646 { 647 struct amdgpu_device *adev = file_inode(f)->i_private; 648 ssize_t result = 0; 649 int r; 650 651 if (!adev->smc_wreg) 652 return -EPERM; 653 654 if (size & 0x3 || *pos & 0x3) 655 return -EINVAL; 656 657 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 658 if (r < 0) { 659 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 660 return r; 661 } 662 663 r = amdgpu_virt_enable_access_debugfs(adev); 664 if (r < 0) { 665 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 666 return r; 667 } 668 669 while (size) { 670 uint32_t value; 671 672 r = get_user(value, (uint32_t *)buf); 673 if (r) 674 goto out; 675 676 WREG32_SMC(*pos, value); 677 678 result += 4; 679 buf += 4; 680 *pos += 4; 681 size -= 4; 682 } 683 684 r = result; 685 out: 686 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 687 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 688 amdgpu_virt_disable_access_debugfs(adev); 689 return r; 690 } 691 692 /** 693 * amdgpu_debugfs_gca_config_read - Read from gfx config data 694 * 695 * @f: open file handle 696 * @buf: User buffer to store read data in 697 * @size: Number of bytes to read 698 * @pos: Offset to seek to 699 * 700 * This file is used to access configuration data in a somewhat 701 * stable fashion. The format is a series of DWORDs with the first 702 * indicating which revision it is. New content is appended to the 703 * end so that older software can still read the data. 704 */ 705 706 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, 707 size_t size, loff_t *pos) 708 { 709 struct amdgpu_device *adev = file_inode(f)->i_private; 710 ssize_t result = 0; 711 int r; 712 uint32_t *config, no_regs = 0; 713 714 if (size & 0x3 || *pos & 0x3) 715 return -EINVAL; 716 717 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); 718 if (!config) 719 return -ENOMEM; 720 721 /* version, increment each time something is added */ 722 config[no_regs++] = 5; 723 config[no_regs++] = adev->gfx.config.max_shader_engines; 724 config[no_regs++] = adev->gfx.config.max_tile_pipes; 725 config[no_regs++] = adev->gfx.config.max_cu_per_sh; 726 config[no_regs++] = adev->gfx.config.max_sh_per_se; 727 config[no_regs++] = adev->gfx.config.max_backends_per_se; 728 config[no_regs++] = adev->gfx.config.max_texture_channel_caches; 729 config[no_regs++] = adev->gfx.config.max_gprs; 730 config[no_regs++] = adev->gfx.config.max_gs_threads; 731 config[no_regs++] = adev->gfx.config.max_hw_contexts; 732 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; 733 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; 734 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; 735 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; 736 config[no_regs++] = adev->gfx.config.num_tile_pipes; 737 config[no_regs++] = adev->gfx.config.backend_enable_mask; 738 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; 739 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; 740 config[no_regs++] = adev->gfx.config.shader_engine_tile_size; 741 config[no_regs++] = adev->gfx.config.num_gpus; 742 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; 743 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; 744 config[no_regs++] = adev->gfx.config.gb_addr_config; 745 config[no_regs++] = adev->gfx.config.num_rbs; 746 747 /* rev==1 */ 748 config[no_regs++] = adev->rev_id; 749 config[no_regs++] = lower_32_bits(adev->pg_flags); 750 config[no_regs++] = lower_32_bits(adev->cg_flags); 751 752 /* rev==2 */ 753 config[no_regs++] = adev->family; 754 config[no_regs++] = adev->external_rev_id; 755 756 /* rev==3 */ 757 config[no_regs++] = adev->pdev->device; 758 config[no_regs++] = adev->pdev->revision; 759 config[no_regs++] = adev->pdev->subsystem_device; 760 config[no_regs++] = adev->pdev->subsystem_vendor; 761 762 /* rev==4 APU flag */ 763 config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0; 764 765 /* rev==5 PG/CG flag upper 32bit */ 766 config[no_regs++] = upper_32_bits(adev->pg_flags); 767 config[no_regs++] = upper_32_bits(adev->cg_flags); 768 769 while (size && (*pos < no_regs * 4)) { 770 uint32_t value; 771 772 value = config[*pos >> 2]; 773 r = put_user(value, (uint32_t *)buf); 774 if (r) { 775 kfree(config); 776 return r; 777 } 778 779 result += 4; 780 buf += 4; 781 *pos += 4; 782 size -= 4; 783 } 784 785 kfree(config); 786 return result; 787 } 788 789 /** 790 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors 791 * 792 * @f: open file handle 793 * @buf: User buffer to store read data in 794 * @size: Number of bytes to read 795 * @pos: Offset to seek to 796 * 797 * The offset is treated as the BYTE address of one of the sensors 798 * enumerated in amd/include/kgd_pp_interface.h under the 799 * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK 800 * you would use the offset 3 * 4 = 12. 801 */ 802 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, 803 size_t size, loff_t *pos) 804 { 805 struct amdgpu_device *adev = file_inode(f)->i_private; 806 int idx, x, outsize, r, valuesize; 807 uint32_t values[16]; 808 809 if (size & 3 || *pos & 0x3) 810 return -EINVAL; 811 812 if (!adev->pm.dpm_enabled) 813 return -EINVAL; 814 815 /* convert offset to sensor number */ 816 idx = *pos >> 2; 817 818 valuesize = sizeof(values); 819 820 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 821 if (r < 0) { 822 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 823 return r; 824 } 825 826 r = amdgpu_virt_enable_access_debugfs(adev); 827 if (r < 0) { 828 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 829 return r; 830 } 831 832 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); 833 834 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 835 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 836 837 if (r) { 838 amdgpu_virt_disable_access_debugfs(adev); 839 return r; 840 } 841 842 if (size > valuesize) { 843 amdgpu_virt_disable_access_debugfs(adev); 844 return -EINVAL; 845 } 846 847 outsize = 0; 848 x = 0; 849 if (!r) { 850 while (size) { 851 r = put_user(values[x++], (int32_t *)buf); 852 buf += 4; 853 size -= 4; 854 outsize += 4; 855 } 856 } 857 858 amdgpu_virt_disable_access_debugfs(adev); 859 return !r ? outsize : r; 860 } 861 862 /** amdgpu_debugfs_wave_read - Read WAVE STATUS data 863 * 864 * @f: open file handle 865 * @buf: User buffer to store read data in 866 * @size: Number of bytes to read 867 * @pos: Offset to seek to 868 * 869 * The offset being sought changes which wave that the status data 870 * will be returned for. The bits are used as follows: 871 * 872 * Bits 0..6: Byte offset into data 873 * Bits 7..14: SE selector 874 * Bits 15..22: SH/SA selector 875 * Bits 23..30: CU/{WGP+SIMD} selector 876 * Bits 31..36: WAVE ID selector 877 * Bits 37..44: SIMD ID selector 878 * 879 * The returned data begins with one DWORD of version information 880 * Followed by WAVE STATUS registers relevant to the GFX IP version 881 * being used. See gfx_v8_0_read_wave_data() for an example output. 882 */ 883 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, 884 size_t size, loff_t *pos) 885 { 886 struct amdgpu_device *adev = f->f_inode->i_private; 887 int r, x; 888 ssize_t result = 0; 889 uint32_t offset, se, sh, cu, wave, simd, data[32]; 890 891 if (size & 3 || *pos & 3) 892 return -EINVAL; 893 894 /* decode offset */ 895 offset = (*pos & GENMASK_ULL(6, 0)); 896 se = (*pos & GENMASK_ULL(14, 7)) >> 7; 897 sh = (*pos & GENMASK_ULL(22, 15)) >> 15; 898 cu = (*pos & GENMASK_ULL(30, 23)) >> 23; 899 wave = (*pos & GENMASK_ULL(36, 31)) >> 31; 900 simd = (*pos & GENMASK_ULL(44, 37)) >> 37; 901 902 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 903 if (r < 0) { 904 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 905 return r; 906 } 907 908 r = amdgpu_virt_enable_access_debugfs(adev); 909 if (r < 0) { 910 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 911 return r; 912 } 913 914 /* switch to the specific se/sh/cu */ 915 mutex_lock(&adev->grbm_idx_mutex); 916 amdgpu_gfx_select_se_sh(adev, se, sh, cu); 917 918 x = 0; 919 if (adev->gfx.funcs->read_wave_data) 920 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); 921 922 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); 923 mutex_unlock(&adev->grbm_idx_mutex); 924 925 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 926 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 927 928 if (!x) { 929 amdgpu_virt_disable_access_debugfs(adev); 930 return -EINVAL; 931 } 932 933 while (size && (offset < x * 4)) { 934 uint32_t value; 935 936 value = data[offset >> 2]; 937 r = put_user(value, (uint32_t *)buf); 938 if (r) { 939 amdgpu_virt_disable_access_debugfs(adev); 940 return r; 941 } 942 943 result += 4; 944 buf += 4; 945 offset += 4; 946 size -= 4; 947 } 948 949 amdgpu_virt_disable_access_debugfs(adev); 950 return result; 951 } 952 953 /** amdgpu_debugfs_gpr_read - Read wave gprs 954 * 955 * @f: open file handle 956 * @buf: User buffer to store read data in 957 * @size: Number of bytes to read 958 * @pos: Offset to seek to 959 * 960 * The offset being sought changes which wave that the status data 961 * will be returned for. The bits are used as follows: 962 * 963 * Bits 0..11: Byte offset into data 964 * Bits 12..19: SE selector 965 * Bits 20..27: SH/SA selector 966 * Bits 28..35: CU/{WGP+SIMD} selector 967 * Bits 36..43: WAVE ID selector 968 * Bits 37..44: SIMD ID selector 969 * Bits 52..59: Thread selector 970 * Bits 60..61: Bank selector (VGPR=0,SGPR=1) 971 * 972 * The return data comes from the SGPR or VGPR register bank for 973 * the selected operational unit. 974 */ 975 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, 976 size_t size, loff_t *pos) 977 { 978 struct amdgpu_device *adev = f->f_inode->i_private; 979 int r; 980 ssize_t result = 0; 981 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; 982 983 if (size > 4096 || size & 3 || *pos & 3) 984 return -EINVAL; 985 986 /* decode offset */ 987 offset = (*pos & GENMASK_ULL(11, 0)) >> 2; 988 se = (*pos & GENMASK_ULL(19, 12)) >> 12; 989 sh = (*pos & GENMASK_ULL(27, 20)) >> 20; 990 cu = (*pos & GENMASK_ULL(35, 28)) >> 28; 991 wave = (*pos & GENMASK_ULL(43, 36)) >> 36; 992 simd = (*pos & GENMASK_ULL(51, 44)) >> 44; 993 thread = (*pos & GENMASK_ULL(59, 52)) >> 52; 994 bank = (*pos & GENMASK_ULL(61, 60)) >> 60; 995 996 data = kcalloc(1024, sizeof(*data), GFP_KERNEL); 997 if (!data) 998 return -ENOMEM; 999 1000 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1001 if (r < 0) 1002 goto err; 1003 1004 r = amdgpu_virt_enable_access_debugfs(adev); 1005 if (r < 0) 1006 goto err; 1007 1008 /* switch to the specific se/sh/cu */ 1009 mutex_lock(&adev->grbm_idx_mutex); 1010 amdgpu_gfx_select_se_sh(adev, se, sh, cu); 1011 1012 if (bank == 0) { 1013 if (adev->gfx.funcs->read_wave_vgprs) 1014 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); 1015 } else { 1016 if (adev->gfx.funcs->read_wave_sgprs) 1017 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); 1018 } 1019 1020 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); 1021 mutex_unlock(&adev->grbm_idx_mutex); 1022 1023 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1024 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1025 1026 while (size) { 1027 uint32_t value; 1028 1029 value = data[result >> 2]; 1030 r = put_user(value, (uint32_t *)buf); 1031 if (r) { 1032 amdgpu_virt_disable_access_debugfs(adev); 1033 goto err; 1034 } 1035 1036 result += 4; 1037 buf += 4; 1038 size -= 4; 1039 } 1040 1041 kfree(data); 1042 amdgpu_virt_disable_access_debugfs(adev); 1043 return result; 1044 1045 err: 1046 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1047 kfree(data); 1048 return r; 1049 } 1050 1051 /** 1052 * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency 1053 * 1054 * @f: open file handle 1055 * @buf: User buffer to store read data in 1056 * @size: Number of bytes to read 1057 * @pos: Offset to seek to 1058 * 1059 * Read the last residency value logged. It doesn't auto update, one needs to 1060 * stop logging before getting the current value. 1061 */ 1062 static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf, 1063 size_t size, loff_t *pos) 1064 { 1065 struct amdgpu_device *adev = file_inode(f)->i_private; 1066 ssize_t result = 0; 1067 int r; 1068 1069 if (size & 0x3 || *pos & 0x3) 1070 return -EINVAL; 1071 1072 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1073 if (r < 0) { 1074 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1075 return r; 1076 } 1077 1078 while (size) { 1079 uint32_t value; 1080 1081 r = amdgpu_get_gfx_off_residency(adev, &value); 1082 if (r) 1083 goto out; 1084 1085 r = put_user(value, (uint32_t *)buf); 1086 if (r) 1087 goto out; 1088 1089 result += 4; 1090 buf += 4; 1091 *pos += 4; 1092 size -= 4; 1093 } 1094 1095 r = result; 1096 out: 1097 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1098 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1099 1100 return r; 1101 } 1102 1103 /** 1104 * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency 1105 * 1106 * @f: open file handle 1107 * @buf: User buffer to write data from 1108 * @size: Number of bytes to write 1109 * @pos: Offset to seek to 1110 * 1111 * Write a 32-bit non-zero to start logging; write a 32-bit zero to stop 1112 */ 1113 static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf, 1114 size_t size, loff_t *pos) 1115 { 1116 struct amdgpu_device *adev = file_inode(f)->i_private; 1117 ssize_t result = 0; 1118 int r; 1119 1120 if (size & 0x3 || *pos & 0x3) 1121 return -EINVAL; 1122 1123 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1124 if (r < 0) { 1125 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1126 return r; 1127 } 1128 1129 while (size) { 1130 u32 value; 1131 1132 r = get_user(value, (uint32_t *)buf); 1133 if (r) 1134 goto out; 1135 1136 amdgpu_set_gfx_off_residency(adev, value ? true : false); 1137 1138 result += 4; 1139 buf += 4; 1140 *pos += 4; 1141 size -= 4; 1142 } 1143 1144 r = result; 1145 out: 1146 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1147 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1148 1149 return r; 1150 } 1151 1152 1153 /** 1154 * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count 1155 * 1156 * @f: open file handle 1157 * @buf: User buffer to store read data in 1158 * @size: Number of bytes to read 1159 * @pos: Offset to seek to 1160 */ 1161 static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf, 1162 size_t size, loff_t *pos) 1163 { 1164 struct amdgpu_device *adev = file_inode(f)->i_private; 1165 ssize_t result = 0; 1166 int r; 1167 1168 if (size & 0x3 || *pos & 0x3) 1169 return -EINVAL; 1170 1171 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1172 if (r < 0) { 1173 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1174 return r; 1175 } 1176 1177 while (size) { 1178 u64 value = 0; 1179 1180 r = amdgpu_get_gfx_off_entrycount(adev, &value); 1181 if (r) 1182 goto out; 1183 1184 r = put_user(value, (u64 *)buf); 1185 if (r) 1186 goto out; 1187 1188 result += 4; 1189 buf += 4; 1190 *pos += 4; 1191 size -= 4; 1192 } 1193 1194 r = result; 1195 out: 1196 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1197 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1198 1199 return r; 1200 } 1201 1202 /** 1203 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF 1204 * 1205 * @f: open file handle 1206 * @buf: User buffer to write data from 1207 * @size: Number of bytes to write 1208 * @pos: Offset to seek to 1209 * 1210 * Write a 32-bit zero to disable or a 32-bit non-zero to enable 1211 */ 1212 static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf, 1213 size_t size, loff_t *pos) 1214 { 1215 struct amdgpu_device *adev = file_inode(f)->i_private; 1216 ssize_t result = 0; 1217 int r; 1218 1219 if (size & 0x3 || *pos & 0x3) 1220 return -EINVAL; 1221 1222 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1223 if (r < 0) { 1224 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1225 return r; 1226 } 1227 1228 while (size) { 1229 uint32_t value; 1230 1231 r = get_user(value, (uint32_t *)buf); 1232 if (r) 1233 goto out; 1234 1235 amdgpu_gfx_off_ctrl(adev, value ? true : false); 1236 1237 result += 4; 1238 buf += 4; 1239 *pos += 4; 1240 size -= 4; 1241 } 1242 1243 r = result; 1244 out: 1245 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1246 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1247 1248 return r; 1249 } 1250 1251 1252 /** 1253 * amdgpu_debugfs_gfxoff_read - read gfxoff status 1254 * 1255 * @f: open file handle 1256 * @buf: User buffer to store read data in 1257 * @size: Number of bytes to read 1258 * @pos: Offset to seek to 1259 */ 1260 static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, 1261 size_t size, loff_t *pos) 1262 { 1263 struct amdgpu_device *adev = file_inode(f)->i_private; 1264 ssize_t result = 0; 1265 int r; 1266 1267 if (size & 0x3 || *pos & 0x3) 1268 return -EINVAL; 1269 1270 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1271 if (r < 0) { 1272 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1273 return r; 1274 } 1275 1276 while (size) { 1277 u32 value = adev->gfx.gfx_off_state; 1278 1279 r = put_user(value, (u32 *)buf); 1280 if (r) 1281 goto out; 1282 1283 result += 4; 1284 buf += 4; 1285 *pos += 4; 1286 size -= 4; 1287 } 1288 1289 r = result; 1290 out: 1291 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1292 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1293 1294 return r; 1295 } 1296 1297 static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf, 1298 size_t size, loff_t *pos) 1299 { 1300 struct amdgpu_device *adev = file_inode(f)->i_private; 1301 ssize_t result = 0; 1302 int r; 1303 1304 if (size & 0x3 || *pos & 0x3) 1305 return -EINVAL; 1306 1307 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1308 if (r < 0) { 1309 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1310 return r; 1311 } 1312 1313 while (size) { 1314 u32 value; 1315 1316 r = amdgpu_get_gfx_off_status(adev, &value); 1317 if (r) 1318 goto out; 1319 1320 r = put_user(value, (u32 *)buf); 1321 if (r) 1322 goto out; 1323 1324 result += 4; 1325 buf += 4; 1326 *pos += 4; 1327 size -= 4; 1328 } 1329 1330 r = result; 1331 out: 1332 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1333 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1334 1335 return r; 1336 } 1337 1338 static const struct file_operations amdgpu_debugfs_regs2_fops = { 1339 .owner = THIS_MODULE, 1340 .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl, 1341 .read = amdgpu_debugfs_regs2_read, 1342 .write = amdgpu_debugfs_regs2_write, 1343 .open = amdgpu_debugfs_regs2_open, 1344 .release = amdgpu_debugfs_regs2_release, 1345 .llseek = default_llseek 1346 }; 1347 1348 static const struct file_operations amdgpu_debugfs_regs_fops = { 1349 .owner = THIS_MODULE, 1350 .read = amdgpu_debugfs_regs_read, 1351 .write = amdgpu_debugfs_regs_write, 1352 .llseek = default_llseek 1353 }; 1354 static const struct file_operations amdgpu_debugfs_regs_didt_fops = { 1355 .owner = THIS_MODULE, 1356 .read = amdgpu_debugfs_regs_didt_read, 1357 .write = amdgpu_debugfs_regs_didt_write, 1358 .llseek = default_llseek 1359 }; 1360 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = { 1361 .owner = THIS_MODULE, 1362 .read = amdgpu_debugfs_regs_pcie_read, 1363 .write = amdgpu_debugfs_regs_pcie_write, 1364 .llseek = default_llseek 1365 }; 1366 static const struct file_operations amdgpu_debugfs_regs_smc_fops = { 1367 .owner = THIS_MODULE, 1368 .read = amdgpu_debugfs_regs_smc_read, 1369 .write = amdgpu_debugfs_regs_smc_write, 1370 .llseek = default_llseek 1371 }; 1372 1373 static const struct file_operations amdgpu_debugfs_gca_config_fops = { 1374 .owner = THIS_MODULE, 1375 .read = amdgpu_debugfs_gca_config_read, 1376 .llseek = default_llseek 1377 }; 1378 1379 static const struct file_operations amdgpu_debugfs_sensors_fops = { 1380 .owner = THIS_MODULE, 1381 .read = amdgpu_debugfs_sensor_read, 1382 .llseek = default_llseek 1383 }; 1384 1385 static const struct file_operations amdgpu_debugfs_wave_fops = { 1386 .owner = THIS_MODULE, 1387 .read = amdgpu_debugfs_wave_read, 1388 .llseek = default_llseek 1389 }; 1390 static const struct file_operations amdgpu_debugfs_gpr_fops = { 1391 .owner = THIS_MODULE, 1392 .read = amdgpu_debugfs_gpr_read, 1393 .llseek = default_llseek 1394 }; 1395 1396 static const struct file_operations amdgpu_debugfs_gfxoff_fops = { 1397 .owner = THIS_MODULE, 1398 .read = amdgpu_debugfs_gfxoff_read, 1399 .write = amdgpu_debugfs_gfxoff_write, 1400 .llseek = default_llseek 1401 }; 1402 1403 static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = { 1404 .owner = THIS_MODULE, 1405 .read = amdgpu_debugfs_gfxoff_status_read, 1406 .llseek = default_llseek 1407 }; 1408 1409 static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = { 1410 .owner = THIS_MODULE, 1411 .read = amdgpu_debugfs_gfxoff_count_read, 1412 .llseek = default_llseek 1413 }; 1414 1415 static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = { 1416 .owner = THIS_MODULE, 1417 .read = amdgpu_debugfs_gfxoff_residency_read, 1418 .write = amdgpu_debugfs_gfxoff_residency_write, 1419 .llseek = default_llseek 1420 }; 1421 1422 static const struct file_operations *debugfs_regs[] = { 1423 &amdgpu_debugfs_regs_fops, 1424 &amdgpu_debugfs_regs2_fops, 1425 &amdgpu_debugfs_regs_didt_fops, 1426 &amdgpu_debugfs_regs_pcie_fops, 1427 &amdgpu_debugfs_regs_smc_fops, 1428 &amdgpu_debugfs_gca_config_fops, 1429 &amdgpu_debugfs_sensors_fops, 1430 &amdgpu_debugfs_wave_fops, 1431 &amdgpu_debugfs_gpr_fops, 1432 &amdgpu_debugfs_gfxoff_fops, 1433 &amdgpu_debugfs_gfxoff_status_fops, 1434 &amdgpu_debugfs_gfxoff_count_fops, 1435 &amdgpu_debugfs_gfxoff_residency_fops, 1436 }; 1437 1438 static const char *debugfs_regs_names[] = { 1439 "amdgpu_regs", 1440 "amdgpu_regs2", 1441 "amdgpu_regs_didt", 1442 "amdgpu_regs_pcie", 1443 "amdgpu_regs_smc", 1444 "amdgpu_gca_config", 1445 "amdgpu_sensors", 1446 "amdgpu_wave", 1447 "amdgpu_gpr", 1448 "amdgpu_gfxoff", 1449 "amdgpu_gfxoff_status", 1450 "amdgpu_gfxoff_count", 1451 "amdgpu_gfxoff_residency", 1452 }; 1453 1454 /** 1455 * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide 1456 * register access. 1457 * 1458 * @adev: The device to attach the debugfs entries to 1459 */ 1460 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 1461 { 1462 struct drm_minor *minor = adev_to_drm(adev)->primary; 1463 struct dentry *ent, *root = minor->debugfs_root; 1464 unsigned int i; 1465 1466 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { 1467 ent = debugfs_create_file(debugfs_regs_names[i], 1468 S_IFREG | S_IRUGO, root, 1469 adev, debugfs_regs[i]); 1470 if (!i && !IS_ERR_OR_NULL(ent)) 1471 i_size_write(ent->d_inode, adev->rmmio_size); 1472 } 1473 1474 return 0; 1475 } 1476 1477 static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) 1478 { 1479 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 1480 struct drm_device *dev = adev_to_drm(adev); 1481 int r = 0, i; 1482 1483 r = pm_runtime_get_sync(dev->dev); 1484 if (r < 0) { 1485 pm_runtime_put_autosuspend(dev->dev); 1486 return r; 1487 } 1488 1489 /* Avoid accidently unparking the sched thread during GPU reset */ 1490 r = down_write_killable(&adev->reset_domain->sem); 1491 if (r) 1492 return r; 1493 1494 /* hold on the scheduler */ 1495 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1496 struct amdgpu_ring *ring = adev->rings[i]; 1497 1498 if (!ring || !ring->sched.thread) 1499 continue; 1500 kthread_park(ring->sched.thread); 1501 } 1502 1503 seq_printf(m, "run ib test:\n"); 1504 r = amdgpu_ib_ring_tests(adev); 1505 if (r) 1506 seq_printf(m, "ib ring tests failed (%d).\n", r); 1507 else 1508 seq_printf(m, "ib ring tests passed.\n"); 1509 1510 /* go on the scheduler */ 1511 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1512 struct amdgpu_ring *ring = adev->rings[i]; 1513 1514 if (!ring || !ring->sched.thread) 1515 continue; 1516 kthread_unpark(ring->sched.thread); 1517 } 1518 1519 up_write(&adev->reset_domain->sem); 1520 1521 pm_runtime_mark_last_busy(dev->dev); 1522 pm_runtime_put_autosuspend(dev->dev); 1523 1524 return 0; 1525 } 1526 1527 static int amdgpu_debugfs_evict_vram(void *data, u64 *val) 1528 { 1529 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1530 struct drm_device *dev = adev_to_drm(adev); 1531 int r; 1532 1533 r = pm_runtime_get_sync(dev->dev); 1534 if (r < 0) { 1535 pm_runtime_put_autosuspend(dev->dev); 1536 return r; 1537 } 1538 1539 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 1540 1541 pm_runtime_mark_last_busy(dev->dev); 1542 pm_runtime_put_autosuspend(dev->dev); 1543 1544 return 0; 1545 } 1546 1547 1548 static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) 1549 { 1550 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1551 struct drm_device *dev = adev_to_drm(adev); 1552 int r; 1553 1554 r = pm_runtime_get_sync(dev->dev); 1555 if (r < 0) { 1556 pm_runtime_put_autosuspend(dev->dev); 1557 return r; 1558 } 1559 1560 *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); 1561 1562 pm_runtime_mark_last_busy(dev->dev); 1563 pm_runtime_put_autosuspend(dev->dev); 1564 1565 return 0; 1566 } 1567 1568 static int amdgpu_debugfs_benchmark(void *data, u64 val) 1569 { 1570 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1571 struct drm_device *dev = adev_to_drm(adev); 1572 int r; 1573 1574 r = pm_runtime_get_sync(dev->dev); 1575 if (r < 0) { 1576 pm_runtime_put_autosuspend(dev->dev); 1577 return r; 1578 } 1579 1580 r = amdgpu_benchmark(adev, val); 1581 1582 pm_runtime_mark_last_busy(dev->dev); 1583 pm_runtime_put_autosuspend(dev->dev); 1584 1585 return r; 1586 } 1587 1588 static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) 1589 { 1590 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 1591 struct drm_device *dev = adev_to_drm(adev); 1592 struct drm_file *file; 1593 int r; 1594 1595 r = mutex_lock_interruptible(&dev->filelist_mutex); 1596 if (r) 1597 return r; 1598 1599 list_for_each_entry(file, &dev->filelist, lhead) { 1600 struct amdgpu_fpriv *fpriv = file->driver_priv; 1601 struct amdgpu_vm *vm = &fpriv->vm; 1602 1603 seq_printf(m, "pid:%d\tProcess:%s ----------\n", 1604 vm->task_info.pid, vm->task_info.process_name); 1605 r = amdgpu_bo_reserve(vm->root.bo, true); 1606 if (r) 1607 break; 1608 amdgpu_debugfs_vm_bo_info(vm, m); 1609 amdgpu_bo_unreserve(vm->root.bo); 1610 } 1611 1612 mutex_unlock(&dev->filelist_mutex); 1613 1614 return r; 1615 } 1616 1617 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib); 1618 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info); 1619 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram, 1620 NULL, "%lld\n"); 1621 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt, 1622 NULL, "%lld\n"); 1623 DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark, 1624 "%lld\n"); 1625 1626 static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring, 1627 struct dma_fence **fences) 1628 { 1629 struct amdgpu_fence_driver *drv = &ring->fence_drv; 1630 uint32_t sync_seq, last_seq; 1631 1632 last_seq = atomic_read(&ring->fence_drv.last_seq); 1633 sync_seq = ring->fence_drv.sync_seq; 1634 1635 last_seq &= drv->num_fences_mask; 1636 sync_seq &= drv->num_fences_mask; 1637 1638 do { 1639 struct dma_fence *fence, **ptr; 1640 1641 ++last_seq; 1642 last_seq &= drv->num_fences_mask; 1643 ptr = &drv->fences[last_seq]; 1644 1645 fence = rcu_dereference_protected(*ptr, 1); 1646 RCU_INIT_POINTER(*ptr, NULL); 1647 1648 if (!fence) 1649 continue; 1650 1651 fences[last_seq] = fence; 1652 1653 } while (last_seq != sync_seq); 1654 } 1655 1656 static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences, 1657 int length) 1658 { 1659 int i; 1660 struct dma_fence *fence; 1661 1662 for (i = 0; i < length; i++) { 1663 fence = fences[i]; 1664 if (!fence) 1665 continue; 1666 dma_fence_signal(fence); 1667 dma_fence_put(fence); 1668 } 1669 } 1670 1671 static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) 1672 { 1673 struct drm_sched_job *s_job; 1674 struct dma_fence *fence; 1675 1676 spin_lock(&sched->job_list_lock); 1677 list_for_each_entry(s_job, &sched->pending_list, list) { 1678 fence = sched->ops->run_job(s_job); 1679 dma_fence_put(fence); 1680 } 1681 spin_unlock(&sched->job_list_lock); 1682 } 1683 1684 static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) 1685 { 1686 struct amdgpu_job *job; 1687 struct drm_sched_job *s_job, *tmp; 1688 uint32_t preempt_seq; 1689 struct dma_fence *fence, **ptr; 1690 struct amdgpu_fence_driver *drv = &ring->fence_drv; 1691 struct drm_gpu_scheduler *sched = &ring->sched; 1692 bool preempted = true; 1693 1694 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) 1695 return; 1696 1697 preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); 1698 if (preempt_seq <= atomic_read(&drv->last_seq)) { 1699 preempted = false; 1700 goto no_preempt; 1701 } 1702 1703 preempt_seq &= drv->num_fences_mask; 1704 ptr = &drv->fences[preempt_seq]; 1705 fence = rcu_dereference_protected(*ptr, 1); 1706 1707 no_preempt: 1708 spin_lock(&sched->job_list_lock); 1709 list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { 1710 if (dma_fence_is_signaled(&s_job->s_fence->finished)) { 1711 /* remove job from ring_mirror_list */ 1712 list_del_init(&s_job->list); 1713 sched->ops->free_job(s_job); 1714 continue; 1715 } 1716 job = to_amdgpu_job(s_job); 1717 if (preempted && (&job->hw_fence) == fence) 1718 /* mark the job as preempted */ 1719 job->preemption_status |= AMDGPU_IB_PREEMPTED; 1720 } 1721 spin_unlock(&sched->job_list_lock); 1722 } 1723 1724 static int amdgpu_debugfs_ib_preempt(void *data, u64 val) 1725 { 1726 int r, resched, length; 1727 struct amdgpu_ring *ring; 1728 struct dma_fence **fences = NULL; 1729 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1730 1731 if (val >= AMDGPU_MAX_RINGS) 1732 return -EINVAL; 1733 1734 ring = adev->rings[val]; 1735 1736 if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread) 1737 return -EINVAL; 1738 1739 /* the last preemption failed */ 1740 if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr)) 1741 return -EBUSY; 1742 1743 length = ring->fence_drv.num_fences_mask + 1; 1744 fences = kcalloc(length, sizeof(void *), GFP_KERNEL); 1745 if (!fences) 1746 return -ENOMEM; 1747 1748 /* Avoid accidently unparking the sched thread during GPU reset */ 1749 r = down_read_killable(&adev->reset_domain->sem); 1750 if (r) 1751 goto pro_end; 1752 1753 /* stop the scheduler */ 1754 kthread_park(ring->sched.thread); 1755 1756 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 1757 1758 /* preempt the IB */ 1759 r = amdgpu_ring_preempt_ib(ring); 1760 if (r) { 1761 DRM_WARN("failed to preempt ring %d\n", ring->idx); 1762 goto failure; 1763 } 1764 1765 amdgpu_fence_process(ring); 1766 1767 if (atomic_read(&ring->fence_drv.last_seq) != 1768 ring->fence_drv.sync_seq) { 1769 DRM_INFO("ring %d was preempted\n", ring->idx); 1770 1771 amdgpu_ib_preempt_mark_partial_job(ring); 1772 1773 /* swap out the old fences */ 1774 amdgpu_ib_preempt_fences_swap(ring, fences); 1775 1776 amdgpu_fence_driver_force_completion(ring); 1777 1778 /* resubmit unfinished jobs */ 1779 amdgpu_ib_preempt_job_recovery(&ring->sched); 1780 1781 /* wait for jobs finished */ 1782 amdgpu_fence_wait_empty(ring); 1783 1784 /* signal the old fences */ 1785 amdgpu_ib_preempt_signal_fences(fences, length); 1786 } 1787 1788 failure: 1789 /* restart the scheduler */ 1790 kthread_unpark(ring->sched.thread); 1791 1792 up_read(&adev->reset_domain->sem); 1793 1794 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); 1795 1796 pro_end: 1797 kfree(fences); 1798 1799 return r; 1800 } 1801 1802 static int amdgpu_debugfs_sclk_set(void *data, u64 val) 1803 { 1804 int ret = 0; 1805 uint32_t max_freq, min_freq; 1806 struct amdgpu_device *adev = (struct amdgpu_device *)data; 1807 1808 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1809 return -EINVAL; 1810 1811 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1812 if (ret < 0) { 1813 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1814 return ret; 1815 } 1816 1817 ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq); 1818 if (ret == -EOPNOTSUPP) { 1819 ret = 0; 1820 goto out; 1821 } 1822 if (ret || val > max_freq || val < min_freq) { 1823 ret = -EINVAL; 1824 goto out; 1825 } 1826 1827 ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val); 1828 if (ret) 1829 ret = -EINVAL; 1830 1831 out: 1832 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1833 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1834 1835 return ret; 1836 } 1837 1838 DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL, 1839 amdgpu_debugfs_ib_preempt, "%llu\n"); 1840 1841 DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL, 1842 amdgpu_debugfs_sclk_set, "%llu\n"); 1843 1844 static ssize_t amdgpu_reset_dump_register_list_read(struct file *f, 1845 char __user *buf, size_t size, loff_t *pos) 1846 { 1847 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; 1848 char reg_offset[12]; 1849 int i, ret, len = 0; 1850 1851 if (*pos) 1852 return 0; 1853 1854 memset(reg_offset, 0, 12); 1855 ret = down_read_killable(&adev->reset_domain->sem); 1856 if (ret) 1857 return ret; 1858 1859 for (i = 0; i < adev->num_regs; i++) { 1860 sprintf(reg_offset, "0x%x\n", adev->reset_dump_reg_list[i]); 1861 up_read(&adev->reset_domain->sem); 1862 if (copy_to_user(buf + len, reg_offset, strlen(reg_offset))) 1863 return -EFAULT; 1864 1865 len += strlen(reg_offset); 1866 ret = down_read_killable(&adev->reset_domain->sem); 1867 if (ret) 1868 return ret; 1869 } 1870 1871 up_read(&adev->reset_domain->sem); 1872 *pos += len; 1873 1874 return len; 1875 } 1876 1877 static ssize_t amdgpu_reset_dump_register_list_write(struct file *f, 1878 const char __user *buf, size_t size, loff_t *pos) 1879 { 1880 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; 1881 char reg_offset[11]; 1882 uint32_t *new = NULL, *tmp = NULL; 1883 int ret, i = 0, len = 0; 1884 1885 do { 1886 memset(reg_offset, 0, 11); 1887 if (copy_from_user(reg_offset, buf + len, 1888 min(10, ((int)size-len)))) { 1889 ret = -EFAULT; 1890 goto error_free; 1891 } 1892 1893 new = krealloc_array(tmp, i + 1, sizeof(uint32_t), GFP_KERNEL); 1894 if (!new) { 1895 ret = -ENOMEM; 1896 goto error_free; 1897 } 1898 tmp = new; 1899 if (sscanf(reg_offset, "%X %n", &tmp[i], &ret) != 1) { 1900 ret = -EINVAL; 1901 goto error_free; 1902 } 1903 1904 len += ret; 1905 i++; 1906 } while (len < size); 1907 1908 new = kmalloc_array(i, sizeof(uint32_t), GFP_KERNEL); 1909 if (!new) { 1910 ret = -ENOMEM; 1911 goto error_free; 1912 } 1913 ret = down_write_killable(&adev->reset_domain->sem); 1914 if (ret) 1915 goto error_free; 1916 1917 swap(adev->reset_dump_reg_list, tmp); 1918 swap(adev->reset_dump_reg_value, new); 1919 adev->num_regs = i; 1920 up_write(&adev->reset_domain->sem); 1921 ret = size; 1922 1923 error_free: 1924 if (tmp != new) 1925 kfree(tmp); 1926 kfree(new); 1927 return ret; 1928 } 1929 1930 static const struct file_operations amdgpu_reset_dump_register_list = { 1931 .owner = THIS_MODULE, 1932 .read = amdgpu_reset_dump_register_list_read, 1933 .write = amdgpu_reset_dump_register_list_write, 1934 .llseek = default_llseek 1935 }; 1936 1937 int amdgpu_debugfs_init(struct amdgpu_device *adev) 1938 { 1939 struct dentry *root = adev_to_drm(adev)->primary->debugfs_root; 1940 struct dentry *ent; 1941 int r, i; 1942 1943 if (!debugfs_initialized()) 1944 return 0; 1945 1946 debugfs_create_x32("amdgpu_smu_debug", 0600, root, 1947 &adev->pm.smu_debug_mask); 1948 1949 ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, 1950 &fops_ib_preempt); 1951 if (IS_ERR(ent)) { 1952 DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); 1953 return PTR_ERR(ent); 1954 } 1955 1956 ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, 1957 &fops_sclk_set); 1958 if (IS_ERR(ent)) { 1959 DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); 1960 return PTR_ERR(ent); 1961 } 1962 1963 /* Register debugfs entries for amdgpu_ttm */ 1964 amdgpu_ttm_debugfs_init(adev); 1965 amdgpu_debugfs_pm_init(adev); 1966 amdgpu_debugfs_sa_init(adev); 1967 amdgpu_debugfs_fence_init(adev); 1968 amdgpu_debugfs_gem_init(adev); 1969 1970 r = amdgpu_debugfs_regs_init(adev); 1971 if (r) 1972 DRM_ERROR("registering register debugfs failed (%d).\n", r); 1973 1974 amdgpu_debugfs_firmware_init(adev); 1975 amdgpu_ta_if_debugfs_init(adev); 1976 1977 #if defined(CONFIG_DRM_AMD_DC) 1978 if (amdgpu_device_has_dc_support(adev)) 1979 dtn_debugfs_init(adev); 1980 #endif 1981 1982 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1983 struct amdgpu_ring *ring = adev->rings[i]; 1984 1985 if (!ring) 1986 continue; 1987 1988 amdgpu_debugfs_ring_init(adev, ring); 1989 } 1990 1991 for ( i = 0; i < adev->vcn.num_vcn_inst; i++) { 1992 if (!amdgpu_vcnfw_log) 1993 break; 1994 1995 if (adev->vcn.harvest_config & (1 << i)) 1996 continue; 1997 1998 amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]); 1999 } 2000 2001 amdgpu_ras_debugfs_create_all(adev); 2002 amdgpu_rap_debugfs_init(adev); 2003 amdgpu_securedisplay_debugfs_init(adev); 2004 amdgpu_fw_attestation_debugfs_init(adev); 2005 2006 debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, 2007 &amdgpu_evict_vram_fops); 2008 debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, 2009 &amdgpu_evict_gtt_fops); 2010 debugfs_create_file("amdgpu_test_ib", 0444, root, adev, 2011 &amdgpu_debugfs_test_ib_fops); 2012 debugfs_create_file("amdgpu_vm_info", 0444, root, adev, 2013 &amdgpu_debugfs_vm_info_fops); 2014 debugfs_create_file("amdgpu_benchmark", 0200, root, adev, 2015 &amdgpu_benchmark_fops); 2016 debugfs_create_file("amdgpu_reset_dump_register_list", 0644, root, adev, 2017 &amdgpu_reset_dump_register_list); 2018 2019 adev->debugfs_vbios_blob.data = adev->bios; 2020 adev->debugfs_vbios_blob.size = adev->bios_size; 2021 debugfs_create_blob("amdgpu_vbios", 0444, root, 2022 &adev->debugfs_vbios_blob); 2023 2024 adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; 2025 adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; 2026 debugfs_create_blob("amdgpu_discovery", 0444, root, 2027 &adev->debugfs_discovery_blob); 2028 2029 return 0; 2030 } 2031 2032 #else 2033 int amdgpu_debugfs_init(struct amdgpu_device *adev) 2034 { 2035 return 0; 2036 } 2037 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 2038 { 2039 return 0; 2040 } 2041 #endif 2042