1 /* 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * 23 */ 24 #include <linux/debugfs.h> 25 #include <linux/list.h> 26 #include <linux/module.h> 27 #include <linux/uaccess.h> 28 #include <linux/reboot.h> 29 #include <linux/syscalls.h> 30 #include <linux/pm_runtime.h> 31 32 #include "amdgpu.h" 33 #include "amdgpu_ras.h" 34 #include "amdgpu_atomfirmware.h" 35 #include "amdgpu_xgmi.h" 36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" 37 #include "atom.h" 38 #include "amdgpu_reset.h" 39 40 #ifdef CONFIG_X86_MCE_AMD 41 #include <asm/mce.h> 42 43 static bool notifier_registered; 44 #endif 45 static const char *RAS_FS_NAME = "ras"; 46 47 const char *ras_error_string[] = { 48 "none", 49 "parity", 50 "single_correctable", 51 "multi_uncorrectable", 52 "poison", 53 }; 54 55 const char *ras_block_string[] = { 56 "umc", 57 "sdma", 58 "gfx", 59 "mmhub", 60 "athub", 61 "pcie_bif", 62 "hdp", 63 "xgmi_wafl", 64 "df", 65 "smn", 66 "sem", 67 "mp0", 68 "mp1", 69 "fuse", 70 "mca", 71 "vcn", 72 "jpeg", 73 }; 74 75 const char *ras_mca_block_string[] = { 76 "mca_mp0", 77 "mca_mp1", 78 "mca_mpio", 79 "mca_iohc", 80 }; 81 82 struct amdgpu_ras_block_list { 83 /* ras block link */ 84 struct list_head node; 85 86 struct amdgpu_ras_block_object *ras_obj; 87 }; 88 89 const char *get_ras_block_str(struct ras_common_if *ras_block) 90 { 91 if (!ras_block) 92 return "NULL"; 93 94 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT) 95 return "OUT OF RANGE"; 96 97 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA) 98 return ras_mca_block_string[ras_block->sub_block_index]; 99 100 return ras_block_string[ras_block->block]; 101 } 102 103 #define ras_block_str(_BLOCK_) \ 104 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range") 105 106 #define ras_err_str(i) (ras_error_string[ffs(i)]) 107 108 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) 109 110 /* inject address is 52 bits */ 111 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) 112 113 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ 114 #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) 115 116 enum amdgpu_ras_retire_page_reservation { 117 AMDGPU_RAS_RETIRE_PAGE_RESERVED, 118 AMDGPU_RAS_RETIRE_PAGE_PENDING, 119 AMDGPU_RAS_RETIRE_PAGE_FAULT, 120 }; 121 122 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); 123 124 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 125 uint64_t addr); 126 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 127 uint64_t addr); 128 #ifdef CONFIG_X86_MCE_AMD 129 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); 130 struct mce_notifier_adev_list { 131 struct amdgpu_device *devs[MAX_GPU_INSTANCE]; 132 int num_gpu; 133 }; 134 static struct mce_notifier_adev_list mce_adev_list; 135 #endif 136 137 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) 138 { 139 if (adev && amdgpu_ras_get_context(adev)) 140 amdgpu_ras_get_context(adev)->error_query_ready = ready; 141 } 142 143 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) 144 { 145 if (adev && amdgpu_ras_get_context(adev)) 146 return amdgpu_ras_get_context(adev)->error_query_ready; 147 148 return false; 149 } 150 151 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address) 152 { 153 struct ras_err_data err_data = {0, 0, 0, NULL}; 154 struct eeprom_table_record err_rec; 155 156 if ((address >= adev->gmc.mc_vram_size) || 157 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) { 158 dev_warn(adev->dev, 159 "RAS WARN: input address 0x%llx is invalid.\n", 160 address); 161 return -EINVAL; 162 } 163 164 if (amdgpu_ras_check_bad_page(adev, address)) { 165 dev_warn(adev->dev, 166 "RAS WARN: 0x%llx has already been marked as bad page!\n", 167 address); 168 return 0; 169 } 170 171 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); 172 err_data.err_addr = &err_rec; 173 amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0); 174 175 if (amdgpu_bad_page_threshold != 0) { 176 amdgpu_ras_add_bad_pages(adev, err_data.err_addr, 177 err_data.err_addr_cnt); 178 amdgpu_ras_save_bad_pages(adev); 179 } 180 181 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n"); 182 dev_warn(adev->dev, "Clear EEPROM:\n"); 183 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n"); 184 185 return 0; 186 } 187 188 #ifdef __linux__ 189 190 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, 191 size_t size, loff_t *pos) 192 { 193 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private; 194 struct ras_query_if info = { 195 .head = obj->head, 196 }; 197 ssize_t s; 198 char val[128]; 199 200 if (amdgpu_ras_query_error_status(obj->adev, &info)) 201 return -EINVAL; 202 203 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ 204 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 205 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 206 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 207 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 208 } 209 210 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", 211 "ue", info.ue_count, 212 "ce", info.ce_count); 213 if (*pos >= s) 214 return 0; 215 216 s -= *pos; 217 s = min_t(u64, s, size); 218 219 220 if (copy_to_user(buf, &val[*pos], s)) 221 return -EINVAL; 222 223 *pos += s; 224 225 return s; 226 } 227 228 static const struct file_operations amdgpu_ras_debugfs_ops = { 229 .owner = THIS_MODULE, 230 .read = amdgpu_ras_debugfs_read, 231 .write = NULL, 232 .llseek = default_llseek 233 }; 234 235 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) 236 { 237 int i; 238 239 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { 240 *block_id = i; 241 if (strcmp(name, ras_block_string[i]) == 0) 242 return 0; 243 } 244 return -EINVAL; 245 } 246 247 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, 248 const char __user *buf, size_t size, 249 loff_t *pos, struct ras_debug_if *data) 250 { 251 ssize_t s = min_t(u64, 64, size); 252 char str[65]; 253 char block_name[33]; 254 char err[9] = "ue"; 255 int op = -1; 256 int block_id; 257 uint32_t sub_block; 258 u64 address, value; 259 260 if (*pos) 261 return -EINVAL; 262 *pos = size; 263 264 memset(str, 0, sizeof(str)); 265 memset(data, 0, sizeof(*data)); 266 267 if (copy_from_user(str, buf, s)) 268 return -EINVAL; 269 270 if (sscanf(str, "disable %32s", block_name) == 1) 271 op = 0; 272 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2) 273 op = 1; 274 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) 275 op = 2; 276 else if (strstr(str, "retire_page") != NULL) 277 op = 3; 278 else if (str[0] && str[1] && str[2] && str[3]) 279 /* ascii string, but commands are not matched. */ 280 return -EINVAL; 281 282 if (op != -1) { 283 if (op == 3) { 284 if (sscanf(str, "%*s 0x%llx", &address) != 1 && 285 sscanf(str, "%*s %llu", &address) != 1) 286 return -EINVAL; 287 288 data->op = op; 289 data->inject.address = address; 290 291 return 0; 292 } 293 294 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id)) 295 return -EINVAL; 296 297 data->head.block = block_id; 298 /* only ue and ce errors are supported */ 299 if (!memcmp("ue", err, 2)) 300 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; 301 else if (!memcmp("ce", err, 2)) 302 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; 303 else 304 return -EINVAL; 305 306 data->op = op; 307 308 if (op == 2) { 309 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", 310 &sub_block, &address, &value) != 3 && 311 sscanf(str, "%*s %*s %*s %u %llu %llu", 312 &sub_block, &address, &value) != 3) 313 return -EINVAL; 314 data->head.sub_block_index = sub_block; 315 data->inject.address = address; 316 data->inject.value = value; 317 } 318 } else { 319 if (size < sizeof(*data)) 320 return -EINVAL; 321 322 if (copy_from_user(data, buf, sizeof(*data))) 323 return -EINVAL; 324 } 325 326 return 0; 327 } 328 329 /** 330 * DOC: AMDGPU RAS debugfs control interface 331 * 332 * The control interface accepts struct ras_debug_if which has two members. 333 * 334 * First member: ras_debug_if::head or ras_debug_if::inject. 335 * 336 * head is used to indicate which IP block will be under control. 337 * 338 * head has four members, they are block, type, sub_block_index, name. 339 * block: which IP will be under control. 340 * type: what kind of error will be enabled/disabled/injected. 341 * sub_block_index: some IPs have subcomponets. say, GFX, sDMA. 342 * name: the name of IP. 343 * 344 * inject has two more members than head, they are address, value. 345 * As their names indicate, inject operation will write the 346 * value to the address. 347 * 348 * The second member: struct ras_debug_if::op. 349 * It has three kinds of operations. 350 * 351 * - 0: disable RAS on the block. Take ::head as its data. 352 * - 1: enable RAS on the block. Take ::head as its data. 353 * - 2: inject errors on the block. Take ::inject as its data. 354 * 355 * How to use the interface? 356 * 357 * In a program 358 * 359 * Copy the struct ras_debug_if in your code and initialize it. 360 * Write the struct to the control interface. 361 * 362 * From shell 363 * 364 * .. code-block:: bash 365 * 366 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 367 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 368 * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 369 * 370 * Where N, is the card which you want to affect. 371 * 372 * "disable" requires only the block. 373 * "enable" requires the block and error type. 374 * "inject" requires the block, error type, address, and value. 375 * 376 * The block is one of: umc, sdma, gfx, etc. 377 * see ras_block_string[] for details 378 * 379 * The error type is one of: ue, ce, where, 380 * ue is multi-uncorrectable 381 * ce is single-correctable 382 * 383 * The sub-block is a the sub-block index, pass 0 if there is no sub-block. 384 * The address and value are hexadecimal numbers, leading 0x is optional. 385 * 386 * For instance, 387 * 388 * .. code-block:: bash 389 * 390 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 391 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 392 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl 393 * 394 * How to check the result of the operation? 395 * 396 * To check disable/enable, see "ras" features at, 397 * /sys/class/drm/card[0/1/2...]/device/ras/features 398 * 399 * To check inject, see the corresponding error count at, 400 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count 401 * 402 * .. note:: 403 * Operations are only allowed on blocks which are supported. 404 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask 405 * to see which blocks support RAS on a particular asic. 406 * 407 */ 408 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, 409 const char __user *buf, 410 size_t size, loff_t *pos) 411 { 412 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; 413 struct ras_debug_if data; 414 int ret = 0; 415 416 if (!amdgpu_ras_get_error_query_ready(adev)) { 417 dev_warn(adev->dev, "RAS WARN: error injection " 418 "currently inaccessible\n"); 419 return size; 420 } 421 422 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); 423 if (ret) 424 return ret; 425 426 if (data.op == 3) { 427 ret = amdgpu_reserve_page_direct(adev, data.inject.address); 428 if (!ret) 429 return size; 430 else 431 return ret; 432 } 433 434 if (!amdgpu_ras_is_supported(adev, data.head.block)) 435 return -EINVAL; 436 437 switch (data.op) { 438 case 0: 439 ret = amdgpu_ras_feature_enable(adev, &data.head, 0); 440 break; 441 case 1: 442 ret = amdgpu_ras_feature_enable(adev, &data.head, 1); 443 break; 444 case 2: 445 if ((data.inject.address >= adev->gmc.mc_vram_size) || 446 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { 447 dev_warn(adev->dev, "RAS WARN: input address " 448 "0x%llx is invalid.", 449 data.inject.address); 450 ret = -EINVAL; 451 break; 452 } 453 454 /* umc ce/ue error injection for a bad page is not allowed */ 455 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && 456 amdgpu_ras_check_bad_page(adev, data.inject.address)) { 457 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has " 458 "already been marked as bad!\n", 459 data.inject.address); 460 break; 461 } 462 463 /* data.inject.address is offset instead of absolute gpu address */ 464 ret = amdgpu_ras_error_inject(adev, &data.inject); 465 break; 466 default: 467 ret = -EINVAL; 468 break; 469 } 470 471 if (ret) 472 return ret; 473 474 return size; 475 } 476 477 /** 478 * DOC: AMDGPU RAS debugfs EEPROM table reset interface 479 * 480 * Some boards contain an EEPROM which is used to persistently store a list of 481 * bad pages which experiences ECC errors in vram. This interface provides 482 * a way to reset the EEPROM, e.g., after testing error injection. 483 * 484 * Usage: 485 * 486 * .. code-block:: bash 487 * 488 * echo 1 > ../ras/ras_eeprom_reset 489 * 490 * will reset EEPROM table to 0 entries. 491 * 492 */ 493 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, 494 const char __user *buf, 495 size_t size, loff_t *pos) 496 { 497 struct amdgpu_device *adev = 498 (struct amdgpu_device *)file_inode(f)->i_private; 499 int ret; 500 501 ret = amdgpu_ras_eeprom_reset_table( 502 &(amdgpu_ras_get_context(adev)->eeprom_control)); 503 504 if (!ret) { 505 /* Something was written to EEPROM. 506 */ 507 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; 508 return size; 509 } else { 510 return ret; 511 } 512 } 513 514 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { 515 .owner = THIS_MODULE, 516 .read = NULL, 517 .write = amdgpu_ras_debugfs_ctrl_write, 518 .llseek = default_llseek 519 }; 520 521 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { 522 .owner = THIS_MODULE, 523 .read = NULL, 524 .write = amdgpu_ras_debugfs_eeprom_write, 525 .llseek = default_llseek 526 }; 527 528 /** 529 * DOC: AMDGPU RAS sysfs Error Count Interface 530 * 531 * It allows the user to read the error count for each IP block on the gpu through 532 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count 533 * 534 * It outputs the multiple lines which report the uncorrected (ue) and corrected 535 * (ce) error counts. 536 * 537 * The format of one line is below, 538 * 539 * [ce|ue]: count 540 * 541 * Example: 542 * 543 * .. code-block:: bash 544 * 545 * ue: 0 546 * ce: 1 547 * 548 */ 549 static ssize_t amdgpu_ras_sysfs_read(struct device *dev, 550 struct device_attribute *attr, char *buf) 551 { 552 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); 553 struct ras_query_if info = { 554 .head = obj->head, 555 }; 556 557 if (!amdgpu_ras_get_error_query_ready(obj->adev)) 558 return sysfs_emit(buf, "Query currently inaccessible\n"); 559 560 if (amdgpu_ras_query_error_status(obj->adev, &info)) 561 return -EINVAL; 562 563 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 564 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 565 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 566 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 567 } 568 569 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, 570 "ce", info.ce_count); 571 } 572 573 #endif /* __linux__ */ 574 575 /* obj begin */ 576 577 #define get_obj(obj) do { (obj)->use++; } while (0) 578 #define alive_obj(obj) ((obj)->use) 579 580 static inline void put_obj(struct ras_manager *obj) 581 { 582 if (obj && (--obj->use == 0)) 583 list_del(&obj->node); 584 if (obj && (obj->use < 0)) 585 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head)); 586 } 587 588 /* make one obj and return it. */ 589 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, 590 struct ras_common_if *head) 591 { 592 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 593 struct ras_manager *obj; 594 595 if (!adev->ras_enabled || !con) 596 return NULL; 597 598 if (head->block >= AMDGPU_RAS_BLOCK_COUNT) 599 return NULL; 600 601 if (head->block == AMDGPU_RAS_BLOCK__MCA) { 602 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) 603 return NULL; 604 605 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; 606 } else 607 obj = &con->objs[head->block]; 608 609 /* already exist. return obj? */ 610 if (alive_obj(obj)) 611 return NULL; 612 613 obj->head = *head; 614 obj->adev = adev; 615 list_add(&obj->node, &con->head); 616 get_obj(obj); 617 618 return obj; 619 } 620 621 /* return an obj equal to head, or the first when head is NULL */ 622 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, 623 struct ras_common_if *head) 624 { 625 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 626 struct ras_manager *obj; 627 int i; 628 629 if (!adev->ras_enabled || !con) 630 return NULL; 631 632 if (head) { 633 if (head->block >= AMDGPU_RAS_BLOCK_COUNT) 634 return NULL; 635 636 if (head->block == AMDGPU_RAS_BLOCK__MCA) { 637 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) 638 return NULL; 639 640 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; 641 } else 642 obj = &con->objs[head->block]; 643 644 if (alive_obj(obj)) 645 return obj; 646 } else { 647 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { 648 obj = &con->objs[i]; 649 if (alive_obj(obj)) 650 return obj; 651 } 652 } 653 654 return NULL; 655 } 656 /* obj end */ 657 658 /* feature ctl begin */ 659 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, 660 struct ras_common_if *head) 661 { 662 return adev->ras_hw_enabled & BIT(head->block); 663 } 664 665 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, 666 struct ras_common_if *head) 667 { 668 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 669 670 return con->features & BIT(head->block); 671 } 672 673 /* 674 * if obj is not created, then create one. 675 * set feature enable flag. 676 */ 677 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, 678 struct ras_common_if *head, int enable) 679 { 680 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 681 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 682 683 /* If hardware does not support ras, then do not create obj. 684 * But if hardware support ras, we can create the obj. 685 * Ras framework checks con->hw_supported to see if it need do 686 * corresponding initialization. 687 * IP checks con->support to see if it need disable ras. 688 */ 689 if (!amdgpu_ras_is_feature_allowed(adev, head)) 690 return 0; 691 692 if (enable) { 693 if (!obj) { 694 obj = amdgpu_ras_create_obj(adev, head); 695 if (!obj) 696 return -EINVAL; 697 } else { 698 /* In case we create obj somewhere else */ 699 get_obj(obj); 700 } 701 con->features |= BIT(head->block); 702 } else { 703 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { 704 con->features &= ~BIT(head->block); 705 put_obj(obj); 706 } 707 } 708 709 return 0; 710 } 711 712 /* wrapper of psp_ras_enable_features */ 713 int amdgpu_ras_feature_enable(struct amdgpu_device *adev, 714 struct ras_common_if *head, bool enable) 715 { 716 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 717 union ta_ras_cmd_input *info; 718 int ret; 719 720 if (!con) 721 return -EINVAL; 722 723 if (head->block == AMDGPU_RAS_BLOCK__GFX) { 724 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); 725 if (!info) 726 return -ENOMEM; 727 728 if (!enable) { 729 info->disable_features = (struct ta_ras_disable_features_input) { 730 .block_id = amdgpu_ras_block_to_ta(head->block), 731 .error_type = amdgpu_ras_error_to_ta(head->type), 732 }; 733 } else { 734 info->enable_features = (struct ta_ras_enable_features_input) { 735 .block_id = amdgpu_ras_block_to_ta(head->block), 736 .error_type = amdgpu_ras_error_to_ta(head->type), 737 }; 738 } 739 } 740 741 /* Do not enable if it is not allowed. */ 742 WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); 743 744 /* Only enable ras feature operation handle on host side */ 745 if (head->block == AMDGPU_RAS_BLOCK__GFX && 746 !amdgpu_sriov_vf(adev) && 747 !amdgpu_ras_intr_triggered()) { 748 ret = psp_ras_enable_features(&adev->psp, info, enable); 749 if (ret) { 750 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n", 751 enable ? "enable":"disable", 752 get_ras_block_str(head), 753 amdgpu_ras_is_poison_mode_supported(adev), ret); 754 goto out; 755 } 756 } 757 758 /* setup the obj */ 759 __amdgpu_ras_feature_enable(adev, head, enable); 760 ret = 0; 761 out: 762 if (head->block == AMDGPU_RAS_BLOCK__GFX) 763 kfree(info); 764 return ret; 765 } 766 767 /* Only used in device probe stage and called only once. */ 768 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, 769 struct ras_common_if *head, bool enable) 770 { 771 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 772 int ret; 773 774 if (!con) 775 return -EINVAL; 776 777 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 778 if (enable) { 779 /* There is no harm to issue a ras TA cmd regardless of 780 * the currecnt ras state. 781 * If current state == target state, it will do nothing 782 * But sometimes it requests driver to reset and repost 783 * with error code -EAGAIN. 784 */ 785 ret = amdgpu_ras_feature_enable(adev, head, 1); 786 /* With old ras TA, we might fail to enable ras. 787 * Log it and just setup the object. 788 * TODO need remove this WA in the future. 789 */ 790 if (ret == -EINVAL) { 791 ret = __amdgpu_ras_feature_enable(adev, head, 1); 792 if (!ret) 793 dev_info(adev->dev, 794 "RAS INFO: %s setup object\n", 795 get_ras_block_str(head)); 796 } 797 } else { 798 /* setup the object then issue a ras TA disable cmd.*/ 799 ret = __amdgpu_ras_feature_enable(adev, head, 1); 800 if (ret) 801 return ret; 802 803 /* gfx block ras dsiable cmd must send to ras-ta */ 804 if (head->block == AMDGPU_RAS_BLOCK__GFX) 805 con->features |= BIT(head->block); 806 807 ret = amdgpu_ras_feature_enable(adev, head, 0); 808 809 /* clean gfx block ras features flag */ 810 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) 811 con->features &= ~BIT(head->block); 812 } 813 } else 814 ret = amdgpu_ras_feature_enable(adev, head, enable); 815 816 return ret; 817 } 818 819 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, 820 bool bypass) 821 { 822 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 823 struct ras_manager *obj, *tmp; 824 825 list_for_each_entry_safe(obj, tmp, &con->head, node) { 826 /* bypass psp. 827 * aka just release the obj and corresponding flags 828 */ 829 if (bypass) { 830 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) 831 break; 832 } else { 833 if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) 834 break; 835 } 836 } 837 838 return con->features; 839 } 840 841 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, 842 bool bypass) 843 { 844 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 845 int i; 846 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; 847 848 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { 849 struct ras_common_if head = { 850 .block = i, 851 .type = default_ras_type, 852 .sub_block_index = 0, 853 }; 854 855 if (i == AMDGPU_RAS_BLOCK__MCA) 856 continue; 857 858 if (bypass) { 859 /* 860 * bypass psp. vbios enable ras for us. 861 * so just create the obj 862 */ 863 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 864 break; 865 } else { 866 if (amdgpu_ras_feature_enable(adev, &head, 1)) 867 break; 868 } 869 } 870 871 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { 872 struct ras_common_if head = { 873 .block = AMDGPU_RAS_BLOCK__MCA, 874 .type = default_ras_type, 875 .sub_block_index = i, 876 }; 877 878 if (bypass) { 879 /* 880 * bypass psp. vbios enable ras for us. 881 * so just create the obj 882 */ 883 if (__amdgpu_ras_feature_enable(adev, &head, 1)) 884 break; 885 } else { 886 if (amdgpu_ras_feature_enable(adev, &head, 1)) 887 break; 888 } 889 } 890 891 return con->features; 892 } 893 /* feature ctl end */ 894 895 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, 896 enum amdgpu_ras_block block) 897 { 898 if (!block_obj) 899 return -EINVAL; 900 901 if (block_obj->ras_comm.block == block) 902 return 0; 903 904 return -EINVAL; 905 } 906 907 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev, 908 enum amdgpu_ras_block block, uint32_t sub_block_index) 909 { 910 struct amdgpu_ras_block_list *node, *tmp; 911 struct amdgpu_ras_block_object *obj; 912 913 if (block >= AMDGPU_RAS_BLOCK__LAST) 914 return NULL; 915 916 if (!amdgpu_ras_is_supported(adev, block)) 917 return NULL; 918 919 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 920 if (!node->ras_obj) { 921 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 922 continue; 923 } 924 925 obj = node->ras_obj; 926 if (obj->ras_block_match) { 927 if (obj->ras_block_match(obj, block, sub_block_index) == 0) 928 return obj; 929 } else { 930 if (amdgpu_ras_block_match_default(obj, block) == 0) 931 return obj; 932 } 933 } 934 935 return NULL; 936 } 937 938 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) 939 { 940 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 941 int ret = 0; 942 943 /* 944 * choosing right query method according to 945 * whether smu support query error information 946 */ 947 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc)); 948 if (ret == -EOPNOTSUPP) { 949 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 950 adev->umc.ras->ras_block.hw_ops->query_ras_error_count) 951 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); 952 953 /* umc query_ras_error_address is also responsible for clearing 954 * error status 955 */ 956 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 957 adev->umc.ras->ras_block.hw_ops->query_ras_error_address) 958 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data); 959 } else if (!ret) { 960 if (adev->umc.ras && 961 adev->umc.ras->ecc_info_query_ras_error_count) 962 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data); 963 964 if (adev->umc.ras && 965 adev->umc.ras->ecc_info_query_ras_error_address) 966 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data); 967 } 968 } 969 970 /* query/inject/cure begin */ 971 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, 972 struct ras_query_if *info) 973 { 974 struct amdgpu_ras_block_object *block_obj = NULL; 975 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 976 struct ras_err_data err_data = {0, 0, 0, NULL}; 977 978 if (!obj) 979 return -EINVAL; 980 981 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { 982 amdgpu_ras_get_ecc_info(adev, &err_data); 983 } else { 984 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); 985 if (!block_obj || !block_obj->hw_ops) { 986 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 987 get_ras_block_str(&info->head)); 988 return -EINVAL; 989 } 990 991 if (block_obj->hw_ops->query_ras_error_count) 992 block_obj->hw_ops->query_ras_error_count(adev, &err_data); 993 994 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || 995 (info->head.block == AMDGPU_RAS_BLOCK__GFX) || 996 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { 997 if (block_obj->hw_ops->query_ras_error_status) 998 block_obj->hw_ops->query_ras_error_status(adev); 999 } 1000 } 1001 1002 obj->err_data.ue_count += err_data.ue_count; 1003 obj->err_data.ce_count += err_data.ce_count; 1004 1005 info->ue_count = obj->err_data.ue_count; 1006 info->ce_count = obj->err_data.ce_count; 1007 1008 if (err_data.ce_count) { 1009 if (adev->smuio.funcs && 1010 adev->smuio.funcs->get_socket_id && 1011 adev->smuio.funcs->get_die_id) { 1012 dev_info(adev->dev, "socket: %d, die: %d " 1013 "%ld correctable hardware errors " 1014 "detected in %s block, no user " 1015 "action is needed.\n", 1016 adev->smuio.funcs->get_socket_id(adev), 1017 adev->smuio.funcs->get_die_id(adev), 1018 obj->err_data.ce_count, 1019 get_ras_block_str(&info->head)); 1020 } else { 1021 dev_info(adev->dev, "%ld correctable hardware errors " 1022 "detected in %s block, no user " 1023 "action is needed.\n", 1024 obj->err_data.ce_count, 1025 get_ras_block_str(&info->head)); 1026 } 1027 } 1028 if (err_data.ue_count) { 1029 if (adev->smuio.funcs && 1030 adev->smuio.funcs->get_socket_id && 1031 adev->smuio.funcs->get_die_id) { 1032 dev_info(adev->dev, "socket: %d, die: %d " 1033 "%ld uncorrectable hardware errors " 1034 "detected in %s block\n", 1035 adev->smuio.funcs->get_socket_id(adev), 1036 adev->smuio.funcs->get_die_id(adev), 1037 obj->err_data.ue_count, 1038 get_ras_block_str(&info->head)); 1039 } else { 1040 dev_info(adev->dev, "%ld uncorrectable hardware errors " 1041 "detected in %s block\n", 1042 obj->err_data.ue_count, 1043 get_ras_block_str(&info->head)); 1044 } 1045 } 1046 1047 return 0; 1048 } 1049 1050 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, 1051 enum amdgpu_ras_block block) 1052 { 1053 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); 1054 1055 if (!amdgpu_ras_is_supported(adev, block)) 1056 return -EINVAL; 1057 1058 if (!block_obj || !block_obj->hw_ops) { 1059 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1060 ras_block_str(block)); 1061 return -EINVAL; 1062 } 1063 1064 if (block_obj->hw_ops->reset_ras_error_count) 1065 block_obj->hw_ops->reset_ras_error_count(adev); 1066 1067 if ((block == AMDGPU_RAS_BLOCK__GFX) || 1068 (block == AMDGPU_RAS_BLOCK__MMHUB)) { 1069 if (block_obj->hw_ops->reset_ras_error_status) 1070 block_obj->hw_ops->reset_ras_error_status(adev); 1071 } 1072 1073 return 0; 1074 } 1075 1076 /* wrapper of psp_ras_trigger_error */ 1077 int amdgpu_ras_error_inject(struct amdgpu_device *adev, 1078 struct ras_inject_if *info) 1079 { 1080 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1081 struct ta_ras_trigger_error_input block_info = { 1082 .block_id = amdgpu_ras_block_to_ta(info->head.block), 1083 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), 1084 .sub_block_index = info->head.sub_block_index, 1085 .address = info->address, 1086 .value = info->value, 1087 }; 1088 int ret = -EINVAL; 1089 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, 1090 info->head.block, 1091 info->head.sub_block_index); 1092 1093 if (!obj) 1094 return -EINVAL; 1095 1096 if (!block_obj || !block_obj->hw_ops) { 1097 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1098 get_ras_block_str(&info->head)); 1099 return -EINVAL; 1100 } 1101 1102 /* Calculate XGMI relative offset */ 1103 if (adev->gmc.xgmi.num_physical_nodes > 1) { 1104 block_info.address = 1105 amdgpu_xgmi_get_relative_phy_addr(adev, 1106 block_info.address); 1107 } 1108 1109 if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { 1110 if (block_obj->hw_ops->ras_error_inject) 1111 ret = block_obj->hw_ops->ras_error_inject(adev, info); 1112 } else { 1113 /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ 1114 if (block_obj->hw_ops->ras_error_inject) 1115 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info); 1116 else /*If not defined .ras_error_inject, use default ras_error_inject*/ 1117 ret = psp_ras_trigger_error(&adev->psp, &block_info); 1118 } 1119 1120 if (ret) 1121 dev_err(adev->dev, "ras inject %s failed %d\n", 1122 get_ras_block_str(&info->head), ret); 1123 1124 return ret; 1125 } 1126 1127 /** 1128 * amdgpu_ras_query_error_count -- Get error counts of all IPs 1129 * @adev: pointer to AMD GPU device 1130 * @ce_count: pointer to an integer to be set to the count of correctible errors. 1131 * @ue_count: pointer to an integer to be set to the count of uncorrectible 1132 * errors. 1133 * 1134 * If set, @ce_count or @ue_count, count and return the corresponding 1135 * error counts in those integer pointers. Return 0 if the device 1136 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. 1137 */ 1138 int amdgpu_ras_query_error_count(struct amdgpu_device *adev, 1139 unsigned long *ce_count, 1140 unsigned long *ue_count) 1141 { 1142 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1143 struct ras_manager *obj; 1144 unsigned long ce, ue; 1145 1146 if (!adev->ras_enabled || !con) 1147 return -EOPNOTSUPP; 1148 1149 /* Don't count since no reporting. 1150 */ 1151 if (!ce_count && !ue_count) 1152 return 0; 1153 1154 ce = 0; 1155 ue = 0; 1156 list_for_each_entry(obj, &con->head, node) { 1157 struct ras_query_if info = { 1158 .head = obj->head, 1159 }; 1160 int res; 1161 1162 res = amdgpu_ras_query_error_status(adev, &info); 1163 if (res) 1164 return res; 1165 1166 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1167 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 1168 if (amdgpu_ras_reset_error_status(adev, info.head.block)) 1169 dev_warn(adev->dev, "Failed to reset error counter and error status"); 1170 } 1171 1172 ce += info.ce_count; 1173 ue += info.ue_count; 1174 } 1175 1176 if (ce_count) 1177 *ce_count = ce; 1178 1179 if (ue_count) 1180 *ue_count = ue; 1181 1182 return 0; 1183 } 1184 /* query/inject/cure end */ 1185 1186 #ifdef __linux__ 1187 1188 /* sysfs begin */ 1189 1190 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1191 struct ras_badpage **bps, unsigned int *count); 1192 1193 static char *amdgpu_ras_badpage_flags_str(unsigned int flags) 1194 { 1195 switch (flags) { 1196 case AMDGPU_RAS_RETIRE_PAGE_RESERVED: 1197 return "R"; 1198 case AMDGPU_RAS_RETIRE_PAGE_PENDING: 1199 return "P"; 1200 case AMDGPU_RAS_RETIRE_PAGE_FAULT: 1201 default: 1202 return "F"; 1203 } 1204 } 1205 1206 /** 1207 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface 1208 * 1209 * It allows user to read the bad pages of vram on the gpu through 1210 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages 1211 * 1212 * It outputs multiple lines, and each line stands for one gpu page. 1213 * 1214 * The format of one line is below, 1215 * gpu pfn : gpu page size : flags 1216 * 1217 * gpu pfn and gpu page size are printed in hex format. 1218 * flags can be one of below character, 1219 * 1220 * R: reserved, this gpu page is reserved and not able to use. 1221 * 1222 * P: pending for reserve, this gpu page is marked as bad, will be reserved 1223 * in next window of page_reserve. 1224 * 1225 * F: unable to reserve. this gpu page can't be reserved due to some reasons. 1226 * 1227 * Examples: 1228 * 1229 * .. code-block:: bash 1230 * 1231 * 0x00000001 : 0x00001000 : R 1232 * 0x00000002 : 0x00001000 : P 1233 * 1234 */ 1235 1236 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, 1237 struct kobject *kobj, struct bin_attribute *attr, 1238 char *buf, loff_t ppos, size_t count) 1239 { 1240 struct amdgpu_ras *con = 1241 container_of(attr, struct amdgpu_ras, badpages_attr); 1242 struct amdgpu_device *adev = con->adev; 1243 const unsigned int element_size = 1244 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1; 1245 unsigned int start = div64_ul(ppos + element_size - 1, element_size); 1246 unsigned int end = div64_ul(ppos + count - 1, element_size); 1247 ssize_t s = 0; 1248 struct ras_badpage *bps = NULL; 1249 unsigned int bps_count = 0; 1250 1251 memset(buf, 0, count); 1252 1253 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) 1254 return 0; 1255 1256 for (; start < end && start < bps_count; start++) 1257 s += scnprintf(&buf[s], element_size + 1, 1258 "0x%08x : 0x%08x : %1s\n", 1259 bps[start].bp, 1260 bps[start].size, 1261 amdgpu_ras_badpage_flags_str(bps[start].flags)); 1262 1263 kfree(bps); 1264 1265 return s; 1266 } 1267 1268 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, 1269 struct device_attribute *attr, char *buf) 1270 { 1271 struct amdgpu_ras *con = 1272 container_of(attr, struct amdgpu_ras, features_attr); 1273 1274 return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); 1275 } 1276 1277 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) 1278 { 1279 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1280 1281 sysfs_remove_file_from_group(&adev->dev->kobj, 1282 &con->badpages_attr.attr, 1283 RAS_FS_NAME); 1284 } 1285 1286 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev) 1287 { 1288 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1289 struct attribute *attrs[] = { 1290 &con->features_attr.attr, 1291 NULL 1292 }; 1293 struct attribute_group group = { 1294 .name = RAS_FS_NAME, 1295 .attrs = attrs, 1296 }; 1297 1298 sysfs_remove_group(&adev->dev->kobj, &group); 1299 1300 return 0; 1301 } 1302 1303 #endif /* __linux__ */ 1304 1305 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, 1306 struct ras_common_if *head) 1307 { 1308 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1309 1310 if (!obj || obj->attr_inuse) 1311 return -EINVAL; 1312 1313 STUB(); 1314 return -ENOSYS; 1315 #ifdef notyet 1316 get_obj(obj); 1317 1318 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name), 1319 "%s_err_count", head->name); 1320 1321 obj->sysfs_attr = (struct device_attribute){ 1322 .attr = { 1323 .name = obj->fs_data.sysfs_name, 1324 .mode = S_IRUGO, 1325 }, 1326 .show = amdgpu_ras_sysfs_read, 1327 }; 1328 sysfs_attr_init(&obj->sysfs_attr.attr); 1329 1330 if (sysfs_add_file_to_group(&adev->dev->kobj, 1331 &obj->sysfs_attr.attr, 1332 RAS_FS_NAME)) { 1333 put_obj(obj); 1334 return -EINVAL; 1335 } 1336 1337 obj->attr_inuse = 1; 1338 1339 return 0; 1340 #endif 1341 } 1342 1343 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, 1344 struct ras_common_if *head) 1345 { 1346 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1347 1348 if (!obj || !obj->attr_inuse) 1349 return -EINVAL; 1350 1351 sysfs_remove_file_from_group(&adev->dev->kobj, 1352 &obj->sysfs_attr.attr, 1353 RAS_FS_NAME); 1354 obj->attr_inuse = 0; 1355 put_obj(obj); 1356 1357 return 0; 1358 } 1359 1360 #ifdef __linux__ 1361 1362 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) 1363 { 1364 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1365 struct ras_manager *obj, *tmp; 1366 1367 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1368 amdgpu_ras_sysfs_remove(adev, &obj->head); 1369 } 1370 1371 if (amdgpu_bad_page_threshold != 0) 1372 amdgpu_ras_sysfs_remove_bad_page_node(adev); 1373 1374 amdgpu_ras_sysfs_remove_feature_node(adev); 1375 1376 return 0; 1377 } 1378 /* sysfs end */ 1379 1380 /** 1381 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors 1382 * 1383 * Normally when there is an uncorrectable error, the driver will reset 1384 * the GPU to recover. However, in the event of an unrecoverable error, 1385 * the driver provides an interface to reboot the system automatically 1386 * in that event. 1387 * 1388 * The following file in debugfs provides that interface: 1389 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot 1390 * 1391 * Usage: 1392 * 1393 * .. code-block:: bash 1394 * 1395 * echo true > .../ras/auto_reboot 1396 * 1397 */ 1398 /* debugfs begin */ 1399 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) 1400 { 1401 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1402 struct drm_minor *minor = adev_to_drm(adev)->primary; 1403 struct dentry *dir; 1404 1405 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); 1406 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, 1407 &amdgpu_ras_debugfs_ctrl_ops); 1408 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, 1409 &amdgpu_ras_debugfs_eeprom_ops); 1410 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, 1411 &con->bad_page_cnt_threshold); 1412 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); 1413 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); 1414 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev, 1415 &amdgpu_ras_debugfs_eeprom_size_ops); 1416 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table", 1417 S_IRUGO, dir, adev, 1418 &amdgpu_ras_debugfs_eeprom_table_ops); 1419 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control); 1420 1421 /* 1422 * After one uncorrectable error happens, usually GPU recovery will 1423 * be scheduled. But due to the known problem in GPU recovery failing 1424 * to bring GPU back, below interface provides one direct way to 1425 * user to reboot system automatically in such case within 1426 * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine 1427 * will never be called. 1428 */ 1429 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot); 1430 1431 /* 1432 * User could set this not to clean up hardware's error count register 1433 * of RAS IPs during ras recovery. 1434 */ 1435 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir, 1436 &con->disable_ras_err_cnt_harvest); 1437 return dir; 1438 } 1439 1440 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, 1441 struct ras_fs_if *head, 1442 struct dentry *dir) 1443 { 1444 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); 1445 1446 if (!obj || !dir) 1447 return; 1448 1449 get_obj(obj); 1450 1451 memcpy(obj->fs_data.debugfs_name, 1452 head->debugfs_name, 1453 sizeof(obj->fs_data.debugfs_name)); 1454 1455 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir, 1456 obj, &amdgpu_ras_debugfs_ops); 1457 } 1458 1459 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) 1460 { 1461 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1462 struct dentry *dir; 1463 struct ras_manager *obj; 1464 struct ras_fs_if fs_info; 1465 1466 /* 1467 * it won't be called in resume path, no need to check 1468 * suspend and gpu reset status 1469 */ 1470 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) 1471 return; 1472 1473 dir = amdgpu_ras_debugfs_create_ctrl_node(adev); 1474 1475 list_for_each_entry(obj, &con->head, node) { 1476 if (amdgpu_ras_is_supported(adev, obj->head.block) && 1477 (obj->attr_inuse == 1)) { 1478 sprintf(fs_info.debugfs_name, "%s_err_inject", 1479 get_ras_block_str(&obj->head)); 1480 fs_info.head = obj->head; 1481 amdgpu_ras_debugfs_create(adev, &fs_info, dir); 1482 } 1483 } 1484 } 1485 1486 /* debugfs end */ 1487 1488 /* ras fs */ 1489 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, 1490 amdgpu_ras_sysfs_badpages_read, NULL, 0); 1491 #endif /* __linux__ */ 1492 static DEVICE_ATTR(features, S_IRUGO, 1493 amdgpu_ras_sysfs_features_read, NULL); 1494 static int amdgpu_ras_fs_init(struct amdgpu_device *adev) 1495 { 1496 #ifdef __linux__ 1497 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1498 struct attribute_group group = { 1499 .name = RAS_FS_NAME, 1500 }; 1501 struct attribute *attrs[] = { 1502 &con->features_attr.attr, 1503 NULL 1504 }; 1505 struct bin_attribute *bin_attrs[] = { 1506 NULL, 1507 NULL, 1508 }; 1509 int r; 1510 1511 /* add features entry */ 1512 con->features_attr = dev_attr_features; 1513 group.attrs = attrs; 1514 sysfs_attr_init(attrs[0]); 1515 1516 if (amdgpu_bad_page_threshold != 0) { 1517 /* add bad_page_features entry */ 1518 bin_attr_gpu_vram_bad_pages.private = NULL; 1519 con->badpages_attr = bin_attr_gpu_vram_bad_pages; 1520 bin_attrs[0] = &con->badpages_attr; 1521 group.bin_attrs = bin_attrs; 1522 sysfs_bin_attr_init(bin_attrs[0]); 1523 } 1524 1525 r = sysfs_create_group(&adev->dev->kobj, &group); 1526 if (r) 1527 dev_err(adev->dev, "Failed to create RAS sysfs group!"); 1528 #endif 1529 1530 return 0; 1531 } 1532 1533 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) 1534 { 1535 #ifdef __linux__ 1536 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1537 struct ras_manager *con_obj, *ip_obj, *tmp; 1538 1539 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1540 list_for_each_entry_safe(con_obj, tmp, &con->head, node) { 1541 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); 1542 if (ip_obj) 1543 put_obj(ip_obj); 1544 } 1545 } 1546 1547 amdgpu_ras_sysfs_remove_all(adev); 1548 #endif 1549 return 0; 1550 } 1551 /* ras fs end */ 1552 1553 /* ih begin */ 1554 1555 /* For the hardware that cannot enable bif ring for both ras_controller_irq 1556 * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status 1557 * register to check whether the interrupt is triggered or not, and properly 1558 * ack the interrupt if it is there 1559 */ 1560 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) 1561 { 1562 /* Fatal error events are handled on host side */ 1563 if (amdgpu_sriov_vf(adev) || 1564 !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) 1565 return; 1566 1567 if (adev->nbio.ras && 1568 adev->nbio.ras->handle_ras_controller_intr_no_bifring) 1569 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); 1570 1571 if (adev->nbio.ras && 1572 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) 1573 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); 1574 } 1575 1576 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj, 1577 struct amdgpu_iv_entry *entry) 1578 { 1579 bool poison_stat = false; 1580 struct amdgpu_device *adev = obj->adev; 1581 struct ras_err_data err_data = {0, 0, 0, NULL}; 1582 struct amdgpu_ras_block_object *block_obj = 1583 amdgpu_ras_get_ras_block(adev, obj->head.block, 0); 1584 1585 if (!block_obj || !block_obj->hw_ops) 1586 return; 1587 1588 /* both query_poison_status and handle_poison_consumption are optional, 1589 * but at least one of them should be implemented if we need poison 1590 * consumption handler 1591 */ 1592 if (block_obj->hw_ops->query_poison_status) { 1593 poison_stat = block_obj->hw_ops->query_poison_status(adev); 1594 if (!poison_stat) { 1595 /* Not poison consumption interrupt, no need to handle it */ 1596 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n", 1597 block_obj->ras_comm.name); 1598 1599 return; 1600 } 1601 } 1602 1603 if (!adev->gmc.xgmi.connected_to_cpu) 1604 amdgpu_umc_poison_handler(adev, &err_data, false); 1605 1606 if (block_obj->hw_ops->handle_poison_consumption) 1607 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); 1608 1609 /* gpu reset is fallback for failed and default cases */ 1610 if (poison_stat) { 1611 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n", 1612 block_obj->ras_comm.name); 1613 amdgpu_ras_reset_gpu(adev); 1614 } 1615 } 1616 1617 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, 1618 struct amdgpu_iv_entry *entry) 1619 { 1620 dev_info(obj->adev->dev, 1621 "Poison is created, no user action is needed.\n"); 1622 } 1623 1624 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, 1625 struct amdgpu_iv_entry *entry) 1626 { 1627 struct ras_ih_data *data = &obj->ih_data; 1628 struct ras_err_data err_data = {0, 0, 0, NULL}; 1629 int ret; 1630 1631 if (!data->cb) 1632 return; 1633 1634 /* Let IP handle its data, maybe we need get the output 1635 * from the callback to update the error type/count, etc 1636 */ 1637 ret = data->cb(obj->adev, &err_data, entry); 1638 /* ue will trigger an interrupt, and in that case 1639 * we need do a reset to recovery the whole system. 1640 * But leave IP do that recovery, here we just dispatch 1641 * the error. 1642 */ 1643 if (ret == AMDGPU_RAS_SUCCESS) { 1644 /* these counts could be left as 0 if 1645 * some blocks do not count error number 1646 */ 1647 obj->err_data.ue_count += err_data.ue_count; 1648 obj->err_data.ce_count += err_data.ce_count; 1649 } 1650 } 1651 1652 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) 1653 { 1654 struct ras_ih_data *data = &obj->ih_data; 1655 struct amdgpu_iv_entry entry; 1656 1657 while (data->rptr != data->wptr) { 1658 rmb(); 1659 memcpy(&entry, &data->ring[data->rptr], 1660 data->element_size); 1661 1662 wmb(); 1663 data->rptr = (data->aligned_element_size + 1664 data->rptr) % data->ring_size; 1665 1666 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) { 1667 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 1668 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry); 1669 else 1670 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry); 1671 } else { 1672 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC) 1673 amdgpu_ras_interrupt_umc_handler(obj, &entry); 1674 else 1675 dev_warn(obj->adev->dev, 1676 "No RAS interrupt handler for non-UMC block with poison disabled.\n"); 1677 } 1678 } 1679 } 1680 1681 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) 1682 { 1683 struct ras_ih_data *data = 1684 container_of(work, struct ras_ih_data, ih_work); 1685 struct ras_manager *obj = 1686 container_of(data, struct ras_manager, ih_data); 1687 1688 amdgpu_ras_interrupt_handler(obj); 1689 } 1690 1691 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, 1692 struct ras_dispatch_if *info) 1693 { 1694 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 1695 struct ras_ih_data *data = &obj->ih_data; 1696 1697 if (!obj) 1698 return -EINVAL; 1699 1700 if (data->inuse == 0) 1701 return 0; 1702 1703 /* Might be overflow... */ 1704 memcpy(&data->ring[data->wptr], info->entry, 1705 data->element_size); 1706 1707 wmb(); 1708 data->wptr = (data->aligned_element_size + 1709 data->wptr) % data->ring_size; 1710 1711 schedule_work(&data->ih_work); 1712 1713 return 0; 1714 } 1715 1716 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, 1717 struct ras_common_if *head) 1718 { 1719 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1720 struct ras_ih_data *data; 1721 1722 if (!obj) 1723 return -EINVAL; 1724 1725 data = &obj->ih_data; 1726 if (data->inuse == 0) 1727 return 0; 1728 1729 cancel_work_sync(&data->ih_work); 1730 1731 kfree(data->ring); 1732 memset(data, 0, sizeof(*data)); 1733 put_obj(obj); 1734 1735 return 0; 1736 } 1737 1738 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, 1739 struct ras_common_if *head) 1740 { 1741 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); 1742 struct ras_ih_data *data; 1743 struct amdgpu_ras_block_object *ras_obj; 1744 1745 if (!obj) { 1746 /* in case we registe the IH before enable ras feature */ 1747 obj = amdgpu_ras_create_obj(adev, head); 1748 if (!obj) 1749 return -EINVAL; 1750 } else 1751 get_obj(obj); 1752 1753 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm); 1754 1755 data = &obj->ih_data; 1756 /* add the callback.etc */ 1757 *data = (struct ras_ih_data) { 1758 .inuse = 0, 1759 .cb = ras_obj->ras_cb, 1760 .element_size = sizeof(struct amdgpu_iv_entry), 1761 .rptr = 0, 1762 .wptr = 0, 1763 }; 1764 1765 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); 1766 1767 data->aligned_element_size = roundup2(data->element_size, 8); 1768 /* the ring can store 64 iv entries. */ 1769 data->ring_size = 64 * data->aligned_element_size; 1770 data->ring = kmalloc(data->ring_size, GFP_KERNEL); 1771 if (!data->ring) { 1772 put_obj(obj); 1773 return -ENOMEM; 1774 } 1775 1776 /* IH is ready */ 1777 data->inuse = 1; 1778 1779 return 0; 1780 } 1781 1782 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) 1783 { 1784 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1785 struct ras_manager *obj, *tmp; 1786 1787 list_for_each_entry_safe(obj, tmp, &con->head, node) { 1788 amdgpu_ras_interrupt_remove_handler(adev, &obj->head); 1789 } 1790 1791 return 0; 1792 } 1793 /* ih end */ 1794 1795 /* traversal all IPs except NBIO to query error counter */ 1796 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) 1797 { 1798 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1799 struct ras_manager *obj; 1800 1801 if (!adev->ras_enabled || !con) 1802 return; 1803 1804 list_for_each_entry(obj, &con->head, node) { 1805 struct ras_query_if info = { 1806 .head = obj->head, 1807 }; 1808 1809 /* 1810 * PCIE_BIF IP has one different isr by ras controller 1811 * interrupt, the specific ras counter query will be 1812 * done in that isr. So skip such block from common 1813 * sync flood interrupt isr calling. 1814 */ 1815 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF) 1816 continue; 1817 1818 /* 1819 * this is a workaround for aldebaran, skip send msg to 1820 * smu to get ecc_info table due to smu handle get ecc 1821 * info table failed temporarily. 1822 * should be removed until smu fix handle ecc_info table. 1823 */ 1824 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && 1825 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))) 1826 continue; 1827 1828 amdgpu_ras_query_error_status(adev, &info); 1829 1830 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1831 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) && 1832 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) { 1833 if (amdgpu_ras_reset_error_status(adev, info.head.block)) 1834 dev_warn(adev->dev, "Failed to reset error counter and error status"); 1835 } 1836 } 1837 } 1838 1839 /* Parse RdRspStatus and WrRspStatus */ 1840 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, 1841 struct ras_query_if *info) 1842 { 1843 struct amdgpu_ras_block_object *block_obj; 1844 /* 1845 * Only two block need to query read/write 1846 * RspStatus at current state 1847 */ 1848 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && 1849 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) 1850 return; 1851 1852 block_obj = amdgpu_ras_get_ras_block(adev, 1853 info->head.block, 1854 info->head.sub_block_index); 1855 1856 if (!block_obj || !block_obj->hw_ops) { 1857 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", 1858 get_ras_block_str(&info->head)); 1859 return; 1860 } 1861 1862 if (block_obj->hw_ops->query_ras_error_status) 1863 block_obj->hw_ops->query_ras_error_status(adev); 1864 1865 } 1866 1867 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) 1868 { 1869 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1870 struct ras_manager *obj; 1871 1872 if (!adev->ras_enabled || !con) 1873 return; 1874 1875 list_for_each_entry(obj, &con->head, node) { 1876 struct ras_query_if info = { 1877 .head = obj->head, 1878 }; 1879 1880 amdgpu_ras_error_status_query(adev, &info); 1881 } 1882 } 1883 1884 /* recovery begin */ 1885 1886 /* return 0 on success. 1887 * caller need free bps. 1888 */ 1889 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, 1890 struct ras_badpage **bps, unsigned int *count) 1891 { 1892 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1893 struct ras_err_handler_data *data; 1894 int i = 0; 1895 int ret = 0, status; 1896 1897 if (!con || !con->eh_data || !bps || !count) 1898 return -EINVAL; 1899 1900 mutex_lock(&con->recovery_lock); 1901 data = con->eh_data; 1902 if (!data || data->count == 0) { 1903 *bps = NULL; 1904 ret = -EINVAL; 1905 goto out; 1906 } 1907 1908 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL); 1909 if (!*bps) { 1910 ret = -ENOMEM; 1911 goto out; 1912 } 1913 1914 for (; i < data->count; i++) { 1915 (*bps)[i] = (struct ras_badpage){ 1916 .bp = data->bps[i].retired_page, 1917 .size = AMDGPU_GPU_PAGE_SIZE, 1918 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, 1919 }; 1920 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, 1921 data->bps[i].retired_page); 1922 if (status == -EBUSY) 1923 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; 1924 else if (status == -ENOENT) 1925 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; 1926 } 1927 1928 *count = data->count; 1929 out: 1930 mutex_unlock(&con->recovery_lock); 1931 return ret; 1932 } 1933 1934 static void amdgpu_ras_do_recovery(struct work_struct *work) 1935 { 1936 struct amdgpu_ras *ras = 1937 container_of(work, struct amdgpu_ras, recovery_work); 1938 struct amdgpu_device *remote_adev = NULL; 1939 struct amdgpu_device *adev = ras->adev; 1940 struct list_head device_list, *device_list_handle = NULL; 1941 1942 if (!ras->disable_ras_err_cnt_harvest) { 1943 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 1944 1945 /* Build list of devices to query RAS related errors */ 1946 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) { 1947 device_list_handle = &hive->device_list; 1948 } else { 1949 INIT_LIST_HEAD(&device_list); 1950 list_add_tail(&adev->gmc.xgmi.head, &device_list); 1951 device_list_handle = &device_list; 1952 } 1953 1954 list_for_each_entry(remote_adev, 1955 device_list_handle, gmc.xgmi.head) { 1956 amdgpu_ras_query_err_status(remote_adev); 1957 amdgpu_ras_log_on_err_counter(remote_adev); 1958 } 1959 1960 amdgpu_put_xgmi_hive(hive); 1961 } 1962 1963 if (amdgpu_device_should_recover_gpu(ras->adev)) { 1964 struct amdgpu_reset_context reset_context; 1965 memset(&reset_context, 0, sizeof(reset_context)); 1966 1967 reset_context.method = AMD_RESET_METHOD_NONE; 1968 reset_context.reset_req_dev = adev; 1969 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); 1970 1971 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); 1972 } 1973 atomic_set(&ras->in_recovery, 0); 1974 } 1975 1976 /* alloc/realloc bps array */ 1977 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, 1978 struct ras_err_handler_data *data, int pages) 1979 { 1980 unsigned int old_space = data->count + data->space_left; 1981 unsigned int new_space = old_space + pages; 1982 unsigned int align_space = roundup2(new_space, 512); 1983 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); 1984 1985 if (!bps) { 1986 return -ENOMEM; 1987 } 1988 1989 if (data->bps) { 1990 memcpy(bps, data->bps, 1991 data->count * sizeof(*data->bps)); 1992 kfree(data->bps); 1993 } 1994 1995 data->bps = bps; 1996 data->space_left += align_space - old_space; 1997 return 0; 1998 } 1999 2000 /* it deal with vram only. */ 2001 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, 2002 struct eeprom_table_record *bps, int pages) 2003 { 2004 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2005 struct ras_err_handler_data *data; 2006 int ret = 0; 2007 uint32_t i; 2008 2009 if (!con || !con->eh_data || !bps || pages <= 0) 2010 return 0; 2011 2012 mutex_lock(&con->recovery_lock); 2013 data = con->eh_data; 2014 if (!data) 2015 goto out; 2016 2017 for (i = 0; i < pages; i++) { 2018 if (amdgpu_ras_check_bad_page_unlock(con, 2019 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) 2020 continue; 2021 2022 if (!data->space_left && 2023 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) { 2024 ret = -ENOMEM; 2025 goto out; 2026 } 2027 2028 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr, 2029 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT, 2030 AMDGPU_GPU_PAGE_SIZE); 2031 2032 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); 2033 data->count++; 2034 data->space_left--; 2035 } 2036 out: 2037 mutex_unlock(&con->recovery_lock); 2038 2039 return ret; 2040 } 2041 2042 /* 2043 * write error record array to eeprom, the function should be 2044 * protected by recovery_lock 2045 */ 2046 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) 2047 { 2048 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2049 struct ras_err_handler_data *data; 2050 struct amdgpu_ras_eeprom_control *control; 2051 int save_count; 2052 2053 if (!con || !con->eh_data) 2054 return 0; 2055 2056 mutex_lock(&con->recovery_lock); 2057 control = &con->eeprom_control; 2058 data = con->eh_data; 2059 save_count = data->count - control->ras_num_recs; 2060 mutex_unlock(&con->recovery_lock); 2061 /* only new entries are saved */ 2062 if (save_count > 0) { 2063 if (amdgpu_ras_eeprom_append(control, 2064 &data->bps[control->ras_num_recs], 2065 save_count)) { 2066 dev_err(adev->dev, "Failed to save EEPROM table data!"); 2067 return -EIO; 2068 } 2069 2070 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); 2071 } 2072 2073 return 0; 2074 } 2075 2076 /* 2077 * read error record array in eeprom and reserve enough space for 2078 * storing new bad pages 2079 */ 2080 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) 2081 { 2082 struct amdgpu_ras_eeprom_control *control = 2083 &adev->psp.ras_context.ras->eeprom_control; 2084 struct eeprom_table_record *bps; 2085 int ret; 2086 2087 /* no bad page record, skip eeprom access */ 2088 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) 2089 return 0; 2090 2091 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL); 2092 if (!bps) 2093 return -ENOMEM; 2094 2095 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs); 2096 if (ret) 2097 dev_err(adev->dev, "Failed to load EEPROM table records!"); 2098 else 2099 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs); 2100 2101 kfree(bps); 2102 return ret; 2103 } 2104 2105 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, 2106 uint64_t addr) 2107 { 2108 struct ras_err_handler_data *data = con->eh_data; 2109 int i; 2110 2111 addr >>= AMDGPU_GPU_PAGE_SHIFT; 2112 for (i = 0; i < data->count; i++) 2113 if (addr == data->bps[i].retired_page) 2114 return true; 2115 2116 return false; 2117 } 2118 2119 /* 2120 * check if an address belongs to bad page 2121 * 2122 * Note: this check is only for umc block 2123 */ 2124 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, 2125 uint64_t addr) 2126 { 2127 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2128 bool ret = false; 2129 2130 if (!con || !con->eh_data) 2131 return ret; 2132 2133 mutex_lock(&con->recovery_lock); 2134 ret = amdgpu_ras_check_bad_page_unlock(con, addr); 2135 mutex_unlock(&con->recovery_lock); 2136 return ret; 2137 } 2138 2139 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, 2140 uint32_t max_count) 2141 { 2142 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2143 2144 /* 2145 * Justification of value bad_page_cnt_threshold in ras structure 2146 * 2147 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length 2148 * in eeprom, and introduce two scenarios accordingly. 2149 * 2150 * Bad page retirement enablement: 2151 * - If amdgpu_bad_page_threshold = -1, 2152 * bad_page_cnt_threshold = typical value by formula. 2153 * 2154 * - When the value from user is 0 < amdgpu_bad_page_threshold < 2155 * max record length in eeprom, use it directly. 2156 * 2157 * Bad page retirement disablement: 2158 * - If amdgpu_bad_page_threshold = 0, bad page retirement 2159 * functionality is disabled, and bad_page_cnt_threshold will 2160 * take no effect. 2161 */ 2162 2163 if (amdgpu_bad_page_threshold < 0) { 2164 u64 val = adev->gmc.mc_vram_size; 2165 2166 do_div(val, RAS_BAD_PAGE_COVER); 2167 con->bad_page_cnt_threshold = min(lower_32_bits(val), 2168 max_count); 2169 } else { 2170 con->bad_page_cnt_threshold = min_t(int, max_count, 2171 amdgpu_bad_page_threshold); 2172 } 2173 } 2174 2175 int amdgpu_ras_recovery_init(struct amdgpu_device *adev) 2176 { 2177 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2178 struct ras_err_handler_data **data; 2179 u32 max_eeprom_records_count = 0; 2180 bool exc_err_limit = false; 2181 int ret; 2182 2183 if (!con || amdgpu_sriov_vf(adev)) 2184 return 0; 2185 2186 /* Allow access to RAS EEPROM via debugfs, when the ASIC 2187 * supports RAS and debugfs is enabled, but when 2188 * adev->ras_enabled is unset, i.e. when "ras_enable" 2189 * module parameter is set to 0. 2190 */ 2191 con->adev = adev; 2192 2193 if (!adev->ras_enabled) 2194 return 0; 2195 2196 data = &con->eh_data; 2197 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); 2198 if (!*data) { 2199 ret = -ENOMEM; 2200 goto out; 2201 } 2202 2203 rw_init(&con->recovery_lock, "rasrec"); 2204 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); 2205 atomic_set(&con->in_recovery, 0); 2206 con->eeprom_control.bad_channel_bitmap = 0; 2207 2208 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(); 2209 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count); 2210 2211 /* Todo: During test the SMU might fail to read the eeprom through I2C 2212 * when the GPU is pending on XGMI reset during probe time 2213 * (Mostly after second bus reset), skip it now 2214 */ 2215 if (adev->gmc.xgmi.pending_reset) 2216 return 0; 2217 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit); 2218 /* 2219 * This calling fails when exc_err_limit is true or 2220 * ret != 0. 2221 */ 2222 if (exc_err_limit || ret) 2223 goto free; 2224 2225 if (con->eeprom_control.ras_num_recs) { 2226 ret = amdgpu_ras_load_bad_pages(adev); 2227 if (ret) 2228 goto free; 2229 2230 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); 2231 2232 if (con->update_channel_flag == true) { 2233 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap); 2234 con->update_channel_flag = false; 2235 } 2236 } 2237 2238 #ifdef CONFIG_X86_MCE_AMD 2239 if ((adev->asic_type == CHIP_ALDEBARAN) && 2240 (adev->gmc.xgmi.connected_to_cpu)) 2241 amdgpu_register_bad_pages_mca_notifier(adev); 2242 #endif 2243 return 0; 2244 2245 free: 2246 kfree((*data)->bps); 2247 kfree(*data); 2248 con->eh_data = NULL; 2249 out: 2250 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret); 2251 2252 /* 2253 * Except error threshold exceeding case, other failure cases in this 2254 * function would not fail amdgpu driver init. 2255 */ 2256 if (!exc_err_limit) 2257 ret = 0; 2258 else 2259 ret = -EINVAL; 2260 2261 return ret; 2262 } 2263 2264 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) 2265 { 2266 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2267 struct ras_err_handler_data *data = con->eh_data; 2268 2269 /* recovery_init failed to init it, fini is useless */ 2270 if (!data) 2271 return 0; 2272 2273 cancel_work_sync(&con->recovery_work); 2274 2275 mutex_lock(&con->recovery_lock); 2276 con->eh_data = NULL; 2277 kfree(data->bps); 2278 kfree(data); 2279 mutex_unlock(&con->recovery_lock); 2280 2281 return 0; 2282 } 2283 /* recovery end */ 2284 2285 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) 2286 { 2287 if (amdgpu_sriov_vf(adev)) { 2288 switch (adev->ip_versions[MP0_HWIP][0]) { 2289 case IP_VERSION(13, 0, 2): 2290 return true; 2291 default: 2292 return false; 2293 } 2294 } 2295 2296 if (adev->asic_type == CHIP_IP_DISCOVERY) { 2297 switch (adev->ip_versions[MP0_HWIP][0]) { 2298 case IP_VERSION(13, 0, 0): 2299 case IP_VERSION(13, 0, 10): 2300 return true; 2301 default: 2302 return false; 2303 } 2304 } 2305 2306 return adev->asic_type == CHIP_VEGA10 || 2307 adev->asic_type == CHIP_VEGA20 || 2308 adev->asic_type == CHIP_ARCTURUS || 2309 adev->asic_type == CHIP_ALDEBARAN || 2310 adev->asic_type == CHIP_SIENNA_CICHLID; 2311 } 2312 2313 /* 2314 * this is workaround for vega20 workstation sku, 2315 * force enable gfx ras, ignore vbios gfx ras flag 2316 * due to GC EDC can not write 2317 */ 2318 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) 2319 { 2320 struct atom_context *ctx = adev->mode_info.atom_context; 2321 2322 if (!ctx) 2323 return; 2324 2325 #ifdef notyet 2326 if (strnstr(ctx->vbios_version, "D16406", 2327 sizeof(ctx->vbios_version)) || 2328 strnstr(ctx->vbios_version, "D36002", 2329 sizeof(ctx->vbios_version))) 2330 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); 2331 #endif 2332 } 2333 2334 /* 2335 * check hardware's ras ability which will be saved in hw_supported. 2336 * if hardware does not support ras, we can skip some ras initializtion and 2337 * forbid some ras operations from IP. 2338 * if software itself, say boot parameter, limit the ras ability. We still 2339 * need allow IP do some limited operations, like disable. In such case, 2340 * we have to initialize ras as normal. but need check if operation is 2341 * allowed or not in each function. 2342 */ 2343 static void amdgpu_ras_check_supported(struct amdgpu_device *adev) 2344 { 2345 adev->ras_hw_enabled = adev->ras_enabled = 0; 2346 2347 if (!adev->is_atom_fw || 2348 !amdgpu_ras_asic_supported(adev)) 2349 return; 2350 2351 if (!adev->gmc.xgmi.connected_to_cpu) { 2352 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { 2353 dev_info(adev->dev, "MEM ECC is active.\n"); 2354 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | 2355 1 << AMDGPU_RAS_BLOCK__DF); 2356 } else { 2357 dev_info(adev->dev, "MEM ECC is not presented.\n"); 2358 } 2359 2360 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { 2361 dev_info(adev->dev, "SRAM ECC is active.\n"); 2362 if (!amdgpu_sriov_vf(adev)) { 2363 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 2364 1 << AMDGPU_RAS_BLOCK__DF); 2365 2366 if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0)) 2367 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 2368 1 << AMDGPU_RAS_BLOCK__JPEG); 2369 else 2370 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 2371 1 << AMDGPU_RAS_BLOCK__JPEG); 2372 } else { 2373 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | 2374 1 << AMDGPU_RAS_BLOCK__SDMA | 2375 1 << AMDGPU_RAS_BLOCK__GFX); 2376 } 2377 } else { 2378 dev_info(adev->dev, "SRAM ECC is not presented.\n"); 2379 } 2380 } else { 2381 /* driver only manages a few IP blocks RAS feature 2382 * when GPU is connected cpu through XGMI */ 2383 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | 2384 1 << AMDGPU_RAS_BLOCK__SDMA | 2385 1 << AMDGPU_RAS_BLOCK__MMHUB); 2386 } 2387 2388 amdgpu_ras_get_quirks(adev); 2389 2390 /* hw_supported needs to be aligned with RAS block mask. */ 2391 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; 2392 2393 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : 2394 adev->ras_hw_enabled & amdgpu_ras_mask; 2395 } 2396 2397 static void amdgpu_ras_counte_dw(struct work_struct *work) 2398 { 2399 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 2400 ras_counte_delay_work.work); 2401 struct amdgpu_device *adev = con->adev; 2402 struct drm_device *dev = adev_to_drm(adev); 2403 unsigned long ce_count, ue_count; 2404 int res; 2405 2406 res = pm_runtime_get_sync(dev->dev); 2407 if (res < 0) 2408 goto Out; 2409 2410 /* Cache new values. 2411 */ 2412 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { 2413 atomic_set(&con->ras_ce_count, ce_count); 2414 atomic_set(&con->ras_ue_count, ue_count); 2415 } 2416 2417 pm_runtime_mark_last_busy(dev->dev); 2418 Out: 2419 pm_runtime_put_autosuspend(dev->dev); 2420 } 2421 2422 int amdgpu_ras_init(struct amdgpu_device *adev) 2423 { 2424 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2425 int r; 2426 bool df_poison, umc_poison; 2427 2428 if (con) 2429 return 0; 2430 2431 con = kmalloc(sizeof(struct amdgpu_ras) + 2432 sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + 2433 sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, 2434 GFP_KERNEL|__GFP_ZERO); 2435 if (!con) 2436 return -ENOMEM; 2437 2438 con->adev = adev; 2439 INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); 2440 atomic_set(&con->ras_ce_count, 0); 2441 atomic_set(&con->ras_ue_count, 0); 2442 2443 con->objs = (struct ras_manager *)(con + 1); 2444 2445 amdgpu_ras_set_context(adev, con); 2446 2447 amdgpu_ras_check_supported(adev); 2448 2449 if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { 2450 /* set gfx block ras context feature for VEGA20 Gaming 2451 * send ras disable cmd to ras ta during ras late init. 2452 */ 2453 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { 2454 con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); 2455 2456 return 0; 2457 } 2458 2459 r = 0; 2460 goto release_con; 2461 } 2462 2463 con->update_channel_flag = false; 2464 con->features = 0; 2465 INIT_LIST_HEAD(&con->head); 2466 /* Might need get this flag from vbios. */ 2467 con->flags = RAS_DEFAULT_FLAGS; 2468 2469 /* initialize nbio ras function ahead of any other 2470 * ras functions so hardware fatal error interrupt 2471 * can be enabled as early as possible */ 2472 switch (adev->asic_type) { 2473 case CHIP_VEGA20: 2474 case CHIP_ARCTURUS: 2475 case CHIP_ALDEBARAN: 2476 if (!adev->gmc.xgmi.connected_to_cpu) { 2477 adev->nbio.ras = &nbio_v7_4_ras; 2478 amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block); 2479 adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm; 2480 } 2481 break; 2482 default: 2483 /* nbio ras is not available */ 2484 break; 2485 } 2486 2487 if (adev->nbio.ras && 2488 adev->nbio.ras->init_ras_controller_interrupt) { 2489 r = adev->nbio.ras->init_ras_controller_interrupt(adev); 2490 if (r) 2491 goto release_con; 2492 } 2493 2494 if (adev->nbio.ras && 2495 adev->nbio.ras->init_ras_err_event_athub_interrupt) { 2496 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); 2497 if (r) 2498 goto release_con; 2499 } 2500 2501 /* Init poison supported flag, the default value is false */ 2502 if (adev->gmc.xgmi.connected_to_cpu) { 2503 /* enabled by default when GPU is connected to CPU */ 2504 con->poison_supported = true; 2505 } 2506 else if (adev->df.funcs && 2507 adev->df.funcs->query_ras_poison_mode && 2508 adev->umc.ras && 2509 adev->umc.ras->query_ras_poison_mode) { 2510 df_poison = 2511 adev->df.funcs->query_ras_poison_mode(adev); 2512 umc_poison = 2513 adev->umc.ras->query_ras_poison_mode(adev); 2514 /* Only poison is set in both DF and UMC, we can support it */ 2515 if (df_poison && umc_poison) 2516 con->poison_supported = true; 2517 else if (df_poison != umc_poison) 2518 dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", 2519 df_poison, umc_poison); 2520 } 2521 2522 if (amdgpu_ras_fs_init(adev)) { 2523 r = -EINVAL; 2524 goto release_con; 2525 } 2526 2527 dev_info(adev->dev, "RAS INFO: ras initialized successfully, " 2528 "hardware ability[%x] ras_mask[%x]\n", 2529 adev->ras_hw_enabled, adev->ras_enabled); 2530 2531 return 0; 2532 release_con: 2533 amdgpu_ras_set_context(adev, NULL); 2534 kfree(con); 2535 2536 return r; 2537 } 2538 2539 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) 2540 { 2541 if (adev->gmc.xgmi.connected_to_cpu) 2542 return 1; 2543 return 0; 2544 } 2545 2546 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, 2547 struct ras_common_if *ras_block) 2548 { 2549 struct ras_query_if info = { 2550 .head = *ras_block, 2551 }; 2552 2553 if (!amdgpu_persistent_edc_harvesting_supported(adev)) 2554 return 0; 2555 2556 if (amdgpu_ras_query_error_status(adev, &info) != 0) 2557 DRM_WARN("RAS init harvest failure"); 2558 2559 if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0) 2560 DRM_WARN("RAS init harvest reset failure"); 2561 2562 return 0; 2563 } 2564 2565 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) 2566 { 2567 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2568 2569 if (!con) 2570 return false; 2571 2572 return con->poison_supported; 2573 } 2574 2575 /* helper function to handle common stuff in ip late init phase */ 2576 int amdgpu_ras_block_late_init(struct amdgpu_device *adev, 2577 struct ras_common_if *ras_block) 2578 { 2579 struct amdgpu_ras_block_object *ras_obj = NULL; 2580 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2581 unsigned long ue_count, ce_count; 2582 int r; 2583 2584 /* disable RAS feature per IP block if it is not supported */ 2585 if (!amdgpu_ras_is_supported(adev, ras_block->block)) { 2586 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); 2587 return 0; 2588 } 2589 2590 r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); 2591 if (r) { 2592 if (adev->in_suspend || amdgpu_in_reset(adev)) { 2593 /* in resume phase, if fail to enable ras, 2594 * clean up all ras fs nodes, and disable ras */ 2595 goto cleanup; 2596 } else 2597 return r; 2598 } 2599 2600 /* check for errors on warm reset edc persisant supported ASIC */ 2601 amdgpu_persistent_edc_harvesting(adev, ras_block); 2602 2603 /* in resume phase, no need to create ras fs node */ 2604 if (adev->in_suspend || amdgpu_in_reset(adev)) 2605 return 0; 2606 2607 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 2608 if (ras_obj->ras_cb || (ras_obj->hw_ops && 2609 (ras_obj->hw_ops->query_poison_status || 2610 ras_obj->hw_ops->handle_poison_consumption))) { 2611 r = amdgpu_ras_interrupt_add_handler(adev, ras_block); 2612 if (r) 2613 goto cleanup; 2614 } 2615 2616 r = amdgpu_ras_sysfs_create(adev, ras_block); 2617 if (r) 2618 goto interrupt; 2619 2620 /* Those are the cached values at init. 2621 */ 2622 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { 2623 atomic_set(&con->ras_ce_count, ce_count); 2624 atomic_set(&con->ras_ue_count, ue_count); 2625 } 2626 2627 return 0; 2628 2629 interrupt: 2630 if (ras_obj->ras_cb) 2631 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 2632 cleanup: 2633 amdgpu_ras_feature_enable(adev, ras_block, 0); 2634 return r; 2635 } 2636 2637 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev, 2638 struct ras_common_if *ras_block) 2639 { 2640 return amdgpu_ras_block_late_init(adev, ras_block); 2641 } 2642 2643 /* helper function to remove ras fs node and interrupt handler */ 2644 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev, 2645 struct ras_common_if *ras_block) 2646 { 2647 struct amdgpu_ras_block_object *ras_obj; 2648 if (!ras_block) 2649 return; 2650 2651 amdgpu_ras_sysfs_remove(adev, ras_block); 2652 2653 ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); 2654 if (ras_obj->ras_cb) 2655 amdgpu_ras_interrupt_remove_handler(adev, ras_block); 2656 } 2657 2658 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev, 2659 struct ras_common_if *ras_block) 2660 { 2661 return amdgpu_ras_block_late_fini(adev, ras_block); 2662 } 2663 2664 /* do some init work after IP late init as dependence. 2665 * and it runs in resume/gpu reset/booting up cases. 2666 */ 2667 void amdgpu_ras_resume(struct amdgpu_device *adev) 2668 { 2669 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2670 struct ras_manager *obj, *tmp; 2671 2672 if (!adev->ras_enabled || !con) { 2673 /* clean ras context for VEGA20 Gaming after send ras disable cmd */ 2674 amdgpu_release_ras_context(adev); 2675 2676 return; 2677 } 2678 2679 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { 2680 /* Set up all other IPs which are not implemented. There is a 2681 * tricky thing that IP's actual ras error type should be 2682 * MULTI_UNCORRECTABLE, but as driver does not handle it, so 2683 * ERROR_NONE make sense anyway. 2684 */ 2685 amdgpu_ras_enable_all_features(adev, 1); 2686 2687 /* We enable ras on all hw_supported block, but as boot 2688 * parameter might disable some of them and one or more IP has 2689 * not implemented yet. So we disable them on behalf. 2690 */ 2691 list_for_each_entry_safe(obj, tmp, &con->head, node) { 2692 if (!amdgpu_ras_is_supported(adev, obj->head.block)) { 2693 amdgpu_ras_feature_enable(adev, &obj->head, 0); 2694 /* there should be no any reference. */ 2695 WARN_ON(alive_obj(obj)); 2696 } 2697 } 2698 } 2699 } 2700 2701 void amdgpu_ras_suspend(struct amdgpu_device *adev) 2702 { 2703 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2704 2705 if (!adev->ras_enabled || !con) 2706 return; 2707 2708 amdgpu_ras_disable_all_features(adev, 0); 2709 /* Make sure all ras objects are disabled. */ 2710 if (con->features) 2711 amdgpu_ras_disable_all_features(adev, 1); 2712 } 2713 2714 int amdgpu_ras_late_init(struct amdgpu_device *adev) 2715 { 2716 struct amdgpu_ras_block_list *node, *tmp; 2717 struct amdgpu_ras_block_object *obj; 2718 int r; 2719 2720 /* Guest side doesn't need init ras feature */ 2721 if (amdgpu_sriov_vf(adev)) 2722 return 0; 2723 2724 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { 2725 if (!node->ras_obj) { 2726 dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); 2727 continue; 2728 } 2729 2730 obj = node->ras_obj; 2731 if (obj->ras_late_init) { 2732 r = obj->ras_late_init(adev, &obj->ras_comm); 2733 if (r) { 2734 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n", 2735 obj->ras_comm.name, r); 2736 return r; 2737 } 2738 } else 2739 amdgpu_ras_block_late_init_default(adev, &obj->ras_comm); 2740 } 2741 2742 return 0; 2743 } 2744 2745 /* do some fini work before IP fini as dependence */ 2746 int amdgpu_ras_pre_fini(struct amdgpu_device *adev) 2747 { 2748 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2749 2750 if (!adev->ras_enabled || !con) 2751 return 0; 2752 2753 2754 /* Need disable ras on all IPs here before ip [hw/sw]fini */ 2755 if (con->features) 2756 amdgpu_ras_disable_all_features(adev, 0); 2757 amdgpu_ras_recovery_fini(adev); 2758 return 0; 2759 } 2760 2761 int amdgpu_ras_fini(struct amdgpu_device *adev) 2762 { 2763 struct amdgpu_ras_block_list *ras_node, *tmp; 2764 struct amdgpu_ras_block_object *obj = NULL; 2765 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2766 2767 if (!adev->ras_enabled || !con) 2768 return 0; 2769 2770 list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { 2771 if (ras_node->ras_obj) { 2772 obj = ras_node->ras_obj; 2773 if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) && 2774 obj->ras_fini) 2775 obj->ras_fini(adev, &obj->ras_comm); 2776 else 2777 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm); 2778 } 2779 2780 /* Clear ras blocks from ras_list and free ras block list node */ 2781 list_del(&ras_node->node); 2782 kfree(ras_node); 2783 } 2784 2785 amdgpu_ras_fs_fini(adev); 2786 amdgpu_ras_interrupt_remove_all(adev); 2787 2788 WARN(con->features, "Feature mask is not cleared"); 2789 2790 if (con->features) 2791 amdgpu_ras_disable_all_features(adev, 1); 2792 2793 cancel_delayed_work_sync(&con->ras_counte_delay_work); 2794 2795 amdgpu_ras_set_context(adev, NULL); 2796 kfree(con); 2797 2798 return 0; 2799 } 2800 2801 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) 2802 { 2803 amdgpu_ras_check_supported(adev); 2804 if (!adev->ras_hw_enabled) 2805 return; 2806 2807 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { 2808 dev_info(adev->dev, "uncorrectable hardware error" 2809 "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); 2810 2811 amdgpu_ras_reset_gpu(adev); 2812 } 2813 } 2814 2815 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) 2816 { 2817 if (adev->asic_type == CHIP_VEGA20 && 2818 adev->pm.fw_version <= 0x283400) { 2819 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && 2820 amdgpu_ras_intr_triggered(); 2821 } 2822 2823 return false; 2824 } 2825 2826 void amdgpu_release_ras_context(struct amdgpu_device *adev) 2827 { 2828 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2829 2830 if (!con) 2831 return; 2832 2833 if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { 2834 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); 2835 amdgpu_ras_set_context(adev, NULL); 2836 kfree(con); 2837 } 2838 } 2839 2840 #ifdef CONFIG_X86_MCE_AMD 2841 static struct amdgpu_device *find_adev(uint32_t node_id) 2842 { 2843 int i; 2844 struct amdgpu_device *adev = NULL; 2845 2846 for (i = 0; i < mce_adev_list.num_gpu; i++) { 2847 adev = mce_adev_list.devs[i]; 2848 2849 if (adev && adev->gmc.xgmi.connected_to_cpu && 2850 adev->gmc.xgmi.physical_node_id == node_id) 2851 break; 2852 adev = NULL; 2853 } 2854 2855 return adev; 2856 } 2857 2858 #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) 2859 #define GET_UMC_INST(m) (((m) >> 21) & 0x7) 2860 #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) 2861 #define GPU_ID_OFFSET 8 2862 2863 static int amdgpu_bad_page_notifier(struct notifier_block *nb, 2864 unsigned long val, void *data) 2865 { 2866 struct mce *m = (struct mce *)data; 2867 struct amdgpu_device *adev = NULL; 2868 uint32_t gpu_id = 0; 2869 uint32_t umc_inst = 0, ch_inst = 0; 2870 struct ras_err_data err_data = {0, 0, 0, NULL}; 2871 2872 /* 2873 * If the error was generated in UMC_V2, which belongs to GPU UMCs, 2874 * and error occurred in DramECC (Extended error code = 0) then only 2875 * process the error, else bail out. 2876 */ 2877 if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && 2878 (XEC(m->status, 0x3f) == 0x0))) 2879 return NOTIFY_DONE; 2880 2881 /* 2882 * If it is correctable error, return. 2883 */ 2884 if (mce_is_correctable(m)) 2885 return NOTIFY_OK; 2886 2887 /* 2888 * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. 2889 */ 2890 gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; 2891 2892 adev = find_adev(gpu_id); 2893 if (!adev) { 2894 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, 2895 gpu_id); 2896 return NOTIFY_DONE; 2897 } 2898 2899 /* 2900 * If it is uncorrectable error, then find out UMC instance and 2901 * channel index. 2902 */ 2903 umc_inst = GET_UMC_INST(m->ipid); 2904 ch_inst = GET_CHAN_INDEX(m->ipid); 2905 2906 dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", 2907 umc_inst, ch_inst); 2908 2909 err_data.err_addr = 2910 kcalloc(adev->umc.max_ras_err_cnt_per_query, 2911 sizeof(struct eeprom_table_record), GFP_KERNEL); 2912 if (!err_data.err_addr) { 2913 dev_warn(adev->dev, 2914 "Failed to alloc memory for umc error record in mca notifier!\n"); 2915 return NOTIFY_DONE; 2916 } 2917 2918 /* 2919 * Translate UMC channel address to Physical address 2920 */ 2921 if (adev->umc.ras && 2922 adev->umc.ras->convert_ras_error_address) 2923 adev->umc.ras->convert_ras_error_address(adev, 2924 &err_data, m->addr, ch_inst, umc_inst); 2925 2926 if (amdgpu_bad_page_threshold != 0) { 2927 amdgpu_ras_add_bad_pages(adev, err_data.err_addr, 2928 err_data.err_addr_cnt); 2929 amdgpu_ras_save_bad_pages(adev); 2930 } 2931 2932 kfree(err_data.err_addr); 2933 return NOTIFY_OK; 2934 } 2935 2936 static struct notifier_block amdgpu_bad_page_nb = { 2937 .notifier_call = amdgpu_bad_page_notifier, 2938 .priority = MCE_PRIO_UC, 2939 }; 2940 2941 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) 2942 { 2943 /* 2944 * Add the adev to the mce_adev_list. 2945 * During mode2 reset, amdgpu device is temporarily 2946 * removed from the mgpu_info list which can cause 2947 * page retirement to fail. 2948 * Use this list instead of mgpu_info to find the amdgpu 2949 * device on which the UMC error was reported. 2950 */ 2951 mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; 2952 2953 /* 2954 * Register the x86 notifier only once 2955 * with MCE subsystem. 2956 */ 2957 if (notifier_registered == false) { 2958 mce_register_decode_chain(&amdgpu_bad_page_nb); 2959 notifier_registered = true; 2960 } 2961 } 2962 #endif 2963 2964 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) 2965 { 2966 if (!adev) 2967 return NULL; 2968 2969 return adev->psp.ras_context.ras; 2970 } 2971 2972 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) 2973 { 2974 if (!adev) 2975 return -EINVAL; 2976 2977 adev->psp.ras_context.ras = ras_con; 2978 return 0; 2979 } 2980 2981 /* check if ras is supported on block, say, sdma, gfx */ 2982 int amdgpu_ras_is_supported(struct amdgpu_device *adev, 2983 unsigned int block) 2984 { 2985 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 2986 2987 if (block >= AMDGPU_RAS_BLOCK_COUNT) 2988 return 0; 2989 return ras && (adev->ras_enabled & (1 << block)); 2990 } 2991 2992 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) 2993 { 2994 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 2995 2996 if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) 2997 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work); 2998 return 0; 2999 } 3000 3001 3002 /* Register each ip ras block into amdgpu ras */ 3003 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, 3004 struct amdgpu_ras_block_object *ras_block_obj) 3005 { 3006 struct amdgpu_ras_block_list *ras_node; 3007 if (!adev || !ras_block_obj) 3008 return -EINVAL; 3009 3010 if (!amdgpu_ras_asic_supported(adev)) 3011 return 0; 3012 3013 ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL); 3014 if (!ras_node) 3015 return -ENOMEM; 3016 3017 INIT_LIST_HEAD(&ras_node->node); 3018 ras_node->ras_obj = ras_block_obj; 3019 list_add_tail(&ras_node->node, &adev->ras_list); 3020 3021 return 0; 3022 } 3023