/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"

#define SWITCH_CONTEXT_US	10

#define HNS3_CHECK_MERGE_CNT(val)			\
	do {						\
		if (val)				\
			hw->reset.stats.merge_cnt++;	\
	} while (0)

static const char *reset_string[HNS3_MAX_RESET] = {
	"flr", "vf_func", "vf_pf_func", "vf_full", "vf_global",
	"pf_func", "global", "IMP", "none",
};
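/*
 * Each error table below is a NULL-terminated list of hardware error
 * sources for one status word: 'int_msk' is the status bit to test,
 * 'msg' is the log string for that bit, and 'reset_level' is the reset
 * severity requested when the bit is set (HNS3_NONE_RESET means log only).
 * hns3_find_highest_level() walks these tables and keeps the highest
 * requested level.
 */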
static const struct hns3_hw_error mac_afifo_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st1[] = {
	{ .int_msk = 0xFFFFFFFF, .msg = "rpu_rx_pkt_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_ras[] = {
	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "rd_bus_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "wr_bus_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_msix[] = {
	{ .int_msk = BIT(29), .msg = "rx_q_search_miss", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_port_based_pf_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "low_water_line_err_port", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppp_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};
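/*
 * PPU abnormal interrupt status is reported through both the RAS and the
 * MSIx path, so its bits are split across paired *_ras/*_msix tables
 * (ppu_pf_abnormal_int_ras/_msix below, ppu_mpf_abnormal_int_st2_ras/_msix
 * above); each table lists only the bits handled by that path.
 */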
static const struct hns3_hw_error ppu_pf_abnormal_int_ras[] = {
	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_pf_abnormal_int_msix[] = {
	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "buf_wait_timeout", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error imp_tcm_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error cmdq_mem_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error tqp_int_ecc_int[] = {
	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(10), .msg = "tx_queue_scan_int_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "rx_queue_scan_int_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error imp_rd_poison_int[] = {
	{ .int_msk = BIT(0), .msg = "imp_rd_poison_int", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};
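/*
 * Helper for the 32 per-memory SSU ECC entries in ssu_ecc_multi_bit_int_0.
 * For illustration only: HNS3_SSU_MEM_ECC_ERR(3) expands to
 * { .int_msk = BIT(3), .msg = "ssu_mem3_ecc_mbit_err",
 *   .reset_level = HNS3_GLOBAL_RESET }.
 */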
#define HNS3_SSU_MEM_ECC_ERR(x)				\
	{						\
		.int_msk = BIT(x),			\
		.msg = "ssu_mem" #x "_ecc_mbit_err",	\
		.reset_level = HNS3_GLOBAL_RESET	\
	}

static const struct hns3_hw_error ssu_ecc_multi_bit_int_0[] = {
	HNS3_SSU_MEM_ECC_ERR(0),
	HNS3_SSU_MEM_ECC_ERR(1),
	HNS3_SSU_MEM_ECC_ERR(2),
	HNS3_SSU_MEM_ECC_ERR(3),
	HNS3_SSU_MEM_ECC_ERR(4),
	HNS3_SSU_MEM_ECC_ERR(5),
	HNS3_SSU_MEM_ECC_ERR(6),
	HNS3_SSU_MEM_ECC_ERR(7),
	HNS3_SSU_MEM_ECC_ERR(8),
	HNS3_SSU_MEM_ECC_ERR(9),
	HNS3_SSU_MEM_ECC_ERR(10),
	HNS3_SSU_MEM_ECC_ERR(11),
	HNS3_SSU_MEM_ECC_ERR(12),
	HNS3_SSU_MEM_ECC_ERR(13),
	HNS3_SSU_MEM_ECC_ERR(14),
	HNS3_SSU_MEM_ECC_ERR(15),
	HNS3_SSU_MEM_ECC_ERR(16),
	HNS3_SSU_MEM_ECC_ERR(17),
	HNS3_SSU_MEM_ECC_ERR(18),
	HNS3_SSU_MEM_ECC_ERR(19),
	HNS3_SSU_MEM_ECC_ERR(20),
	HNS3_SSU_MEM_ECC_ERR(21),
	HNS3_SSU_MEM_ECC_ERR(22),
	HNS3_SSU_MEM_ECC_ERR(23),
	HNS3_SSU_MEM_ECC_ERR(24),
	HNS3_SSU_MEM_ECC_ERR(25),
	HNS3_SSU_MEM_ECC_ERR(26),
	HNS3_SSU_MEM_ECC_ERR(27),
	HNS3_SSU_MEM_ECC_ERR(28),
	HNS3_SSU_MEM_ECC_ERR(29),
	HNS3_SSU_MEM_ECC_ERR(30),
	HNS3_SSU_MEM_ECC_ERR(31),
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_ecc_multi_bit_int_1[] = {
	{ .int_msk = BIT(0), .msg = "ssu_mem32_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_common_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "buf_sum_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(2), .msg = "ppp_mbid_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "cks_edit_position_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "vlan_num_in_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error igu_int[] = {
	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error msix_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};
static const struct hns3_hw_error ppp_mpf_abnormal_int_st1[] = {
	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppp_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};
static const struct hns3_hw_error ppu_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error tm_sch_int[] = {
	{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error qcn_fifo_int[] = {
	{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error qcn_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ncsi_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_fifo_overflow_int[] = {
	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "ig_host_inf_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};
static const struct hns3_hw_error ssu_ets_tcg_int[] = {
	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error igu_egu_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "rx_buf_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "tx_buf_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tx_buf_underrun", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_port_based_err_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};
static const struct hns3_hw_error_desc mpf_ras_err_tbl[] = {
	{ .desc_offset = 0, .data_offset = 0, .msg = "IMP_TCM_ECC_INT_STS", .hw_err = imp_tcm_ecc_int },
	{ .desc_offset = 0, .data_offset = 1, .msg = "CMDQ_MEM_ECC_INT_STS", .hw_err = cmdq_mem_ecc_int },
	{ .desc_offset = 0, .data_offset = 2, .msg = "IMP_RD_POISON_INT_STS", .hw_err = imp_rd_poison_int },
	{ .desc_offset = 0, .data_offset = 3, .msg = "TQP_INT_ECC_INT_STS", .hw_err = tqp_int_ecc_int },
	{ .desc_offset = 0, .data_offset = 4, .msg = "MSIX_ECC_INT_STS", .hw_err = msix_ecc_int },
	{ .desc_offset = 2, .data_offset = 2, .msg = "SSU_ECC_MULTI_BIT_INT_0", .hw_err = ssu_ecc_multi_bit_int_0 },
	{ .desc_offset = 2, .data_offset = 3, .msg = "SSU_ECC_MULTI_BIT_INT_1", .hw_err = ssu_ecc_multi_bit_int_1 },
	{ .desc_offset = 2, .data_offset = 4, .msg = "SSU_COMMON_ERR_INT", .hw_err = ssu_common_ecc_int },
	{ .desc_offset = 3, .data_offset = 0, .msg = "IGU_INT_STS", .hw_err = igu_int },
	{ .desc_offset = 4, .data_offset = 1, .msg = "PPP_MPF_ABNORMAL_INT_ST1", .hw_err = ppp_mpf_abnormal_int_st1 },
	{ .desc_offset = 4, .data_offset = 3, .msg = "PPP_MPF_ABNORMAL_INT_ST3", .hw_err = ppp_mpf_abnormal_int_st3 },
	{ .desc_offset = 5, .data_offset = 1, .msg = "PPU_MPF_ABNORMAL_INT_ST1", .hw_err = ppu_mpf_abnormal_int_st1 },
	{ .desc_offset = 5, .data_offset = 2, .msg = "PPU_MPF_ABNORMAL_INT_ST2_RAS", .hw_err = ppu_mpf_abnormal_int_st2_ras },
	{ .desc_offset = 5, .data_offset = 3, .msg = "PPU_MPF_ABNORMAL_INT_ST3", .hw_err = ppu_mpf_abnormal_int_st3 },
	{ .desc_offset = 6, .data_offset = 0, .msg = "TM_SCH_RINT", .hw_err = tm_sch_int },
	{ .desc_offset = 7, .data_offset = 0, .msg = "QCN_FIFO_RINT", .hw_err = qcn_fifo_int },
	{ .desc_offset = 7, .data_offset = 1, .msg = "QCN_ECC_RINT", .hw_err = qcn_ecc_int },
	{ .desc_offset = 9, .data_offset = 0, .msg = "NCSI_ECC_INT_RPT", .hw_err = ncsi_ecc_int },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

static const struct hns3_hw_error_desc pf_ras_err_tbl[] = {
	{ .desc_offset = 0, .data_offset = 0, .msg = "SSU_PORT_BASED_ERR_INT_RAS", .hw_err = ssu_port_based_err_int },
	{ .desc_offset = 0, .data_offset = 1, .msg = "SSU_FIFO_OVERFLOW_INT", .hw_err = ssu_fifo_overflow_int },
	{ .desc_offset = 0, .data_offset = 2, .msg = "SSU_ETS_TCG_INT", .hw_err = ssu_ets_tcg_int },
	{ .desc_offset = 1, .data_offset = 0, .msg = "IGU_EGU_TNL_INT_STS", .hw_err = igu_egu_tnl_int },
	{ .desc_offset = 3, .data_offset = 0, .msg = "PPU_PF_ABNORMAL_INT_ST_RAS", .hw_err = ppu_pf_abnormal_int_ras },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

static const struct hns3_hw_error_desc mpf_msix_err_tbl[] = {
	{ .desc_offset = 1, .data_offset = 0, .msg = "MAC_AFIFO_TNL_INT_R", .hw_err = mac_afifo_tnl_int },
	{ .desc_offset = 5, .data_offset = 2, .msg = "PPU_MPF_ABNORMAL_INT_ST2_MSIX", .hw_err = ppu_mpf_abnormal_int_st2_msix },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

static const struct hns3_hw_error_desc pf_msix_err_tbl[] = {
	{ .desc_offset = 0, .data_offset = 0, .msg = "SSU_PORT_BASED_ERR_INT_MSIX", .hw_err = ssu_port_based_pf_int },
	{ .desc_offset = 2, .data_offset = 0, .msg = "PPP_PF_ABNORMAL_INT_ST0", .hw_err = ppp_pf_abnormal_int },
	{ .desc_offset = 3, .data_offset = 0, .msg = "PPU_PF_ABNORMAL_INT_ST_MSIX", .hw_err = ppu_pf_abnormal_int_msix },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

enum hns3_hw_err_report_type {
	MPF_MSIX_ERR,
	PF_MSIX_ERR,
	MPF_RAS_ERR,
	PF_RAS_ERR,
};

static const struct hns3_hw_mod_name hns3_hw_module_name[] = {
	{ .module_name = MODULE_NONE, .msg = "MODULE_NONE" },
	{ .module_name = MODULE_BIOS_COMMON, .msg = "MODULE_BIOS_COMMON" },
	{ .module_name = MODULE_GE, .msg = "MODULE_GE" },
	{ .module_name = MODULE_IGU_EGU, .msg = "MODULE_IGU_EGU" },
	{ .module_name = MODULE_LGE, .msg = "MODULE_LGE" },
	{ .module_name = MODULE_NCSI, .msg = "MODULE_NCSI" },
	{ .module_name = MODULE_PPP, .msg = "MODULE_PPP" },
	{ .module_name = MODULE_QCN, .msg = "MODULE_QCN" },
	{ .module_name = MODULE_RCB_RX, .msg = "MODULE_RCB_RX" },
	{ .module_name = MODULE_RTC, .msg = "MODULE_RTC" },
	{ .module_name = MODULE_SSU, .msg = "MODULE_SSU" },
	{ .module_name = MODULE_TM, .msg = "MODULE_TM" },
	{ .module_name = MODULE_RCB_TX, .msg = "MODULE_RCB_TX" },
	{ .module_name = MODULE_TXDMA, .msg = "MODULE_TXDMA" },
	{ .module_name = MODULE_MASTER, .msg = "MODULE_MASTER" },
	{ .module_name = MODULE_ROH_MAC, .msg = "MODULE_ROH_MAC" }
};

static const struct hns3_hw_err_type hns3_hw_error_type[] = {
	{ .error_type = NONE_ERROR, .msg = "none_error" },
	{ .error_type = FIFO_ERROR, .msg = "fifo_error" },
	{ .error_type = MEMORY_ERROR, .msg = "memory_error" },
	{ .error_type = POISION_ERROR, .msg = "poison_error" },
	{ .error_type = MSIX_ECC_ERROR, .msg = "msix_ecc_error" },
	{ .error_type = TQP_INT_ECC_ERROR, .msg = "tqp_int_ecc_error" },
	{ .error_type = PF_ABNORMAL_INT_ERROR, .msg = "pf_abnormal_int_error" },
	{ .error_type = MPF_ABNORMAL_INT_ERROR, .msg = "mpf_abnormal_int_error" },
	{ .error_type = COMMON_ERROR, .msg = "common_error" },
	{ .error_type = PORT_ERROR, .msg = "port_error" },
	{ .error_type = ETS_ERROR, .msg = "ets_error" },
	{ .error_type = NCSI_ERROR, .msg = "ncsi_error" },
	{ .error_type = GLB_ERROR, .msg = "glb_error" }
};

/*
 * The enable_*_err_intr()/config_*_err_intr() helpers below share one
 * pattern: the interrupt enable bits are written only when 'en' is true
 * (otherwise they stay zero from hns3_cmd_setup_basic_desc()), while the
 * corresponding *_EN_MASK words are always written regardless of 'en'.
 */
static int
hns3_config_ncsi_hw_err_int(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure NCSI error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_NCSI_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_NCSI_ERR_INT_EN);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s NCSI error interrupts, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_igu_egu_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure IGU, EGU error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_IGU_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_ENABLE);
	else
		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_DISABLE);

	desc.data[1] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to %s IGU common interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_IGU_EGU_TNL_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_TNL_ERR_INT_EN);

	desc.data[1] = rte_cpu_to_le_32(HNS3_IGU_TNL_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to %s IGU-EGU TNL interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	return hns3_config_ncsi_hw_err_int(hns, en);
}

static int
config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int ret;

	/* configure PPP error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], cmd, false);

	if (cmd == HNS3_OPC_PPP_CMD0_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN);
			desc[0].data[1] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN);
			desc[0].data[4] =
				rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN);
		}

		desc[1].data[0] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN_MASK);
		desc[1].data[1] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN_MASK);
		desc[1].data[2] =
			rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN_MASK);
	} else if (cmd == HNS3_OPC_PPP_CMD1_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN);
			desc[0].data[1] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT3_EN);
		}

		desc[1].data[0] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN_MASK);
		desc[1].data[1] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT3_EN_MASK);
	}

	ret = hns3_cmd_send(hw, &desc[0], 2);
	if (ret)
		hns3_err(hw, "fail to %s PPP error int, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_ppp_err_intr(struct hns3_adapter *hns, bool en)
{
	int ret;

	ret = config_ppp_err_intr(hns, HNS3_OPC_PPP_CMD0_INT_CMD, en);
	if (ret)
		return ret;

	return config_ppp_err_intr(hns, HNS3_OPC_PPP_CMD1_INT_CMD, en);
}

static int
enable_ssu_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int ret;

	/* configure SSU ecc error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_SSU_ECC_INT_CMD, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_SSU_ECC_INT_CMD, false);
	if (en) {
		desc[0].data[0] =
			rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN);
		desc[0].data[1] =
			rte_cpu_to_le_32(HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN);
		desc[0].data[4] =
			rte_cpu_to_le_32(HNS3_SSU_BIT32_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[1] =
		rte_cpu_to_le_32(HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = rte_cpu_to_le_32(HNS3_SSU_BIT32_ECC_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc[0], 2);
	if (ret) {
		hns3_err(hw, "fail to %s SSU ECC error interrupt, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	/* configure SSU common error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_SSU_COMMON_INT_CMD, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_SSU_COMMON_INT_CMD, false);

	if (en) {
		desc[0].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN);
		desc[0].data[1] =
			rte_cpu_to_le_32(HNS3_SSU_PORT_BASED_ERR_INT_EN);
		desc[0].data[2] =
			rte_cpu_to_le_32(HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN);
	}

	desc[1].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN_MASK |
					   HNS3_SSU_PORT_BASED_ERR_INT_EN_MASK);
	desc[1].data[1] =
		rte_cpu_to_le_32(HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc[0], 2);
	if (ret)
		hns3_err(hw, "fail to %s SSU COMMON error intr, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

void
hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en)
{
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_TNL_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_EN);
	else
		desc.data[0] = 0;

	desc.data[1] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s mac tnl intr, ret = %d",
			 en ? "enable" : "disable", ret);
}

static int
config_ppu_err_intrs(struct hns3_adapter *hns, uint32_t cmd, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int num = 1;

	/* configure PPU error interrupts */
	switch (cmd) {
	case HNS3_OPC_PPU_MPF_ECC_INT_CMD:
		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
		desc[0].flag |= HNS3_CMD_FLAG_NEXT;
		hns3_cmd_setup_basic_desc(&desc[1], cmd, false);
		if (en) {
			desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT0_EN;
			desc[0].data[1] = HNS3_PPU_MPF_ABNORMAL_INT1_EN;
			desc[1].data[3] = HNS3_PPU_MPF_ABNORMAL_INT3_EN;
			desc[1].data[4] = HNS3_PPU_MPF_ABNORMAL_INT2_EN;
		}

		desc[1].data[0] = HNS3_PPU_MPF_ABNORMAL_INT0_EN_MASK;
		desc[1].data[1] = HNS3_PPU_MPF_ABNORMAL_INT1_EN_MASK;
		desc[1].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN_MASK;
		desc[1].data[3] |= HNS3_PPU_MPF_ABNORMAL_INT3_EN_MASK;
		num = 2;
		break;
	case HNS3_OPC_PPU_MPF_OTHER_INT_CMD:
		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2;

		desc[0].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
		break;
	case HNS3_OPC_PPU_PF_OTHER_INT_CMD:
		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] = HNS3_PPU_PF_ABNORMAL_INT_EN;

		desc[0].data[2] = HNS3_PPU_PF_ABNORMAL_INT_EN_MASK;
		break;
	default:
		hns3_err(hw,
			 "Invalid cmd(%u) to configure PPU error interrupts.",
			 cmd);
		return -EINVAL;
	}

	return hns3_cmd_send(hw, &desc[0], num);
}

static int
enable_ppu_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_MPF_ECC_INT_CMD, en);
	if (ret) {
		hns3_err(hw, "fail to %s PPU MPF ECC error intr, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_MPF_OTHER_INT_CMD, en);
	if (ret) {
		hns3_err(hw, "fail to %s PPU MPF other intr, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_PF_OTHER_INT_CMD, en);
	if (ret)
		hns3_err(hw, "fail to %s PPU PF error interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
	return ret;
}

static int
enable_tm_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure TM SCH error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_SCH_ECC_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_TM_SCH_ECC_ERR_INT_EN);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to %s TM SCH interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	/* configure TM QCN hw errors */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QCN_MEM_INT_CFG, false);
	desc.data[0] = rte_cpu_to_le_32(HNS3_TM_QCN_ERR_INT_TYPE);
	if (en) {
		desc.data[0] |= rte_cpu_to_le_32(HNS3_TM_QCN_FIFO_INT_EN);
		desc.data[1] = rte_cpu_to_le_32(HNS3_TM_QCN_MEM_ERR_INT_EN);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s TM QCN mem errors, ret = %d\n",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_common_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int ret;

	/* configure common error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_COMMON_ECC_INT_CFG, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_COMMON_ECC_INT_CFG, false);

	if (en) {
		desc[0].data[0] =
			rte_cpu_to_le_32(HNS3_IMP_TCM_ECC_ERR_INT_EN);
		desc[0].data[2] =
			rte_cpu_to_le_32(HNS3_CMDQ_NIC_ECC_ERR_INT_EN);
		desc[0].data[3] =
			rte_cpu_to_le_32(HNS3_IMP_RD_POISON_ERR_INT_EN);
		desc[0].data[4] =
			rte_cpu_to_le_32(HNS3_TQP_ECC_ERR_INT_EN |
					 HNS3_MSIX_SRAM_ECC_ERR_INT_EN);
		desc[0].data[5] =
			rte_cpu_to_le_32(HNS3_IMP_ITCM4_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = rte_cpu_to_le_32(HNS3_IMP_TCM_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = rte_cpu_to_le_32(HNS3_CMDQ_NIC_ECC_ERR_INT_EN_MASK);
	desc[1].data[3] = rte_cpu_to_le_32(HNS3_IMP_RD_POISON_ERR_INT_EN_MASK);
	desc[1].data[4] = rte_cpu_to_le_32(HNS3_TQP_ECC_ERR_INT_EN_MASK |
					   HNS3_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
	desc[1].data[5] = rte_cpu_to_le_32(HNS3_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc[0], RTE_DIM(desc));
	if (ret)
		hns3_err(hw, "fail to %s common err interrupts, ret = %d\n",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_mac_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure MAC common error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN);

	desc.data[1] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s MAC COMMON error intr: %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

/*
 * Per-module hook table consumed by hns3_enable_hw_error_intr(): each entry
 * enables (or disables) the hardware error interrupts of one block, and the
 * NULL entry terminates the walk.
 */
static const struct hns3_hw_blk hw_blk[] = {
	{ .name = "IGU_EGU", .enable_err_intr = enable_igu_egu_err_intr },
	{ .name = "PPP", .enable_err_intr = enable_ppp_err_intr },
	{ .name = "SSU", .enable_err_intr = enable_ssu_err_intr },
	{ .name = "PPU", .enable_err_intr = enable_ppu_err_intr },
	{ .name = "TM", .enable_err_intr = enable_tm_err_intr },
	{ .name = "COMMON", .enable_err_intr = enable_common_err_intr },
	{ .name = "MAC", .enable_err_intr = enable_mac_err_intr },
	{ .name = NULL, .enable_err_intr = NULL }
};

int
hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en)
{
	const struct hns3_hw_blk *module = hw_blk;
	int ret = 0;

	while (module->enable_err_intr) {
		ret = module->enable_err_intr(hns, en);
		if (ret)
			return ret;

		module++;
	}

	return ret;
}

static enum hns3_reset_level
hns3_find_highest_level(struct hns3_adapter *hns, const char *reg,
			const struct hns3_hw_error *err, uint32_t err_sts)
{
	enum hns3_reset_level reset_level = HNS3_FUNC_RESET;
	struct hns3_hw *hw = &hns->hw;
	bool need_reset = false;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			hns3_warn(hw, "%s %s found [error status=0x%x]",
				  reg, err->msg, err_sts);
			if (err->reset_level != HNS3_NONE_RESET &&
			    err->reset_level >= reset_level) {
				reset_level = err->reset_level;
				need_reset = true;
			}
		}
		err++;
	}
	if (need_reset)
		return reset_level;
	else
		return HNS3_NONE_RESET;
}

static int
query_num_bds(struct hns3_hw *hw, bool is_ras, uint32_t *mpf_bd_num,
	      uint32_t *pf_bd_num)
{
	uint32_t mpf_min_bd_num, pf_min_bd_num;
	uint32_t mpf_bd_num_val, pf_bd_num_val;
	enum hns3_opcode_type opcode;
	struct hns3_cmd_desc desc;
	int ret;

	if (is_ras) {
		opcode = HNS3_OPC_QUERY_RAS_INT_STS_BD_NUM;
		mpf_min_bd_num = HNS3_MPF_RAS_INT_MIN_BD_NUM;
		pf_min_bd_num = HNS3_PF_RAS_INT_MIN_BD_NUM;
	} else {
		opcode = HNS3_OPC_QUERY_MSIX_INT_STS_BD_NUM;
		mpf_min_bd_num = HNS3_MPF_MSIX_INT_MIN_BD_NUM;
		pf_min_bd_num = HNS3_PF_MSIX_INT_MIN_BD_NUM;
	}

	hns3_cmd_setup_basic_desc(&desc, opcode, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "query num bds in msix failed, ret = %d", ret);
		return ret;
	}

	mpf_bd_num_val = rte_le_to_cpu_32(desc.data[0]);
	pf_bd_num_val = rte_le_to_cpu_32(desc.data[1]);
	if (mpf_bd_num_val < mpf_min_bd_num || pf_bd_num_val < pf_min_bd_num) {
		hns3_err(hw, "error bd num: mpf(%u), min_mpf(%u), "
			 "pf(%u), min_pf(%u)\n", mpf_bd_num_val, mpf_min_bd_num,
			 pf_bd_num_val, pf_min_bd_num);
		return -EINVAL;
	}

	*mpf_bd_num = mpf_bd_num_val;
	*pf_bd_num = pf_bd_num_val;

	return 0;
}
void
hns3_intr_unregister(const struct rte_intr_handle *hdl,
		     rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int retry_cnt = 0;
	int ret;

	do {
		ret = rte_intr_callback_unregister(hdl, cb_fn, cb_arg);
		if (ret >= 0) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR, "Failed to unregister intr: %d", ret);
			break;
		}
		rte_delay_ms(HNS3_INTR_UNREG_FAIL_DELAY_MS);
	} while (retry_cnt++ < HNS3_INTR_UNREG_FAIL_RETRY_CNT);
}

static uint32_t
hns3_get_hw_error_status(struct hns3_cmd_desc *desc, uint8_t desc_offset,
			 uint8_t data_offset)
{
	uint32_t status;
	uint32_t *desc_data;

	if (desc_offset == 0)
		status = rte_le_to_cpu_32(desc[desc_offset].data[data_offset]);
	else {
		desc_data = (uint32_t *)&desc[desc_offset];
		status = rte_le_to_cpu_32(*(desc_data + data_offset));
	}

	return status;
}
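/*
 * Query the MPF/PF RAS or MSIx error status descriptors, walk the matching
 * error table to log each active source and record the highest required
 * reset level in the 'levels' bitmap, then reuse the descriptors to clear
 * the reported sources.
 */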
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
		     int num, uint64_t *levels,
		     enum hns3_hw_err_report_type err_type)
{
	const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
	enum hns3_opcode_type opcode;
	enum hns3_reset_level req_level;
	struct hns3_hw *hw = &hns->hw;
	uint32_t status;
	int ret;

	switch (err_type) {
	case MPF_MSIX_ERR:
		err = mpf_msix_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT;
		break;
	case PF_MSIX_ERR:
		err = pf_msix_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT;
		break;
	case MPF_RAS_ERR:
		err = mpf_ras_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT;
		break;
	case PF_RAS_ERR:
		err = pf_ras_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_PF_RAS_INT;
		break;
	default:
		hns3_err(hw, "error hardware err_type = %d\n", err_type);
		return -EINVAL;
	}

	/* query all hardware errors */
	hns3_cmd_setup_basic_desc(&desc[0], opcode, true);
	ret = hns3_cmd_send(hw, &desc[0], num);
	if (ret) {
		hns3_err(hw, "query hw err int 0x%x cmd failed, ret = %d\n",
			 opcode, ret);
		return ret;
	}

	/* traverse the error table and process errors based on their type */
	while (err->msg) {
		status = hns3_get_hw_error_status(desc, err->desc_offset,
						  err->data_offset);
		if (status) {
			/*
			 * Set the reset_level or non_reset flag based on
			 * the error type and add error statistics. Here we
			 * only set the flag; the actual reset action is done
			 * in hns3_msix_process.
			 */
			req_level = hns3_find_highest_level(hns, err->msg,
							    err->hw_err,
							    status);
			hns3_atomic_set_bit(req_level, levels);
		}
		err++;
	}

	/* clear all hardware errors */
	hns3_cmd_reuse_desc(&desc[0], false);
	ret = hns3_cmd_send(hw, &desc[0], num);
	if (ret)
		hns3_err(hw, "clear all hw err int cmd failed, ret = %d\n",
			 ret);

	return ret;
}

void
hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
{
	uint32_t mpf_bd_num, pf_bd_num, bd_num;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc *desc;
	int ret;

	/* query the number of bds for the MSIx int status */
	ret = query_num_bds(hw, false, &mpf_bd_num, &pf_bd_num);
	if (ret) {
		hns3_err(hw, "fail to query msix int status bd num: ret = %d",
			 ret);
		return;
	}

	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
	if (desc == NULL) {
		hns3_err(hw,
			 "fail to zmalloc desc for handling msix error, size = %zu",
			 bd_num * sizeof(struct hns3_cmd_desc));
		return;
	}

	/* handle all main PF MSIx errors */
	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_MSIX_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all main pf msix errors, ret = %d",
			 ret);
		goto out;
	}

	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));

	/* handle all PF MSIx errors */
	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_MSIX_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all pf msix errors, ret = %d",
			 ret);
		goto out;
	}

out:
	rte_free(desc);
}

void
hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
{
	uint32_t mpf_bd_num, pf_bd_num, bd_num;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc *desc;
	uint32_t status;
	int ret;

	status = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	if ((status & HNS3_RAS_REG_NFE_MASK) == 0)
		return;

	/* query the number of bds for the RAS int status */
	ret = query_num_bds(hw, true, &mpf_bd_num, &pf_bd_num);
	if (ret) {
		hns3_err(hw, "fail to query ras int status bd num: ret = %d",
			 ret);
		return;
	}

	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
	if (desc == NULL) {
		hns3_err(hw,
			 "fail to zmalloc desc for handling ras error, size = %zu",
			 bd_num * sizeof(struct hns3_cmd_desc));
		return;
	}

	/* handle all main PF RAS errors */
	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_RAS_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all main pf ras errors, ret = %d",
			 ret);
		goto out;
	}

	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));

	/* handle all PF RAS errors */
	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_RAS_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all pf ras errors, ret = %d", ret);
		goto out;
	}

out:
	rte_free(desc);
}
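/*
 * New-format (V2) error info returned by HNS3_OPC_QUERY_ALL_ERR_INFO is a
 * packed array of 32-bit words: a summary word (module count and requested
 * reset type), then for each module a header word followed by its error
 * entries, each entry being a type/register-count word plus the dumped
 * register values. The helpers below walk and log that layout.
 */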
total_type; 2198 uint8_t is_ras; 2199 uint8_t i; 2200 2201 mod_id = mod_err_info->mod_id; 2202 type_id = err_info->type_id & HNS3_ERR_TYPE_MASK; 2203 is_ras = err_info->type_id >> HNS3_ERR_TYPE_IS_RAS_OFFSET; 2204 2205 total_module = RTE_DIM(hns3_hw_module_name); 2206 total_type = RTE_DIM(hns3_hw_error_type); 2207 2208 hns3_err(hw, "total_module:%u, total_type:%u", 2209 total_module, total_type); 2210 2211 if (mod_id < total_module && type_id < total_type) 2212 hns3_err(hw, "found %s %s, is %s error.", 2213 hns3_hw_module_name[mod_id].msg, 2214 hns3_hw_error_type[type_id].msg, 2215 is_ras ? "ras" : "msix"); 2216 else 2217 hns3_err(hw, "unknown module[%u] or type[%u].", 2218 mod_id, type_id); 2219 2220 hns3_err(hw, "reg_value:"); 2221 for (i = 0; i < err_info->reg_num; i++) 2222 hns3_err(hw, "0x%08x", err_info->reg[i]); 2223 } 2224 2225 static void 2226 hns3_handle_module_error_data(struct hns3_hw *hw, uint32_t *buf, 2227 uint32_t buf_size) 2228 { 2229 struct hns3_type_reg_err_info *type_reg_err_info; 2230 struct hns3_mod_err_info *mod_err_info; 2231 struct hns3_sum_err_info *sum_err_info; 2232 uint8_t mod_num, reset_type; 2233 uint32_t offset = 0; 2234 uint8_t err_num; 2235 uint8_t i; 2236 2237 sum_err_info = (struct hns3_sum_err_info *)&buf[offset++]; 2238 mod_num = sum_err_info->mod_num; 2239 reset_type = sum_err_info->reset_type; 2240 if (reset_type && reset_type != HNS3_NONE_RESET) 2241 hns3_atomic_set_bit(reset_type, &hw->reset.request); 2242 2243 hns3_err(hw, "reset_type = %s, mod_num = %u.", 2244 reset_string[reset_type], mod_num); 2245 2246 while (mod_num--) { 2247 if (offset >= buf_size) { 2248 hns3_err(hw, "offset(%u) exceeds buf's size(%u).", 2249 offset, buf_size); 2250 return; 2251 } 2252 mod_err_info = (struct hns3_mod_err_info *)&buf[offset++]; 2253 err_num = mod_err_info->err_num; 2254 for (i = 0; i < err_num; i++) { 2255 if (offset >= buf_size) { 2256 hns3_err(hw, 2257 "offset(%u) exceeds buf size(%u).", 2258 offset, buf_size); 2259 return; 2260 } 2261 2262 type_reg_err_info = (struct hns3_type_reg_err_info *) 2263 &buf[offset++]; 2264 hns3_handle_type_reg_error_data(hw, mod_err_info, 2265 type_reg_err_info); 2266 2267 offset += type_reg_err_info->reg_num; 2268 } 2269 } 2270 } 2271 2272 static int 2273 hns3_query_all_err_bd_num(struct hns3_hw *hw, uint32_t *bd_num) 2274 { 2275 struct hns3_cmd_desc desc; 2276 uint32_t bd_num_data; 2277 int ret; 2278 2279 hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_ALL_ERR_BD_NUM, true); 2280 ret = hns3_cmd_send(hw, &desc, 1); 2281 if (ret) { 2282 hns3_err(hw, "failed to query error bd_num, ret = %d.", ret); 2283 return ret; 2284 } 2285 2286 bd_num_data = rte_le_to_cpu_32(desc.data[0]); 2287 *bd_num = bd_num_data; 2288 if (bd_num_data == 0) { 2289 hns3_err(hw, "the value of bd_num is 0!"); 2290 return -EINVAL; 2291 } 2292 2293 return 0; 2294 } 2295 2296 static int 2297 hns3_query_all_err_info(struct hns3_hw *hw, struct hns3_cmd_desc *desc, 2298 uint32_t bd_num) 2299 { 2300 int ret; 2301 2302 hns3_cmd_setup_basic_desc(desc, HNS3_OPC_QUERY_ALL_ERR_INFO, true); 2303 ret = hns3_cmd_send(hw, desc, bd_num); 2304 if (ret) { 2305 hns3_err(hw, "failed to query error info, ret = %d.", ret); 2306 return ret; 2307 } 2308 2309 return ret; 2310 } 2311 2312 static void 2313 hns3_handle_hw_error_v2(struct hns3_hw *hw) 2314 { 2315 uint32_t bd_num, buf_len, i, buf_size; 2316 struct hns3_cmd_desc *desc; 2317 uint32_t *desc_data; 2318 uint32_t *buf; 2319 int ret; 2320 2321 ret = hns3_query_all_err_bd_num(hw, &bd_num); 2322 if (ret) 2323 goto out; 2324 2325 
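	/*
	 * Each command descriptor carries only part of the error data, so
	 * allocate bd_num descriptors and let the firmware fill them all in
	 * a single query below.
	 */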
desc = rte_zmalloc("hns3_ras", bd_num * sizeof(struct hns3_cmd_desc), 2326 0); 2327 if (desc == NULL) { 2328 hns3_err(hw, "failed to malloc hns3 ras cmd desc."); 2329 goto out; 2330 } 2331 2332 ret = hns3_query_all_err_info(hw, desc, bd_num); 2333 if (ret) 2334 goto err_desc; 2335 2336 buf_len = bd_num * sizeof(struct hns3_cmd_desc) - HNS3_DESC_NO_DATA_LEN; 2337 buf_size = buf_len / HNS3_DESC_DATA_UNIT_SIZE; 2338 2339 desc_data = rte_zmalloc("hns3_ras", buf_len, 0); 2340 if (desc_data == NULL) { 2341 hns3_err(hw, "failed to malloc hns3 ras desc data."); 2342 goto err_desc; 2343 } 2344 2345 buf = rte_zmalloc("hns3_ras", buf_len, 0); 2346 if (buf == NULL) { 2347 hns3_err(hw, "failed to malloc hns3 ras buf data."); 2348 goto err_buf_alloc; 2349 } 2350 2351 memcpy(desc_data, &desc[0].data[0], buf_len); 2352 for (i = 0; i < buf_size; i++) 2353 buf[i] = rte_le_to_cpu_32(desc_data[i]); 2354 2355 hns3_handle_module_error_data(hw, buf, buf_size); 2356 rte_free(buf); 2357 2358 err_buf_alloc: 2359 rte_free(desc_data); 2360 err_desc: 2361 rte_free(desc); 2362 out: 2363 return; 2364 } 2365 2366 void 2367 hns3_handle_error(struct hns3_adapter *hns) 2368 { 2369 struct hns3_hw *hw = &hns->hw; 2370 2371 if (hns3_dev_get_support(hw, RAS_IMP)) { 2372 hns3_handle_hw_error_v2(hw); 2373 hns3_schedule_reset(hns); 2374 } else { 2375 hns3_handle_msix_error(hns, &hw->reset.request); 2376 hns3_handle_ras_error(hns, &hw->reset.request); 2377 hns3_schedule_reset(hns); 2378 } 2379 } 2380 2381 int 2382 hns3_reset_init(struct hns3_hw *hw) 2383 { 2384 rte_spinlock_init(&hw->lock); 2385 hw->reset.level = HNS3_NONE_RESET; 2386 hw->reset.stage = RESET_STAGE_NONE; 2387 hw->reset.request = 0; 2388 hw->reset.pending = 0; 2389 hw->reset.resetting = 0; 2390 __atomic_store_n(&hw->reset.disable_cmd, 0, __ATOMIC_RELAXED); 2391 hw->reset.wait_data = rte_zmalloc("wait_data", 2392 sizeof(struct hns3_wait_data), 0); 2393 if (!hw->reset.wait_data) { 2394 PMD_INIT_LOG(ERR, "Failed to allocate memory for wait_data"); 2395 return -ENOMEM; 2396 } 2397 return 0; 2398 } 2399 2400 void 2401 hns3_schedule_reset(struct hns3_adapter *hns) 2402 { 2403 struct hns3_hw *hw = &hns->hw; 2404 2405 /* Reschedule the reset process after successful initialization */ 2406 if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) { 2407 __atomic_store_n(&hw->reset.schedule, SCHEDULE_PENDING, 2408 __ATOMIC_RELAXED); 2409 return; 2410 } 2411 2412 if (hw->adapter_state >= HNS3_NIC_CLOSED) 2413 return; 2414 2415 /* Schedule restart alarm if it is not scheduled yet */ 2416 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 2417 SCHEDULE_REQUESTED) 2418 return; 2419 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == 2420 SCHEDULE_DEFERRED) 2421 rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); 2422 else 2423 __atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED, 2424 __ATOMIC_RELAXED); 2425 2426 rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns); 2427 } 2428 2429 void 2430 hns3_schedule_delayed_reset(struct hns3_adapter *hns) 2431 { 2432 #define DEFERRED_SCHED_US (3 * MSEC_PER_SEC * USEC_PER_MSEC) 2433 struct hns3_hw *hw = &hns->hw; 2434 2435 /* Do nothing if it is uninited or closed */ 2436 if (hw->adapter_state == HNS3_NIC_UNINITIALIZED || 2437 hw->adapter_state >= HNS3_NIC_CLOSED) { 2438 return; 2439 } 2440 2441 if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) != 2442 SCHEDULE_NONE) 2443 return; 2444 __atomic_store_n(&hw->reset.schedule, SCHEDULE_DEFERRED, 2445 __ATOMIC_RELAXED); 2446 
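	/*
	 * Arm the reset service with a 3 second delay. If a reset is
	 * requested in the meantime, hns3_schedule_reset() cancels this
	 * deferred alarm and reschedules the service immediately.
	 */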
rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns); 2447 } 2448 2449 void 2450 hns3_wait_callback(void *param) 2451 { 2452 struct hns3_wait_data *data = (struct hns3_wait_data *)param; 2453 struct hns3_adapter *hns = data->hns; 2454 struct hns3_hw *hw = &hns->hw; 2455 uint64_t msec; 2456 bool done; 2457 2458 data->count--; 2459 if (data->check_completion) { 2460 /* 2461 * Check if the current time exceeds the deadline 2462 * or a pending reset coming, or reset during close. 2463 */ 2464 msec = hns3_clock_gettime_ms(); 2465 if (msec > data->end_ms || is_reset_pending(hns) || 2466 hw->adapter_state == HNS3_NIC_CLOSING) { 2467 done = false; 2468 data->count = 0; 2469 } else 2470 done = data->check_completion(hw); 2471 } else 2472 done = true; 2473 2474 if (!done && data->count > 0) { 2475 rte_eal_alarm_set(data->interval, hns3_wait_callback, data); 2476 return; 2477 } 2478 if (done) 2479 data->result = HNS3_WAIT_SUCCESS; 2480 else { 2481 hns3_err(hw, "%s wait timeout at stage %d", 2482 reset_string[hw->reset.level], hw->reset.stage); 2483 data->result = HNS3_WAIT_TIMEOUT; 2484 } 2485 hns3_schedule_reset(hns); 2486 } 2487 2488 void 2489 hns3_notify_reset_ready(struct hns3_hw *hw, bool enable) 2490 { 2491 uint32_t reg_val; 2492 2493 reg_val = hns3_read_dev(hw, HNS3_CMDQ_TX_DEPTH_REG); 2494 if (enable) 2495 reg_val |= HNS3_NIC_SW_RST_RDY; 2496 else 2497 reg_val &= ~HNS3_NIC_SW_RST_RDY; 2498 2499 hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, reg_val); 2500 } 2501 2502 int 2503 hns3_reset_req_hw_reset(struct hns3_adapter *hns) 2504 { 2505 struct hns3_hw *hw = &hns->hw; 2506 2507 if (hw->reset.wait_data->result == HNS3_WAIT_UNKNOWN) { 2508 hw->reset.wait_data->hns = hns; 2509 hw->reset.wait_data->check_completion = NULL; 2510 hw->reset.wait_data->interval = HNS3_RESET_SYNC_US; 2511 hw->reset.wait_data->count = 1; 2512 hw->reset.wait_data->result = HNS3_WAIT_REQUEST; 2513 rte_eal_alarm_set(hw->reset.wait_data->interval, 2514 hns3_wait_callback, hw->reset.wait_data); 2515 return -EAGAIN; 2516 } else if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST) 2517 return -EAGAIN; 2518 2519 /* inform hardware that preparatory work is done */ 2520 hns3_notify_reset_ready(hw, true); 2521 return 0; 2522 } 2523 2524 static void 2525 hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels) 2526 { 2527 uint64_t merge_cnt = hw->reset.stats.merge_cnt; 2528 int64_t tmp; 2529 2530 switch (hw->reset.level) { 2531 case HNS3_IMP_RESET: 2532 hns3_atomic_clear_bit(HNS3_IMP_RESET, levels); 2533 tmp = hns3_test_and_clear_bit(HNS3_GLOBAL_RESET, levels); 2534 HNS3_CHECK_MERGE_CNT(tmp); 2535 tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels); 2536 HNS3_CHECK_MERGE_CNT(tmp); 2537 break; 2538 case HNS3_GLOBAL_RESET: 2539 hns3_atomic_clear_bit(HNS3_GLOBAL_RESET, levels); 2540 tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels); 2541 HNS3_CHECK_MERGE_CNT(tmp); 2542 break; 2543 case HNS3_FUNC_RESET: 2544 hns3_atomic_clear_bit(HNS3_FUNC_RESET, levels); 2545 break; 2546 case HNS3_VF_RESET: 2547 hns3_atomic_clear_bit(HNS3_VF_RESET, levels); 2548 tmp = hns3_test_and_clear_bit(HNS3_VF_PF_FUNC_RESET, levels); 2549 HNS3_CHECK_MERGE_CNT(tmp); 2550 tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); 2551 HNS3_CHECK_MERGE_CNT(tmp); 2552 break; 2553 case HNS3_VF_FULL_RESET: 2554 hns3_atomic_clear_bit(HNS3_VF_FULL_RESET, levels); 2555 tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); 2556 HNS3_CHECK_MERGE_CNT(tmp); 2557 break; 2558 case HNS3_VF_PF_FUNC_RESET: 2559 
		hns3_atomic_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
		HNS3_CHECK_MERGE_CNT(tmp);
		break;
	case HNS3_VF_FUNC_RESET:
		hns3_atomic_clear_bit(HNS3_VF_FUNC_RESET, levels);
		break;
	case HNS3_FLR_RESET:
		hns3_atomic_clear_bit(HNS3_FLR_RESET, levels);
		break;
	case HNS3_NONE_RESET:
	default:
		return;
	}
	if (merge_cnt != hw->reset.stats.merge_cnt)
		hns3_warn(hw,
			  "No need to do low-level reset after %s reset. "
			  "merge cnt: %" PRIu64 " total merge cnt: %" PRIu64,
			  reset_string[hw->reset.level],
			  hw->reset.stats.merge_cnt - merge_cnt,
			  hw->reset.stats.merge_cnt);
}

static bool
hns3_reset_err_handle(struct hns3_adapter *hns)
{
#define MAX_RESET_FAIL_CNT 30

	struct hns3_hw *hw = &hns->hw;

	if (hw->adapter_state == HNS3_NIC_CLOSING)
		goto reset_fail;

	if (is_reset_pending(hns)) {
		hw->reset.attempts = 0;
		hw->reset.stats.fail_cnt++;
		hns3_warn(hw, "%s reset fail because new reset is pending "
			      "attempts:%" PRIu64,
			  reset_string[hw->reset.level],
			  hw->reset.stats.fail_cnt);
		hw->reset.level = HNS3_NONE_RESET;
		return true;
	}

	hw->reset.attempts++;
	if (hw->reset.attempts < MAX_RESET_FAIL_CNT) {
		hns3_atomic_set_bit(hw->reset.level, &hw->reset.pending);
		hns3_warn(hw, "%s retry to reset attempts: %d",
			  reset_string[hw->reset.level],
			  hw->reset.attempts);
		return true;
	}

	/*
	 * Failure to reset does not mean that the network port is
	 * completely unavailable, so cmd still needs to be initialized.
	 * Regardless of whether the execution is successful or not, the
	 * flow after execution must be continued.
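	 * Re-initializing the command queue is only needed when it was
	 * disabled as part of the failed reset, so that management commands
	 * can still be issued to the firmware afterwards.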
	 */
	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED))
		(void)hns3_cmd_init(hw);
reset_fail:
	hw->reset.attempts = 0;
	hw->reset.stats.fail_cnt++;
	hns3_warn(hw, "%s reset fail fail_cnt:%" PRIu64 " success_cnt:%" PRIu64
		  " global_cnt:%" PRIu64 " imp_cnt:%" PRIu64
		  " request_cnt:%" PRIu64 " exec_cnt:%" PRIu64
		  " merge_cnt:%" PRIu64 " adapter_state:%d",
		  reset_string[hw->reset.level], hw->reset.stats.fail_cnt,
		  hw->reset.stats.success_cnt, hw->reset.stats.global_cnt,
		  hw->reset.stats.imp_cnt, hw->reset.stats.request_cnt,
		  hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt,
		  hw->adapter_state);

	/* IMP is no longer waiting for the ready flag */
	hns3_notify_reset_ready(hw, true);
	return false;
}

static int
hns3_reset_pre(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	int ret;

	if (hw->reset.stage == RESET_STAGE_NONE) {
		__atomic_store_n(&hns->hw.reset.resetting, 1, __ATOMIC_RELAXED);
		hw->reset.stage = RESET_STAGE_DOWN;
		ret = hw->reset.ops->stop_service(hns);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw, "Reset step1 down fail=%d time=%ld.%.6ld",
				  ret, tv.tv_sec, tv.tv_usec);
			return ret;
		}
		hns3_warn(hw, "Reset step1 down success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.stage = RESET_STAGE_PREWAIT;
	}
	if (hw->reset.stage == RESET_STAGE_PREWAIT) {
		ret = hw->reset.ops->prepare_reset(hns);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw,
				  "Reset step2 prepare wait fail=%d time=%ld.%.6ld",
				  ret, tv.tv_sec, tv.tv_usec);
			return ret;
		}
		hns3_warn(hw, "Reset step2 prepare wait success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.stage = RESET_STAGE_REQ_HW_RESET;
		hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
	}
	return 0;
}

static int
hns3_reset_post(struct hns3_adapter *hns)
{
#define TIMEOUT_RETRIES_CNT 30
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv_delta;
	struct timeval tv;
	int ret = 0;

	if (hw->adapter_state == HNS3_NIC_CLOSING) {
		hns3_warn(hw, "Don't do reset_post during closing, just uninit cmd");
		hns3_cmd_uninit(hw);
		return -EPERM;
	}

	if (hw->reset.stage == RESET_STAGE_DEV_INIT) {
		rte_spinlock_lock(&hw->lock);
		if (hw->reset.mbuf_deferred_free) {
			hns3_dev_release_mbufs(hns);
			hw->reset.mbuf_deferred_free = false;
		}
		ret = hw->reset.ops->reinit_dev(hns);
		rte_spinlock_unlock(&hw->lock);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw, "Reset step5 devinit fail=%d retries=%d",
				  ret, hw->reset.retries);
			goto err;
		}
		hns3_warn(hw, "Reset step5 devinit success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.retries = 0;
		hw->reset.stage = RESET_STAGE_RESTORE;
		rte_eal_alarm_set(SWITCH_CONTEXT_US,
				  hw->reset.ops->reset_service, hns);
		return -EAGAIN;
	}
	if (hw->reset.stage == RESET_STAGE_RESTORE) {
		rte_spinlock_lock(&hw->lock);
		ret = hw->reset.ops->restore_conf(hns);
		rte_spinlock_unlock(&hw->lock);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw,
				  "Reset step6 restore fail=%d retries=%d",
				  ret, hw->reset.retries);
			goto err;
		}
		hns3_warn(hw, "Reset step6 restore success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.retries = 0;
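		/* Configuration restored; move on to the final stage. */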
2727 hw->reset.stage = RESET_STAGE_DONE; 2728 } 2729 if (hw->reset.stage == RESET_STAGE_DONE) { 2730 /* IMP will wait ready flag before reset */ 2731 hns3_notify_reset_ready(hw, false); 2732 hns3_clear_reset_level(hw, &hw->reset.pending); 2733 __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED); 2734 hw->reset.attempts = 0; 2735 hw->reset.stats.success_cnt++; 2736 hw->reset.stage = RESET_STAGE_NONE; 2737 rte_spinlock_lock(&hw->lock); 2738 hw->reset.ops->start_service(hns); 2739 rte_spinlock_unlock(&hw->lock); 2740 hns3_clock_gettime(&tv); 2741 timersub(&tv, &hw->reset.start_time, &tv_delta); 2742 hns3_warn(hw, "%s reset done fail_cnt:%" PRIu64 2743 " success_cnt:%" PRIu64 " global_cnt:%" PRIu64 2744 " imp_cnt:%" PRIu64 " request_cnt:%" PRIu64 2745 " exec_cnt:%" PRIu64 " merge_cnt:%" PRIu64, 2746 reset_string[hw->reset.level], 2747 hw->reset.stats.fail_cnt, hw->reset.stats.success_cnt, 2748 hw->reset.stats.global_cnt, hw->reset.stats.imp_cnt, 2749 hw->reset.stats.request_cnt, hw->reset.stats.exec_cnt, 2750 hw->reset.stats.merge_cnt); 2751 hns3_warn(hw, 2752 "%s reset done delta %" PRIu64 " ms time=%ld.%.6ld", 2753 reset_string[hw->reset.level], 2754 hns3_clock_calctime_ms(&tv_delta), 2755 tv.tv_sec, tv.tv_usec); 2756 hw->reset.level = HNS3_NONE_RESET; 2757 } 2758 return 0; 2759 2760 err: 2761 if (ret == -ETIME) { 2762 hw->reset.retries++; 2763 if (hw->reset.retries < TIMEOUT_RETRIES_CNT) { 2764 rte_eal_alarm_set(HNS3_RESET_SYNC_US, 2765 hw->reset.ops->reset_service, hns); 2766 return -EAGAIN; 2767 } 2768 } 2769 hw->reset.retries = 0; 2770 return -EIO; 2771 } 2772 2773 /* 2774 * There are three scenarios as follows: 2775 * When the reset is not in progress, the reset process starts. 2776 * During the reset process, if the reset level has not changed, 2777 * the reset process continues; otherwise, the reset process is aborted. 
2778 * hw->reset.level new_level action 2779 * HNS3_NONE_RESET HNS3_XXXX_RESET start reset 2780 * HNS3_XXXX_RESET HNS3_XXXX_RESET continue reset 2781 * HNS3_LOW_RESET HNS3_HIGH_RESET abort 2782 */ 2783 int 2784 hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level) 2785 { 2786 struct hns3_hw *hw = &hns->hw; 2787 struct timeval tv_delta; 2788 struct timeval tv; 2789 int ret; 2790 2791 if (hw->reset.level == HNS3_NONE_RESET) { 2792 hw->reset.level = new_level; 2793 hw->reset.stats.exec_cnt++; 2794 hns3_clock_gettime(&hw->reset.start_time); 2795 hns3_warn(hw, "Start %s reset time=%ld.%.6ld", 2796 reset_string[hw->reset.level], 2797 hw->reset.start_time.tv_sec, 2798 hw->reset.start_time.tv_usec); 2799 } 2800 2801 if (is_reset_pending(hns)) { 2802 hns3_clock_gettime(&tv); 2803 hns3_warn(hw, 2804 "%s reset is aborted by high level time=%ld.%.6ld", 2805 reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec); 2806 if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST) 2807 rte_eal_alarm_cancel(hns3_wait_callback, 2808 hw->reset.wait_data); 2809 goto err; 2810 } 2811 2812 ret = hns3_reset_pre(hns); 2813 if (ret) 2814 goto err; 2815 2816 if (hw->reset.stage == RESET_STAGE_REQ_HW_RESET) { 2817 ret = hns3_reset_req_hw_reset(hns); 2818 if (ret == -EAGAIN) 2819 return ret; 2820 hns3_clock_gettime(&tv); 2821 hns3_warn(hw, 2822 "Reset step3 request IMP reset success time=%ld.%.6ld", 2823 tv.tv_sec, tv.tv_usec); 2824 hw->reset.stage = RESET_STAGE_WAIT; 2825 hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN; 2826 } 2827 if (hw->reset.stage == RESET_STAGE_WAIT) { 2828 ret = hw->reset.ops->wait_hardware_ready(hns); 2829 if (ret) 2830 goto retry; 2831 hns3_clock_gettime(&tv); 2832 hns3_warn(hw, "Reset step4 reset wait success time=%ld.%.6ld", 2833 tv.tv_sec, tv.tv_usec); 2834 hw->reset.stage = RESET_STAGE_DEV_INIT; 2835 } 2836 2837 ret = hns3_reset_post(hns); 2838 if (ret) 2839 goto retry; 2840 2841 return 0; 2842 retry: 2843 if (ret == -EAGAIN) 2844 return ret; 2845 err: 2846 hns3_clear_reset_level(hw, &hw->reset.pending); 2847 if (hns3_reset_err_handle(hns)) { 2848 hw->reset.stage = RESET_STAGE_PREWAIT; 2849 hns3_schedule_reset(hns); 2850 } else { 2851 rte_spinlock_lock(&hw->lock); 2852 if (hw->reset.mbuf_deferred_free) { 2853 hns3_dev_release_mbufs(hns); 2854 hw->reset.mbuf_deferred_free = false; 2855 } 2856 rte_spinlock_unlock(&hw->lock); 2857 __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED); 2858 hw->reset.stage = RESET_STAGE_NONE; 2859 hns3_clock_gettime(&tv); 2860 timersub(&tv, &hw->reset.start_time, &tv_delta); 2861 hns3_warn(hw, "%s reset fail delta %" PRIu64 " ms time=%ld.%.6ld", 2862 reset_string[hw->reset.level], 2863 hns3_clock_calctime_ms(&tv_delta), 2864 tv.tv_sec, tv.tv_usec); 2865 hw->reset.level = HNS3_NONE_RESET; 2866 } 2867 2868 return -EIO; 2869 } 2870 2871 /* 2872 * The reset process can only be terminated after handshake with IMP(step3), 2873 * so that IMP can complete the reset process normally. 
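 * hns3_reset_abort() therefore only waits (up to HNS3_QUIT_RESET_CNT polls)
 * for an in-flight reset to finish, sets the ready flag so that IMP is not
 * left waiting, and then cancels the pending reset alarms.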
 */
void
hns3_reset_abort(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	int i;

	for (i = 0; i < HNS3_QUIT_RESET_CNT; i++) {
		if (hw->reset.level == HNS3_NONE_RESET)
			break;
		rte_delay_ms(HNS3_QUIT_RESET_DELAY_MS);
	}

	/* IMP is no longer waiting for the ready flag */
	hns3_notify_reset_ready(hw, true);

	rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
	rte_eal_alarm_cancel(hns3_wait_callback, hw->reset.wait_data);

	if (hw->reset.level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv);
		hns3_err(hw, "Failed to terminate reset: %s time=%ld.%.6ld",
			 reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
	}
}

static void
hns3_report_lse(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->adapter_state == HNS3_NIC_STARTED)
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

void
hns3_start_report_lse(struct rte_eth_dev *dev)
{
#define DELAY_REPORT_LSE_US 1
	/*
	 * When this function is called, the caller may already hold
	 * hns3_hw.lock. If the link status event were reported right away,
	 * some applications (bonding, for example) would call back into
	 * driver ops that try to acquire hns3_hw.lock again, leading to a
	 * deadlock. Reporting the event from a delayed alarm avoids this.
	 */
	rte_eal_alarm_set(DELAY_REPORT_LSE_US, hns3_report_lse, dev);
}

void
hns3_stop_report_lse(struct rte_eth_dev *dev)
{
	rte_eal_alarm_cancel(hns3_report_lse, dev);
}
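
/*
 * Usage sketch (illustrative only, not part of the driver): the PF/VF
 * ethdev layer is expected to wire this module up roughly as follows at
 * probe time and from its reset service alarm. The names "my_reset_ops",
 * "my_reset_service" and "new_level" below are placeholders for whatever
 * ops table, alarm callback and pending-level lookup the ethdev code
 * actually uses when it fills in hw->reset.ops.
 *
 *	hw->reset.ops = &my_reset_ops;
 *	if (hns3_reset_init(hw) != 0)
 *		goto probe_err;
 *
 *	static void my_reset_service(void *param)
 *	{
 *		struct hns3_adapter *hns = param;
 *
 *		// Pick the highest pending reset level, then drive the
 *		// state machine; -EAGAIN means the service is re-armed
 *		// and will be called again later.
 *		(void)hns3_reset_process(hns, new_level);
 *	}
 */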