/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <rte_alarm.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_io.h>
#include <rte_malloc.h>

#include "hns3_common.h"
#include "hns3_logs.h"
#include "hns3_regs.h"
#include "hns3_rxtx.h"
#include "hns3_intr.h"

#define SWITCH_CONTEXT_US 10

static const char *reset_string[HNS3_MAX_RESET] = {
	"flr", "vf_func", "vf_pf_func", "vf_full", "vf_global",
	"pf_func", "global", "IMP", "none",
};

static const struct hns3_hw_error mac_afifo_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st1[] = {
	{ .int_msk = 0xFFFFFFFF, .msg = "rpu_rx_pkt_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_ras[] = {
	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "rd_bus_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "wr_bus_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_msix[] = {
	{ .int_msk = BIT(29), .msg = "rx_q_search_miss", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_port_based_pf_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "low_water_line_err_port", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppp_pf_abnormal_int[] = {
	{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_pf_abnormal_int_ras[] = {
	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_pf_abnormal_int_msix[] = {
	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "buf_wait_timeout", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error imp_tcm_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error cmdq_mem_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error tqp_int_ecc_int[] = {
	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(10), .msg = "tx_queue_scan_int_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(11), .msg = "rx_queue_scan_int_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error imp_rd_poison_int[] = {
	{ .int_msk = BIT(0), .msg = "imp_rd_poison_int", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

#define HNS3_SSU_MEM_ECC_ERR(x) \
{ \
	.int_msk = BIT(x), \
	.msg = "ssu_mem" #x "_ecc_mbit_err", \
	.reset_level = HNS3_GLOBAL_RESET \
}

static const struct hns3_hw_error ssu_ecc_multi_bit_int_0[] = {
	HNS3_SSU_MEM_ECC_ERR(0),
	HNS3_SSU_MEM_ECC_ERR(1),
	HNS3_SSU_MEM_ECC_ERR(2),
	HNS3_SSU_MEM_ECC_ERR(3),
	HNS3_SSU_MEM_ECC_ERR(4),
	HNS3_SSU_MEM_ECC_ERR(5),
	HNS3_SSU_MEM_ECC_ERR(6),
	HNS3_SSU_MEM_ECC_ERR(7),
	HNS3_SSU_MEM_ECC_ERR(8),
	HNS3_SSU_MEM_ECC_ERR(9),
	HNS3_SSU_MEM_ECC_ERR(10),
	HNS3_SSU_MEM_ECC_ERR(11),
	HNS3_SSU_MEM_ECC_ERR(12),
	HNS3_SSU_MEM_ECC_ERR(13),
	HNS3_SSU_MEM_ECC_ERR(14),
	HNS3_SSU_MEM_ECC_ERR(15),
	HNS3_SSU_MEM_ECC_ERR(16),
	HNS3_SSU_MEM_ECC_ERR(17),
	HNS3_SSU_MEM_ECC_ERR(18),
	HNS3_SSU_MEM_ECC_ERR(19),
	HNS3_SSU_MEM_ECC_ERR(20),
	HNS3_SSU_MEM_ECC_ERR(21),
	HNS3_SSU_MEM_ECC_ERR(22),
	HNS3_SSU_MEM_ECC_ERR(23),
	HNS3_SSU_MEM_ECC_ERR(24),
	HNS3_SSU_MEM_ECC_ERR(25),
	HNS3_SSU_MEM_ECC_ERR(26),
	HNS3_SSU_MEM_ECC_ERR(27),
	HNS3_SSU_MEM_ECC_ERR(28),
	HNS3_SSU_MEM_ECC_ERR(29),
	HNS3_SSU_MEM_ECC_ERR(30),
	HNS3_SSU_MEM_ECC_ERR(31),
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_ecc_multi_bit_int_1[] = {
	{ .int_msk = BIT(0), .msg = "ssu_mem32_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_common_ecc_int[] = {
	{ .int_msk = BIT(0), .msg = "buf_sum_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = BIT(2), .msg = "ppp_mbid_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "cks_edit_position_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "vlan_num_in_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error igu_int[] = {
	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error msix_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppp_mpf_abnormal_int_st1[] = {
	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppp_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ppu_mpf_abnormal_int_st3[] = {
	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error tm_sch_int[] = {
	{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error qcn_fifo_int[] = {
	{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error qcn_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ncsi_ecc_int[] = {
	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", .reset_level = HNS3_NONE_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_fifo_overflow_int[] = {
	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "ig_host_inf_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_ets_tcg_int[] = {
	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error igu_egu_tnl_int[] = {
	{ .int_msk = BIT(0), .msg = "rx_buf_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "tx_buf_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tx_buf_underrun", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error ssu_port_based_err_int[] = {
	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", .reset_level = HNS3_FUNC_RESET },
	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", .reset_level = HNS3_GLOBAL_RESET },
	{ .int_msk = 0, .msg = NULL, .reset_level = HNS3_NONE_RESET }
};

static const struct hns3_hw_error_desc mpf_ras_err_tbl[] = {
	{ .desc_offset = 0, .data_offset = 0, .msg = "IMP_TCM_ECC_INT_STS", .hw_err = imp_tcm_ecc_int },
	{ .desc_offset = 0, .data_offset = 1, .msg = "CMDQ_MEM_ECC_INT_STS", .hw_err = cmdq_mem_ecc_int },
	{ .desc_offset = 0, .data_offset = 2, .msg = "IMP_RD_POISON_INT_STS", .hw_err = imp_rd_poison_int },
	{ .desc_offset = 0, .data_offset = 3, .msg = "TQP_INT_ECC_INT_STS", .hw_err = tqp_int_ecc_int },
	{ .desc_offset = 0, .data_offset = 4, .msg = "MSIX_ECC_INT_STS", .hw_err = msix_ecc_int },
	{ .desc_offset = 2, .data_offset = 2, .msg = "SSU_ECC_MULTI_BIT_INT_0", .hw_err = ssu_ecc_multi_bit_int_0 },
	{ .desc_offset = 2, .data_offset = 3, .msg = "SSU_ECC_MULTI_BIT_INT_1", .hw_err = ssu_ecc_multi_bit_int_1 },
	{ .desc_offset = 2, .data_offset = 4, .msg = "SSU_COMMON_ERR_INT", .hw_err = ssu_common_ecc_int },
	{ .desc_offset = 3, .data_offset = 0, .msg = "IGU_INT_STS", .hw_err = igu_int },
	{ .desc_offset = 4, .data_offset = 1, .msg = "PPP_MPF_ABNORMAL_INT_ST1", .hw_err = ppp_mpf_abnormal_int_st1 },
	{ .desc_offset = 4, .data_offset = 3, .msg = "PPP_MPF_ABNORMAL_INT_ST3", .hw_err = ppp_mpf_abnormal_int_st3 },
	{ .desc_offset = 5, .data_offset = 1, .msg = "PPU_MPF_ABNORMAL_INT_ST1", .hw_err = ppu_mpf_abnormal_int_st1 },
	{ .desc_offset = 5, .data_offset = 2, .msg = "PPU_MPF_ABNORMAL_INT_ST2_RAS", .hw_err = ppu_mpf_abnormal_int_st2_ras },
	{ .desc_offset = 5, .data_offset = 3, .msg = "PPU_MPF_ABNORMAL_INT_ST3", .hw_err = ppu_mpf_abnormal_int_st3 },
	{ .desc_offset = 6, .data_offset = 0, .msg = "TM_SCH_RINT", .hw_err = tm_sch_int },
	{ .desc_offset = 7, .data_offset = 0, .msg = "QCN_FIFO_RINT", .hw_err = qcn_fifo_int },
	{ .desc_offset = 7, .data_offset = 1, .msg = "QCN_ECC_RINT", .hw_err = qcn_ecc_int },
	{ .desc_offset = 9, .data_offset = 0, .msg = "NCSI_ECC_INT_RPT", .hw_err = ncsi_ecc_int },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

static const struct hns3_hw_error_desc pf_ras_err_tbl[] = {
	{ .desc_offset = 0, .data_offset = 0, .msg = "SSU_PORT_BASED_ERR_INT_RAS", .hw_err = ssu_port_based_err_int },
	{ .desc_offset = 0, .data_offset = 1, .msg = "SSU_FIFO_OVERFLOW_INT", .hw_err = ssu_fifo_overflow_int },
	{ .desc_offset = 0, .data_offset = 2, .msg = "SSU_ETS_TCG_INT", .hw_err = ssu_ets_tcg_int },
	{ .desc_offset = 1, .data_offset = 0, .msg = "IGU_EGU_TNL_INT_STS", .hw_err = igu_egu_tnl_int },
	{ .desc_offset = 3, .data_offset = 0, .msg = "PPU_PF_ABNORMAL_INT_ST_RAS", .hw_err = ppu_pf_abnormal_int_ras },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

static const struct hns3_hw_error_desc mpf_msix_err_tbl[] = {
	{ .desc_offset = 1, .data_offset = 0, .msg = "MAC_AFIFO_TNL_INT_R", .hw_err = mac_afifo_tnl_int },
	{ .desc_offset = 5, .data_offset = 2, .msg = "PPU_MPF_ABNORMAL_INT_ST2_MSIX", .hw_err = ppu_mpf_abnormal_int_st2_msix },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

static const struct hns3_hw_error_desc pf_msix_err_tbl[] = {
	{ .desc_offset = 0, .data_offset = 0, .msg = "SSU_PORT_BASED_ERR_INT_MSIX", .hw_err = ssu_port_based_pf_int },
	{ .desc_offset = 2, .data_offset = 0, .msg = "PPP_PF_ABNORMAL_INT_ST0", .hw_err = ppp_pf_abnormal_int },
	{ .desc_offset = 3, .data_offset = 0, .msg = "PPU_PF_ABNORMAL_INT_ST_MSIX", .hw_err = ppu_pf_abnormal_int_msix },
	{ .desc_offset = 0, .data_offset = 0, .msg = NULL, .hw_err = NULL }
};

enum hns3_hw_err_report_type {
	MPF_MSIX_ERR,
	PF_MSIX_ERR,
	MPF_RAS_ERR,
	PF_RAS_ERR,
};

static const struct hns3_hw_mod_name hns3_hw_module_name[] = {
	{ .module_name = MODULE_NONE, .msg = "MODULE_NONE" },
	{ .module_name = MODULE_BIOS_COMMON, .msg = "MODULE_BIOS_COMMON" },
	{ .module_name = MODULE_GE, .msg = "MODULE_GE" },
	{ .module_name = MODULE_IGU_EGU, .msg = "MODULE_IGU_EGU" },
	{ .module_name = MODULE_LGE, .msg = "MODULE_LGE" },
	{ .module_name = MODULE_NCSI, .msg = "MODULE_NCSI" },
	{ .module_name = MODULE_PPP, .msg = "MODULE_PPP" },
	{ .module_name = MODULE_QCN, .msg = "MODULE_QCN" },
	{ .module_name = MODULE_RCB_RX, .msg = "MODULE_RCB_RX" },
	{ .module_name = MODULE_RTC, .msg = "MODULE_RTC" },
	{ .module_name = MODULE_SSU, .msg = "MODULE_SSU" },
	{ .module_name = MODULE_TM, .msg = "MODULE_TM" },
	{ .module_name = MODULE_RCB_TX, .msg = "MODULE_RCB_TX" },
	{ .module_name = MODULE_TXDMA, .msg = "MODULE_TXDMA" },
	{ .module_name = MODULE_MASTER, .msg = "MODULE_MASTER" },
	{ .module_name = MODULE_HIMAC, .msg = "MODULE_HIMAC" }
};

static const struct hns3_hw_err_type hns3_hw_error_type[] = {
	{ .error_type = NONE_ERROR, .msg = "none_error" },
	{ .error_type = FIFO_ERROR, .msg = "fifo_error" },
	{ .error_type = MEMORY_ERROR, .msg = "memory_error" },
	{ .error_type = POISION_ERROR, .msg = "poision_error" },
	{ .error_type = MSIX_ECC_ERROR, .msg = "msix_ecc_error" },
	{ .error_type = TQP_INT_ECC_ERROR, .msg = "tqp_int_ecc_error" },
	{ .error_type = PF_ABNORMAL_INT_ERROR, .msg = "pf_abnormal_int_error" },
	{ .error_type = MPF_ABNORMAL_INT_ERROR, .msg = "mpf_abnormal_int_error" },
	{ .error_type = COMMON_ERROR, .msg = "common_error" },
	{ .error_type = PORT_ERROR, .msg = "port_error" },
	{ .error_type = ETS_ERROR, .msg = "ets_error" },
	{ .error_type = NCSI_ERROR, .msg = "ncsi_error" },
	{ .error_type = GLB_ERROR, .msg = "glb_error" }
};

static void
hns3_report_reset_begin(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_ERR_RECOVERING, NULL);
}

static void
hns3_report_reset_success(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_RECOVERY_SUCCESS, NULL);
}

static void
hns3_report_reset_failed(struct hns3_hw *hw)
{
	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];

	rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_RECOVERY_FAILED, NULL);
}

static int
hns3_config_ncsi_hw_err_int(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure NCSI error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_NCSI_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_NCSI_ERR_INT_EN);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s NCSI error interrupts, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_igu_egu_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure IGU,EGU error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_IGU_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_ENABLE);
	else
		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_DISABLE);

	desc.data[1] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to %s IGU common interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_IGU_EGU_TNL_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_TNL_ERR_INT_EN);

	desc.data[1] = rte_cpu_to_le_32(HNS3_IGU_TNL_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to %s IGU-EGU TNL interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	return hns3_config_ncsi_hw_err_int(hns, en);
}

static int
config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int ret;

	/* configure PPP error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], cmd, false);

	if (cmd == HNS3_OPC_PPP_CMD0_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN);
			desc[0].data[1] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN);
			desc[0].data[4] =
				rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN);
		}

		desc[1].data[0] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN_MASK);
		desc[1].data[1] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN_MASK);
		desc[1].data[2] =
			rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN_MASK);
	} else if (cmd == HNS3_OPC_PPP_CMD1_INT_CMD) {
		if (en) {
			desc[0].data[0] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN);
			desc[0].data[1] =
				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT3_EN);
		}

		desc[1].data[0] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN_MASK);
		desc[1].data[1] =
			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT3_EN_MASK);
	}

	ret = hns3_cmd_send(hw, &desc[0], 2);
	if (ret)
		hns3_err(hw, "fail to %s PPP error int, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_ppp_err_intr(struct hns3_adapter *hns, bool en)
{
	int ret;

	ret = config_ppp_err_intr(hns, HNS3_OPC_PPP_CMD0_INT_CMD, en);
	if (ret)
		return ret;

	return config_ppp_err_intr(hns, HNS3_OPC_PPP_CMD1_INT_CMD, en);
}

static int
enable_ssu_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int ret;

	/* configure SSU ecc error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_SSU_ECC_INT_CMD, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_SSU_ECC_INT_CMD, false);
	if (en) {
		desc[0].data[0] =
			rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN);
		desc[0].data[1] =
			rte_cpu_to_le_32(HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN);
		desc[0].data[4] =
			rte_cpu_to_le_32(HNS3_SSU_BIT32_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[1] =
		rte_cpu_to_le_32(HNS3_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = rte_cpu_to_le_32(HNS3_SSU_BIT32_ECC_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc[0], 2);
	if (ret) {
		hns3_err(hw, "fail to %s SSU ECC error interrupt, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	/* configure SSU common error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_SSU_COMMON_INT_CMD, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_SSU_COMMON_INT_CMD, false);

	if (en) {
		desc[0].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN);
		desc[0].data[1] =
			rte_cpu_to_le_32(HNS3_SSU_PORT_BASED_ERR_INT_EN);
		desc[0].data[2] =
			rte_cpu_to_le_32(HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN);
	}

	desc[1].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN_MASK |
					   HNS3_SSU_PORT_BASED_ERR_INT_EN_MASK);
	desc[1].data[1] =
		rte_cpu_to_le_32(HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc[0], 2);
	if (ret)
		hns3_err(hw, "fail to %s SSU COMMON error intr, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

void
hns3_config_mac_tnl_int(struct hns3_hw *hw, bool en)
{
	struct hns3_cmd_desc desc;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_TNL_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_EN);
	else
		desc.data[0] = 0;

	desc.data[1] = rte_cpu_to_le_32(HNS3_MAC_TNL_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s mac tnl intr, ret = %d",
			 en ? "enable" : "disable", ret);
}

static int
config_ppu_err_intrs(struct hns3_adapter *hns, uint32_t cmd, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int num = 1;

	/* configure PPU error interrupts */
	switch (cmd) {
	case HNS3_OPC_PPU_MPF_ECC_INT_CMD:
		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
		desc[0].flag |= HNS3_CMD_FLAG_NEXT;
		hns3_cmd_setup_basic_desc(&desc[1], cmd, false);
		if (en) {
			desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT0_EN;
			desc[0].data[1] = HNS3_PPU_MPF_ABNORMAL_INT1_EN;
			desc[1].data[3] = HNS3_PPU_MPF_ABNORMAL_INT3_EN;
			desc[1].data[4] = HNS3_PPU_MPF_ABNORMAL_INT2_EN;
		}

		desc[1].data[0] = HNS3_PPU_MPF_ABNORMAL_INT0_EN_MASK;
		desc[1].data[1] = HNS3_PPU_MPF_ABNORMAL_INT1_EN_MASK;
		desc[1].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN_MASK;
		desc[1].data[3] |= HNS3_PPU_MPF_ABNORMAL_INT3_EN_MASK;
		num = 2;
		break;
	case HNS3_OPC_PPU_MPF_OTHER_INT_CMD:
		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2;

		desc[0].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
		break;
	case HNS3_OPC_PPU_PF_OTHER_INT_CMD:
		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
		if (en)
			desc[0].data[0] = HNS3_PPU_PF_ABNORMAL_INT_EN;

		desc[0].data[2] = HNS3_PPU_PF_ABNORMAL_INT_EN_MASK;
		break;
	default:
		hns3_err(hw,
			 "Invalid cmd(%u) to configure PPU error interrupts.",
			 cmd);
		return -EINVAL;
	}

	return hns3_cmd_send(hw, &desc[0], num);
}

static int
enable_ppu_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	int ret;

	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_MPF_ECC_INT_CMD, en);
	if (ret) {
		hns3_err(hw, "fail to %s PPU MPF ECC error intr, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_MPF_OTHER_INT_CMD, en);
	if (ret) {
		hns3_err(hw, "fail to %s PPU MPF other intr, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_PF_OTHER_INT_CMD, en);
	if (ret)
		hns3_err(hw, "fail to %s PPU PF error interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
	return ret;
}

static int
enable_tm_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure TM SCH error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_SCH_ECC_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_TM_SCH_ECC_ERR_INT_EN);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "fail to %s TM SCH interrupts, ret = %d",
			 en ? "enable" : "disable", ret);
		return ret;
	}

	/* configure TM QCN hw errors */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QCN_MEM_INT_CFG, false);
	desc.data[0] = rte_cpu_to_le_32(HNS3_TM_QCN_ERR_INT_TYPE);
	if (en) {
		desc.data[0] |= rte_cpu_to_le_32(HNS3_TM_QCN_FIFO_INT_EN);
		desc.data[1] = rte_cpu_to_le_32(HNS3_TM_QCN_MEM_ERR_INT_EN);
	}

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s TM QCN mem errors, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_common_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc[2];
	int ret;

	/* configure common error interrupts */
	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_COMMON_ECC_INT_CFG, false);
	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_COMMON_ECC_INT_CFG, false);

	if (en) {
		desc[0].data[0] =
			rte_cpu_to_le_32(HNS3_IMP_TCM_ECC_ERR_INT_EN);
		desc[0].data[2] =
			rte_cpu_to_le_32(HNS3_CMDQ_NIC_ECC_ERR_INT_EN);
		desc[0].data[3] =
			rte_cpu_to_le_32(HNS3_IMP_RD_POISON_ERR_INT_EN);
		desc[0].data[4] =
			rte_cpu_to_le_32(HNS3_TQP_ECC_ERR_INT_EN |
					 HNS3_MSIX_SRAM_ECC_ERR_INT_EN);
		desc[0].data[5] =
			rte_cpu_to_le_32(HNS3_IMP_ITCM4_ECC_ERR_INT_EN);
	}

	desc[1].data[0] = rte_cpu_to_le_32(HNS3_IMP_TCM_ECC_ERR_INT_EN_MASK);
	desc[1].data[2] = rte_cpu_to_le_32(HNS3_CMDQ_NIC_ECC_ERR_INT_EN_MASK);
	desc[1].data[3] = rte_cpu_to_le_32(HNS3_IMP_RD_POISON_ERR_INT_EN_MASK);
	desc[1].data[4] = rte_cpu_to_le_32(HNS3_TQP_ECC_ERR_INT_EN_MASK |
					   HNS3_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
	desc[1].data[5] = rte_cpu_to_le_32(HNS3_IMP_ITCM4_ECC_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc[0], RTE_DIM(desc));
	if (ret)
		hns3_err(hw, "fail to %s common err interrupts, ret = %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static int
enable_mac_err_intr(struct hns3_adapter *hns, bool en)
{
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc desc;
	int ret;

	/* configure MAC common error interrupts */
	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_COMMON_INT_EN, false);
	if (en)
		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN);

	desc.data[1] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN_MASK);

	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret)
		hns3_err(hw, "fail to %s MAC COMMON error intr: %d",
			 en ? "enable" : "disable", ret);

	return ret;
}

static const struct hns3_hw_blk hw_blk[] = {
	{ .name = "IGU_EGU", .enable_err_intr = enable_igu_egu_err_intr },
	{ .name = "PPP", .enable_err_intr = enable_ppp_err_intr },
	{ .name = "SSU", .enable_err_intr = enable_ssu_err_intr },
	{ .name = "PPU", .enable_err_intr = enable_ppu_err_intr },
	{ .name = "TM", .enable_err_intr = enable_tm_err_intr },
	{ .name = "COMMON", .enable_err_intr = enable_common_err_intr },
	{ .name = "MAC", .enable_err_intr = enable_mac_err_intr },
	{ .name = NULL, .enable_err_intr = NULL }
};

int
hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en)
{
	const struct hns3_hw_blk *module = hw_blk;
	int ret = 0;

	while (module->enable_err_intr) {
		ret = module->enable_err_intr(hns, en);
		if (ret)
			return ret;

		module++;
	}

	return ret;
}

static enum hns3_reset_level
hns3_find_highest_level(struct hns3_adapter *hns, const char *reg,
			const struct hns3_hw_error *err, uint32_t err_sts)
{
	enum hns3_reset_level reset_level = HNS3_FUNC_RESET;
	struct hns3_hw *hw = &hns->hw;
	bool need_reset = false;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			hns3_warn(hw, "%s %s found [error status=0x%x]",
				  reg, err->msg, err_sts);
			if (err->reset_level != HNS3_NONE_RESET &&
			    err->reset_level >= reset_level) {
				reset_level = err->reset_level;
				need_reset = true;
			}
		}
		err++;
	}
	if (need_reset)
		return reset_level;
	else
		return HNS3_NONE_RESET;
}

static int
query_num_bds(struct hns3_hw *hw, bool is_ras, uint32_t *mpf_bd_num,
	      uint32_t *pf_bd_num)
{
	uint32_t mpf_min_bd_num, pf_min_bd_num;
	uint32_t mpf_bd_num_val, pf_bd_num_val;
	enum hns3_opcode_type opcode;
	struct hns3_cmd_desc desc;
	int ret;

	if (is_ras) {
		opcode = HNS3_OPC_QUERY_RAS_INT_STS_BD_NUM;
		mpf_min_bd_num = HNS3_MPF_RAS_INT_MIN_BD_NUM;
		pf_min_bd_num = HNS3_PF_RAS_INT_MIN_BD_NUM;
	} else {
		opcode = HNS3_OPC_QUERY_MSIX_INT_STS_BD_NUM;
		mpf_min_bd_num = HNS3_MPF_MSIX_INT_MIN_BD_NUM;
		pf_min_bd_num = HNS3_PF_MSIX_INT_MIN_BD_NUM;
	}

	hns3_cmd_setup_basic_desc(&desc, opcode, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "query num bds in msix failed, ret = %d", ret);
		return ret;
	}

	mpf_bd_num_val = rte_le_to_cpu_32(desc.data[0]);
	pf_bd_num_val = rte_le_to_cpu_32(desc.data[1]);
	if (mpf_bd_num_val < mpf_min_bd_num || pf_bd_num_val < pf_min_bd_num) {
		hns3_err(hw, "error bd num: mpf(%u), min_mpf(%u), "
			 "pf(%u), min_pf(%u)", mpf_bd_num_val, mpf_min_bd_num,
			 pf_bd_num_val, pf_min_bd_num);
		return -EINVAL;
	}

	*mpf_bd_num = mpf_bd_num_val;
	*pf_bd_num = pf_bd_num_val;

	return 0;
}

void
hns3_intr_unregister(const struct rte_intr_handle *hdl,
		     rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int retry_cnt = 0;
	int ret;

	do {
		ret = rte_intr_callback_unregister(hdl, cb_fn, cb_arg);
		if (ret >= 0) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR, "Failed to unregister intr: %d", ret);
			break;
		}
		/*
		 * The unregister reports -EAGAIN while the callback is still
		 * in flight, so back off briefly before retrying.
		 */
		rte_delay_ms(HNS3_INTR_UNREG_FAIL_DELAY_MS);
	} while (retry_cnt++ < HNS3_INTR_UNREG_FAIL_RETRY_CNT);
}

static uint32_t
hns3_get_hw_error_status(struct hns3_cmd_desc *desc, uint8_t desc_offset,
			 uint8_t data_offset)
{
	uint32_t status;
	uint32_t *desc_data;

	if (desc_offset == 0)
		status = rte_le_to_cpu_32(desc[desc_offset].data[data_offset]);
	else {
		desc_data = (uint32_t *)&desc[desc_offset];
		status = rte_le_to_cpu_32(*(desc_data + data_offset));
	}

	return status;
}
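
/*
 * Query the MPF/PF MSI-X or RAS error status descriptors, walk the matching
 * error table to log each pending error and record the highest reset level
 * it requires in 'levels', then reuse the same command to clear the reported
 * errors.
 */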
static int
hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
		     int num, RTE_ATOMIC(uint64_t) *levels,
		     enum hns3_hw_err_report_type err_type)
{
	const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
	enum hns3_opcode_type opcode;
	enum hns3_reset_level req_level;
	struct hns3_hw *hw = &hns->hw;
	uint32_t status;
	int ret;

	switch (err_type) {
	case MPF_MSIX_ERR:
		err = mpf_msix_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT;
		break;
	case PF_MSIX_ERR:
		err = pf_msix_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT;
		break;
	case MPF_RAS_ERR:
		err = mpf_ras_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT;
		break;
	case PF_RAS_ERR:
		err = pf_ras_err_tbl;
		opcode = HNS3_OPC_QUERY_CLEAR_PF_RAS_INT;
		break;
	default:
		hns3_err(hw, "error hardware err_type = %d", err_type);
		return -EINVAL;
	}

	/* query all hardware errors */
	hns3_cmd_setup_basic_desc(&desc[0], opcode, true);
	ret = hns3_cmd_send(hw, &desc[0], num);
	if (ret) {
		hns3_err(hw, "query hw err int 0x%x cmd failed, ret = %d",
			 opcode, ret);
		return ret;
	}

	/* traverse the error table and process errors based on their type */
	while (err->msg) {
		status = hns3_get_hw_error_status(desc, err->desc_offset,
						  err->data_offset);
		if (status) {
			/*
			 * Set the reset_level or non_reset flag based on
			 * the error type and add error statistics. Here we
			 * just set the flag; the actual reset action is
			 * taken in hns3_msix_process.
			 */
			req_level = hns3_find_highest_level(hns, err->msg,
							    err->hw_err,
							    status);
			hns3_atomic_set_bit(req_level, levels);
		}
		err++;
	}

	/* clear all hardware errors */
	hns3_cmd_reuse_desc(&desc[0], false);
	ret = hns3_cmd_send(hw, &desc[0], num);
	if (ret)
		hns3_err(hw, "clear all hw err int cmd failed, ret = %d", ret);

	return ret;
}

void
hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
	uint32_t mpf_bd_num, pf_bd_num, bd_num;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc *desc;
	int ret;

	/* query the number of bds for the MSIx int status */
	ret = query_num_bds(hw, false, &mpf_bd_num, &pf_bd_num);
	if (ret) {
		hns3_err(hw, "fail to query msix int status bd num: ret = %d",
			 ret);
		return;
	}

	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
	if (desc == NULL) {
		hns3_err(hw,
			 "fail to zmalloc desc for handling msix error, size = %zu",
			 bd_num * sizeof(struct hns3_cmd_desc));
		return;
	}

	/* handle all main PF MSIx errors */
	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_MSIX_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all main pf msix errors, ret = %d",
			 ret);
		goto out;
	}

	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));

	/* handle all PF MSIx errors */
	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_MSIX_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all pf msix errors, ret = %d",
			 ret);
		goto out;
	}

out:
	rte_free(desc);
}

void
hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
	uint32_t mpf_bd_num, pf_bd_num, bd_num;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc *desc;
	uint32_t status;
	int ret;

	status = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	if ((status & HNS3_RAS_REG_NFE_MASK) == 0)
		return;

	/* query the number of bds for the RAS int status */
	ret = query_num_bds(hw, true, &mpf_bd_num, &pf_bd_num);
	if (ret) {
		hns3_err(hw, "fail to query ras int status bd num: ret = %d",
			 ret);
		return;
	}

	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
	if (desc == NULL) {
		hns3_err(hw,
			 "fail to zmalloc desc for handling ras error, size = %zu",
			 bd_num * sizeof(struct hns3_cmd_desc));
		return;
	}

	/* handle all main PF RAS errors */
	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_RAS_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all main pf ras errors, ret = %d",
			 ret);
		goto out;
	}

	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));

	/* handle all PF RAS errors */
	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_RAS_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all pf ras errors, ret = %d", ret);
		goto out;
	}

out:
	rte_free(desc);
}
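
/*
 * Decode one "type + registers" error record from the firmware error-info
 * buffer: the high bit of type_id marks it as a RAS or MSI-X error, the low
 * bits index hns3_hw_error_type, and mod_id indexes hns3_hw_module_name.
 */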
void
hns3_handle_msix_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
	uint32_t mpf_bd_num, pf_bd_num, bd_num;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc *desc;
	int ret;

	/* query the number of bds for the MSIx int status */
	ret = query_num_bds(hw, false, &mpf_bd_num, &pf_bd_num);
	if (ret) {
		hns3_err(hw, "fail to query msix int status bd num: ret = %d",
			 ret);
		return;
	}

	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
	if (desc == NULL) {
		hns3_err(hw,
			 "fail to zmalloc desc for handling msix error, size = %zu",
			 bd_num * sizeof(struct hns3_cmd_desc));
		return;
	}

	/* handle all main PF MSIx errors */
	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_MSIX_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all main pf msix errors, ret = %d",
			 ret);
		goto out;
	}

	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));

	/* handle all PF MSIx errors */
	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_MSIX_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all pf msix errors, ret = %d",
			 ret);
		goto out;
	}

out:
	rte_free(desc);
}

void
hns3_handle_ras_error(struct hns3_adapter *hns, RTE_ATOMIC(uint64_t) *levels)
{
	uint32_t mpf_bd_num, pf_bd_num, bd_num;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_cmd_desc *desc;
	uint32_t status;
	int ret;

	status = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
	if ((status & HNS3_RAS_REG_NFE_MASK) == 0)
		return;

	/* query the number of bds for the RAS int status */
	ret = query_num_bds(hw, true, &mpf_bd_num, &pf_bd_num);
	if (ret) {
		hns3_err(hw, "fail to query ras int status bd num: ret = %d",
			 ret);
		return;
	}

	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
	if (desc == NULL) {
		hns3_err(hw,
			 "fail to zmalloc desc for handling ras error, size = %zu",
			 bd_num * sizeof(struct hns3_cmd_desc));
		return;
	}

	/* handle all main PF RAS errors */
	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_RAS_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all main pf ras errors, ret = %d",
			 ret);
		goto out;
	}

	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));

	/* handle all PF RAS errors */
	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_RAS_ERR);
	if (ret) {
		hns3_err(hw, "fail to handle all pf ras errors, ret = %d", ret);
		goto out;
	}

out:
	rte_free(desc);
}

static void
hns3_handle_type_reg_error_data(struct hns3_hw *hw,
				struct hns3_mod_err_info *mod_err_info,
				struct hns3_type_reg_err_info *err_info)
{
#define HNS3_ERR_TYPE_MASK 0x7F
#define HNS3_ERR_TYPE_IS_RAS_OFFSET 7

	uint8_t mod_id, total_module, type_id, total_type;
	uint8_t is_ras;
	uint8_t i;

	mod_id = mod_err_info->mod_id;
	type_id = err_info->type_id & HNS3_ERR_TYPE_MASK;
	is_ras = err_info->type_id >> HNS3_ERR_TYPE_IS_RAS_OFFSET;

	total_module = RTE_DIM(hns3_hw_module_name);
	total_type = RTE_DIM(hns3_hw_error_type);

	hns3_err(hw, "total_module:%u, total_type:%u",
		 total_module, total_type);

	if (mod_id < total_module && type_id < total_type)
		hns3_err(hw, "found %s %s, is %s error.",
			 hns3_hw_module_name[mod_id].msg,
			 hns3_hw_error_type[type_id].msg,
			 is_ras ? "ras" : "msix");
	else
		hns3_err(hw, "unknown module[%u] or type[%u].",
			 mod_id, type_id);

	hns3_err(hw, "reg_value:");
	for (i = 0; i < err_info->reg_num; i++)
		hns3_err(hw, "0x%08x", err_info->reg[i]);
}

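/*
 * Parse the error information buffer reported by the firmware. As consumed
 * below, the layout is:
 *   buf[0]      summary word (hns3_sum_err_info): module count and the
 *               suggested reset type
 *   per module  one hns3_mod_err_info word (module id, error count),
 *               followed by
 *   per error   one hns3_type_reg_err_info word (error type, RAS/MSI-X
 *               flag, register count) and reg_num register values
 * The suggested reset type is turned into a pending reset request and every
 * entry is logged via hns3_handle_type_reg_error_data().
 */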
static void
hns3_handle_module_error_data(struct hns3_hw *hw, uint32_t *buf,
			      uint32_t buf_size)
{
	struct hns3_type_reg_err_info *type_reg_err_info;
	struct hns3_mod_err_info *mod_err_info;
	struct hns3_sum_err_info *sum_err_info;
	uint8_t mod_num, reset_type;
	uint32_t offset = 0;
	uint8_t err_num;
	uint8_t i;

	sum_err_info = (struct hns3_sum_err_info *)&buf[offset++];
	mod_num = sum_err_info->mod_num;
	reset_type = sum_err_info->reset_type;

	if (reset_type >= HNS3_MAX_RESET) {
		hns3_err(hw, "invalid reset type = %u", reset_type);
		return;
	}

	if (reset_type && reset_type != HNS3_NONE_RESET)
		hns3_atomic_set_bit(reset_type, &hw->reset.request);

	hns3_err(hw, "reset_type = %s, mod_num = %u.",
		 reset_string[reset_type], mod_num);

	while (mod_num--) {
		if (offset >= buf_size) {
			hns3_err(hw, "offset(%u) exceeds buf's size(%u).",
				 offset, buf_size);
			return;
		}
		mod_err_info = (struct hns3_mod_err_info *)&buf[offset++];
		err_num = mod_err_info->err_num;
		for (i = 0; i < err_num; i++) {
			if (offset >= buf_size) {
				hns3_err(hw,
					 "offset(%u) exceeds buf size(%u).",
					 offset, buf_size);
				return;
			}

			type_reg_err_info = (struct hns3_type_reg_err_info *)
						&buf[offset++];
			hns3_handle_type_reg_error_data(hw, mod_err_info,
							type_reg_err_info);

			offset += type_reg_err_info->reg_num;
		}
	}
}

static int
hns3_query_all_err_bd_num(struct hns3_hw *hw, uint32_t *bd_num)
{
	struct hns3_cmd_desc desc;
	uint32_t bd_num_data;
	int ret;

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_ALL_ERR_BD_NUM, true);
	ret = hns3_cmd_send(hw, &desc, 1);
	if (ret) {
		hns3_err(hw, "failed to query error bd_num, ret = %d.", ret);
		return ret;
	}

	bd_num_data = rte_le_to_cpu_32(desc.data[0]);
	*bd_num = bd_num_data;
	if (bd_num_data == 0) {
		hns3_err(hw, "the value of bd_num is 0!");
		return -EINVAL;
	}

	return 0;
}

static int
hns3_query_all_err_info(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
			uint32_t bd_num)
{
	int ret;

	hns3_cmd_setup_basic_desc(desc, HNS3_OPC_QUERY_ALL_ERR_INFO, true);
	ret = hns3_cmd_send(hw, desc, bd_num);
	if (ret)
		hns3_err(hw, "failed to query error info, ret = %d.", ret);

	return ret;
}

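/*
 * Error handling path for devices with the RAS_IMP capability, where the
 * firmware reports a consolidated error dump: query how many buffer
 * descriptors the dump needs, fetch it, convert the descriptor payload from
 * little endian to CPU byte order and hand the resulting word array over to
 * hns3_handle_module_error_data() for parsing.
 */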
static void
hns3_handle_hw_error_v2(struct hns3_hw *hw)
{
	uint32_t bd_num, buf_len, i, buf_size;
	struct hns3_cmd_desc *desc;
	uint32_t *desc_data;
	uint32_t *buf;
	int ret;

	ret = hns3_query_all_err_bd_num(hw, &bd_num);
	if (ret)
		goto out;

	desc = rte_zmalloc("hns3_ras", bd_num * sizeof(struct hns3_cmd_desc),
			   0);
	if (desc == NULL) {
		hns3_err(hw, "failed to malloc hns3 ras cmd desc.");
		goto out;
	}

	ret = hns3_query_all_err_info(hw, desc, bd_num);
	if (ret)
		goto err_desc;

	buf_len = bd_num * sizeof(struct hns3_cmd_desc) - HNS3_DESC_NO_DATA_LEN;
	buf_size = buf_len / HNS3_DESC_DATA_UNIT_SIZE;

	desc_data = rte_zmalloc("hns3_ras", buf_len, 0);
	if (desc_data == NULL) {
		hns3_err(hw, "failed to malloc hns3 ras desc data.");
		goto err_desc;
	}

	buf = rte_zmalloc("hns3_ras", buf_len, 0);
	if (buf == NULL) {
		hns3_err(hw, "failed to malloc hns3 ras buf data.");
		goto err_buf_alloc;
	}

	memcpy(desc_data, &desc[0].data[0], buf_len);
	for (i = 0; i < buf_size; i++)
		buf[i] = rte_le_to_cpu_32(desc_data[i]);

	hns3_handle_module_error_data(hw, buf, buf_size);
	rte_free(buf);

err_buf_alloc:
	rte_free(desc_data);
err_desc:
	rte_free(desc);
out:
	return;
}

void
hns3_handle_error(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;

	if (hns3_dev_get_support(hw, RAS_IMP)) {
		hns3_handle_hw_error_v2(hw);
		hns3_schedule_reset(hns);
	} else {
		hns3_handle_msix_error(hns, &hw->reset.request);
		hns3_handle_ras_error(hns, &hw->reset.request);
		hns3_schedule_reset(hns);
	}
}

int
hns3_reset_init(struct hns3_hw *hw)
{
	rte_spinlock_init(&hw->lock);
	hw->reset.level = HNS3_NONE_RESET;
	hw->reset.stage = RESET_STAGE_NONE;
	hw->reset.request = 0;
	hw->reset.pending = 0;
	hw->reset.resetting = 0;
	rte_atomic_store_explicit(&hw->reset.disable_cmd, 0, rte_memory_order_relaxed);
	hw->reset.wait_data = rte_zmalloc("wait_data",
					  sizeof(struct hns3_wait_data), 0);
	if (!hw->reset.wait_data) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for wait_data");
		return -ENOMEM;
	}
	return 0;
}

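/*
 * Schedule the reset service. If the adapter is not yet initialized, the
 * request is only marked SCHEDULE_PENDING so that it can be rescheduled
 * after initialization; if the adapter has been closed, nothing is done.
 * An already requested service is not scheduled twice, and a deferred
 * schedule is cancelled and replaced by an immediately armed alarm.
 */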
void
hns3_schedule_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;

	/* Reschedule the reset process after successful initialization */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED) {
		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_PENDING,
					  rte_memory_order_relaxed);
		return;
	}

	if (hw->adapter_state >= HNS3_NIC_CLOSED)
		return;

	/* Schedule restart alarm if it is not scheduled yet */
	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
	    SCHEDULE_REQUESTED)
		return;
	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
	    SCHEDULE_DEFERRED)
		rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);

	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
				  rte_memory_order_relaxed);

	rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns);
}

void
hns3_schedule_delayed_reset(struct hns3_adapter *hns)
{
#define DEFERRED_SCHED_US (3 * MSEC_PER_SEC * USEC_PER_MSEC)
	struct hns3_hw *hw = &hns->hw;

	/* Do nothing if it is uninited or closed */
	if (hw->adapter_state == HNS3_NIC_UNINITIALIZED ||
	    hw->adapter_state >= HNS3_NIC_CLOSED) {
		return;
	}

	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) !=
	    SCHEDULE_NONE)
		return;
	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_DEFERRED,
				  rte_memory_order_relaxed);
	rte_eal_alarm_set(DEFERRED_SCHED_US, hw->reset.ops->reset_service, hns);
}

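/*
 * Alarm callback used to poll for completion of an asynchronous reset step.
 * When a completion check is provided, polling stops once the check
 * succeeds, the retry budget (wait_data->count) is used up, the deadline
 * (wait_data->end_ms) passes, a new reset becomes pending or the port is
 * closing; without a check the callback completes immediately. The outcome
 * is stored in wait_data->result and the reset service is rescheduled.
 * Callers prepare hw->reset.wait_data before arming the alarm; for example,
 * hns3_reset_req_hw_reset() below uses a single-shot wait (count = 1,
 * check_completion = NULL) to delay notifying the hardware that the
 * preparatory work is done.
 */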
void
hns3_wait_callback(void *param)
{
	struct hns3_wait_data *data = (struct hns3_wait_data *)param;
	struct hns3_adapter *hns = data->hns;
	struct hns3_hw *hw = &hns->hw;
	uint64_t msec;
	bool done;

	data->count--;
	if (data->check_completion) {
		/*
		 * Check whether the current time exceeds the deadline,
		 * a new reset is pending, or the port is being closed.
		 */
		msec = hns3_clock_gettime_ms();
		if (msec > data->end_ms || is_reset_pending(hns) ||
		    hw->adapter_state == HNS3_NIC_CLOSING) {
			done = false;
			data->count = 0;
		} else
			done = data->check_completion(hw);
	} else
		done = true;

	if (!done && data->count > 0) {
		rte_eal_alarm_set(data->interval, hns3_wait_callback, data);
		return;
	}
	if (done)
		data->result = HNS3_WAIT_SUCCESS;
	else {
		hns3_err(hw, "%s wait timeout at stage %d",
			 reset_string[hw->reset.level], hw->reset.stage);
		data->result = HNS3_WAIT_TIMEOUT;
	}
	hns3_schedule_reset(hns);
}

void
hns3_notify_reset_ready(struct hns3_hw *hw, bool enable)
{
	uint32_t reg_val;

	reg_val = hns3_read_dev(hw, HNS3_CMDQ_TX_DEPTH_REG);
	if (enable)
		reg_val |= HNS3_NIC_SW_RST_RDY;
	else
		reg_val &= ~HNS3_NIC_SW_RST_RDY;

	hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, reg_val);
}

int
hns3_reset_req_hw_reset(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;

	if (hw->reset.wait_data->result == HNS3_WAIT_UNKNOWN) {
		hw->reset.wait_data->hns = hns;
		hw->reset.wait_data->check_completion = NULL;
		hw->reset.wait_data->interval = HNS3_RESET_SYNC_US;
		hw->reset.wait_data->count = 1;
		hw->reset.wait_data->result = HNS3_WAIT_REQUEST;
		rte_eal_alarm_set(hw->reset.wait_data->interval,
				  hns3_wait_callback, hw->reset.wait_data);
		return -EAGAIN;
	} else if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST)
		return -EAGAIN;

	/* inform hardware that the preparatory work is done */
	hns3_notify_reset_ready(hw, true);
	return 0;
}

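/*
 * Clear the reset level that has just been executed from 'levels', together
 * with any lower-level requests that the executed reset already covers
 * (e.g. an IMP reset also satisfies pending global and function resets).
 * Each absorbed lower-level request is counted as a merged reset in the
 * statistics.
 */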
static void
hns3_clear_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
{
	uint64_t merge_cnt = hw->reset.stats.merge_cnt;
	uint64_t tmp;

	switch (hw->reset.level) {
	case HNS3_IMP_RESET:
		hns3_atomic_clear_bit(HNS3_IMP_RESET, levels);
		tmp = hns3_test_and_clear_bit(HNS3_GLOBAL_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		break;
	case HNS3_GLOBAL_RESET:
		hns3_atomic_clear_bit(HNS3_GLOBAL_RESET, levels);
		tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		break;
	case HNS3_FUNC_RESET:
		hns3_atomic_clear_bit(HNS3_FUNC_RESET, levels);
		break;
	case HNS3_VF_RESET:
		hns3_atomic_clear_bit(HNS3_VF_RESET, levels);
		tmp = hns3_test_and_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		break;
	case HNS3_VF_FULL_RESET:
		hns3_atomic_clear_bit(HNS3_VF_FULL_RESET, levels);
		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		break;
	case HNS3_VF_PF_FUNC_RESET:
		hns3_atomic_clear_bit(HNS3_VF_PF_FUNC_RESET, levels);
		tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels);
		merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt;
		break;
	case HNS3_VF_FUNC_RESET:
		hns3_atomic_clear_bit(HNS3_VF_FUNC_RESET, levels);
		break;
	case HNS3_FLR_RESET:
		hns3_atomic_clear_bit(HNS3_FLR_RESET, levels);
		break;
	case HNS3_NONE_RESET:
	default:
		return;
	}

	if (merge_cnt != hw->reset.stats.merge_cnt) {
		hns3_warn(hw,
			  "No need to do low-level reset after %s reset. "
			  "merge cnt: %" PRIu64 " total merge cnt: %" PRIu64,
			  reset_string[hw->reset.level],
			  merge_cnt - hw->reset.stats.merge_cnt,
			  merge_cnt);
		hw->reset.stats.merge_cnt = merge_cnt;
	}
}

static bool
hns3_reset_err_handle(struct hns3_adapter *hns)
{
#define MAX_RESET_FAIL_CNT 30

	struct hns3_hw *hw = &hns->hw;

	if (hw->adapter_state == HNS3_NIC_CLOSING)
		goto reset_fail;

	if (is_reset_pending(hns)) {
		hw->reset.attempts = 0;
		hw->reset.stats.fail_cnt++;
		hns3_warn(hw, "%s reset fail because new reset is pending "
			      "attempts:%" PRIu64,
			      reset_string[hw->reset.level],
			      hw->reset.stats.fail_cnt);
		hw->reset.level = HNS3_NONE_RESET;
		return true;
	}

	hw->reset.attempts++;
	if (hw->reset.attempts < MAX_RESET_FAIL_CNT) {
		hns3_atomic_set_bit(hw->reset.level, &hw->reset.pending);
		hns3_warn(hw, "%s retry to reset attempts: %d",
			  reset_string[hw->reset.level],
			  hw->reset.attempts);
		return true;
	}

	/*
	 * Failure to reset does not mean that the network port is completely
	 * unavailable, so the command queue still needs to be re-initialized.
	 * Regardless of whether that succeeds, the flow after it must
	 * continue.
	 */
	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed))
		(void)hns3_cmd_init(hw);
reset_fail:
	hw->reset.attempts = 0;
	hw->reset.stats.fail_cnt++;
	hns3_warn(hw, "%s reset fail fail_cnt:%" PRIu64 " success_cnt:%" PRIu64
		  " global_cnt:%" PRIu64 " imp_cnt:%" PRIu64
		  " request_cnt:%" PRIu64 " exec_cnt:%" PRIu64
		  " merge_cnt:%" PRIu64 " adapter_state:%d",
		  reset_string[hw->reset.level], hw->reset.stats.fail_cnt,
		  hw->reset.stats.success_cnt, hw->reset.stats.global_cnt,
		  hw->reset.stats.imp_cnt, hw->reset.stats.request_cnt,
		  hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt,
		  hw->adapter_state);

	/* IMP is no longer waiting for the ready flag */
	hns3_notify_reset_ready(hw, true);
	return false;
}

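/*
 * The reset service walks through the numbered steps below, advancing
 * hw->reset.stage as each one succeeds:
 *   step1 (DOWN)          stop the service and mark the port as resetting
 *   step2 (PREWAIT)       driver-specific prepare_reset()
 *   step3 (REQ_HW_RESET)  tell the hardware that the driver is ready
 *   step4 (WAIT)          wait until the hardware is ready again
 *   step5 (DEV_INIT)      re-initialize the device
 *   step6 (RESTORE)       restore the configuration
 * Once all steps succeed, the DONE stage restarts the service and reports
 * the reset statistics. hns3_reset_pre() covers steps 1-2,
 * hns3_reset_process() drives steps 3-4 and hns3_reset_post() covers
 * steps 5-6 and the DONE stage.
 */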
static int
hns3_reset_pre(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	int ret;

	if (hw->reset.stage == RESET_STAGE_NONE) {
		rte_atomic_store_explicit(&hns->hw.reset.resetting, 1, rte_memory_order_relaxed);
		hw->reset.stage = RESET_STAGE_DOWN;
		hns3_report_reset_begin(hw);
		ret = hw->reset.ops->stop_service(hns);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw, "Reset step1 down fail=%d time=%ld.%.6ld",
				  ret, tv.tv_sec, tv.tv_usec);
			return ret;
		}
		hns3_warn(hw, "Reset step1 down success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.stage = RESET_STAGE_PREWAIT;
	}
	if (hw->reset.stage == RESET_STAGE_PREWAIT) {
		ret = hw->reset.ops->prepare_reset(hns);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw,
				  "Reset step2 prepare wait fail=%d time=%ld.%.6ld",
				  ret, tv.tv_sec, tv.tv_usec);
			return ret;
		}
		hns3_warn(hw, "Reset step2 prepare wait success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.stage = RESET_STAGE_REQ_HW_RESET;
		hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
	}
	return 0;
}

static int
hns3_reset_post(struct hns3_adapter *hns)
{
#define TIMEOUT_RETRIES_CNT 30
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv_delta;
	struct timeval tv;
	int ret = 0;

	if (hw->adapter_state == HNS3_NIC_CLOSING) {
		hns3_warn(hw, "Don't do reset_post during closing, just uninit cmd");
		hns3_cmd_uninit(hw);
		return -EPERM;
	}

	if (hw->reset.stage == RESET_STAGE_DEV_INIT) {
		rte_spinlock_lock(&hw->lock);
		if (hw->reset.mbuf_deferred_free) {
			hns3_dev_release_mbufs(hns);
			hw->reset.mbuf_deferred_free = false;
		}
		ret = hw->reset.ops->reinit_dev(hns);
		rte_spinlock_unlock(&hw->lock);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw, "Reset step5 devinit fail=%d retries=%d",
				  ret, hw->reset.retries);
			goto err;
		}
		hns3_warn(hw, "Reset step5 devinit success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.retries = 0;
		hw->reset.stage = RESET_STAGE_RESTORE;
		rte_eal_alarm_set(SWITCH_CONTEXT_US,
				  hw->reset.ops->reset_service, hns);
		return -EAGAIN;
	}
	if (hw->reset.stage == RESET_STAGE_RESTORE) {
		rte_spinlock_lock(&hw->lock);
		ret = hw->reset.ops->restore_conf(hns);
		rte_spinlock_unlock(&hw->lock);
		hns3_clock_gettime(&tv);
		if (ret) {
			hns3_warn(hw,
				  "Reset step6 restore fail=%d retries=%d",
				  ret, hw->reset.retries);
			goto err;
		}
		hns3_warn(hw, "Reset step6 restore success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.retries = 0;
		hw->reset.stage = RESET_STAGE_DONE;
	}
	if (hw->reset.stage == RESET_STAGE_DONE) {
		/* IMP will wait for the ready flag before reset */
		hns3_notify_reset_ready(hw, false);
		hns3_clear_reset_level(hw, &hw->reset.pending);
		hns3_clear_reset_status(hw);
		rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
		hw->reset.attempts = 0;
		hw->reset.stats.success_cnt++;
		hw->reset.stage = RESET_STAGE_NONE;
		rte_spinlock_lock(&hw->lock);
		hw->reset.ops->start_service(hns);
		rte_spinlock_unlock(&hw->lock);
		hns3_clock_gettime(&tv);
		timersub(&tv, &hw->reset.start_time, &tv_delta);
		hns3_warn(hw, "%s reset done fail_cnt:%" PRIu64
			  " success_cnt:%" PRIu64 " global_cnt:%" PRIu64
			  " imp_cnt:%" PRIu64 " request_cnt:%" PRIu64
			  " exec_cnt:%" PRIu64 " merge_cnt:%" PRIu64,
			  reset_string[hw->reset.level],
			  hw->reset.stats.fail_cnt, hw->reset.stats.success_cnt,
			  hw->reset.stats.global_cnt, hw->reset.stats.imp_cnt,
			  hw->reset.stats.request_cnt, hw->reset.stats.exec_cnt,
			  hw->reset.stats.merge_cnt);
		hns3_warn(hw,
			  "%s reset done delta %" PRIu64 " ms time=%ld.%.6ld",
			  reset_string[hw->reset.level],
			  hns3_clock_calctime_ms(&tv_delta),
			  tv.tv_sec, tv.tv_usec);
		hw->reset.level = HNS3_NONE_RESET;
		hns3_report_reset_success(hw);
	}
	return 0;

err:
	if (ret == -ETIME) {
		hw->reset.retries++;
		if (hw->reset.retries < TIMEOUT_RETRIES_CNT) {
			rte_eal_alarm_set(HNS3_RESET_SYNC_US,
					  hw->reset.ops->reset_service, hns);
			return -EAGAIN;
		}
	}
	hw->reset.retries = 0;
	return -EIO;
}

static void
hns3_reset_fail_handle(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv_delta;
	struct timeval tv;

	hns3_clear_reset_level(hw, &hw->reset.pending);
	hns3_clear_reset_status(hw);
	if (hns3_reset_err_handle(hns)) {
		hw->reset.stage = RESET_STAGE_PREWAIT;
		hns3_schedule_reset(hns);
		return;
	}

	rte_spinlock_lock(&hw->lock);
	if (hw->reset.mbuf_deferred_free) {
		hns3_dev_release_mbufs(hns);
		hw->reset.mbuf_deferred_free = false;
	}
	rte_spinlock_unlock(&hw->lock);
	rte_atomic_store_explicit(&hns->hw.reset.resetting, 0, rte_memory_order_relaxed);
	hw->reset.stage = RESET_STAGE_NONE;
	hns3_clock_gettime(&tv);
	timersub(&tv, &hw->reset.start_time, &tv_delta);
	hns3_warn(hw, "%s reset fail delta %" PRIu64 " ms time=%ld.%.6ld",
		  reset_string[hw->reset.level],
		  hns3_clock_calctime_ms(&tv_delta),
		  tv.tv_sec, tv.tv_usec);
	hw->reset.level = HNS3_NONE_RESET;
	hns3_report_reset_failed(hw);
}

/*
 * There are three scenarios:
 * When no reset is in progress, a new reset process starts.
 * During a reset, if the reset level has not changed, the reset process
 * continues; if a higher level is requested, the current process is aborted.
 *	hw->reset.level		new_level		action
 *	HNS3_NONE_RESET		HNS3_XXXX_RESET		start reset
 *	HNS3_XXXX_RESET		HNS3_XXXX_RESET		continue reset
 *	HNS3_LOW_RESET		HNS3_HIGH_RESET		abort
 */
int
hns3_reset_process(struct hns3_adapter *hns, enum hns3_reset_level new_level)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	int ret;

	if (hw->reset.level == HNS3_NONE_RESET) {
		hw->reset.level = new_level;
		hw->reset.stats.exec_cnt++;
		hns3_clock_gettime(&hw->reset.start_time);
		hns3_warn(hw, "Start %s reset time=%ld.%.6ld",
			  reset_string[hw->reset.level],
			  hw->reset.start_time.tv_sec,
			  hw->reset.start_time.tv_usec);
	}

	if (is_reset_pending(hns)) {
		hns3_clock_gettime(&tv);
		hns3_warn(hw,
			  "%s reset is aborted by high level time=%ld.%.6ld",
			  reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
		if (hw->reset.wait_data->result == HNS3_WAIT_REQUEST)
			rte_eal_alarm_cancel(hns3_wait_callback,
					     hw->reset.wait_data);
		goto err;
	}

	ret = hns3_reset_pre(hns);
	if (ret)
		goto err;

	if (hw->reset.stage == RESET_STAGE_REQ_HW_RESET) {
		ret = hns3_reset_req_hw_reset(hns);
		if (ret == -EAGAIN)
			return ret;
		hns3_clock_gettime(&tv);
		hns3_warn(hw,
			  "Reset step3 request IMP reset success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.stage = RESET_STAGE_WAIT;
		hw->reset.wait_data->result = HNS3_WAIT_UNKNOWN;
	}
	if (hw->reset.stage == RESET_STAGE_WAIT) {
		ret = hw->reset.ops->wait_hardware_ready(hns);
		if (ret)
			goto retry;
		hns3_clock_gettime(&tv);
		hns3_warn(hw, "Reset step4 reset wait success time=%ld.%.6ld",
			  tv.tv_sec, tv.tv_usec);
		hw->reset.stage = RESET_STAGE_DEV_INIT;
	}

	ret = hns3_reset_post(hns);
	if (ret)
		goto retry;

	return 0;
retry:
	if (ret == -EAGAIN)
		return ret;
err:
	hns3_reset_fail_handle(hns);

	return -EIO;
}

/*
 * The reset process can only be terminated after the handshake with the IMP
 * (step3), so that the IMP can complete the reset process normally.
 */
void
hns3_reset_abort(struct hns3_adapter *hns)
{
	struct hns3_hw *hw = &hns->hw;
	struct timeval tv;
	int i;

	for (i = 0; i < HNS3_QUIT_RESET_CNT; i++) {
		if (hw->reset.level == HNS3_NONE_RESET)
			break;
		rte_delay_ms(HNS3_QUIT_RESET_DELAY_MS);
	}

	/* IMP is no longer waiting for the ready flag */
	hns3_notify_reset_ready(hw, true);

	rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns);
	rte_eal_alarm_cancel(hns3_wait_callback, hw->reset.wait_data);

	if (hw->reset.level != HNS3_NONE_RESET) {
		hns3_clock_gettime(&tv);
		hns3_err(hw, "Failed to terminate reset: %s time=%ld.%.6ld",
			 reset_string[hw->reset.level], tv.tv_sec, tv.tv_usec);
	}
}

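/*
 * Deferred link status change (LSC) reporting. hns3_report_lse() is the
 * alarm callback that forwards RTE_ETH_EVENT_INTR_LSC to the application
 * once the port is started; hns3_start_report_lse() arms the alarm and
 * hns3_stop_report_lse() cancels it.
 */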
static void
hns3_report_lse(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->adapter_state == HNS3_NIC_STARTED)
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

void
hns3_start_report_lse(struct rte_eth_dev *dev)
{
#define DELAY_REPORT_LSE_US	1
	/*
	 * The caller may hold hns3_hw.lock when this function is invoked.
	 * If the LSC event were reported immediately, applications such as
	 * bonding could call back into the driver's ops from the callback
	 * and try to acquire hns3_hw.lock again, leading to a deadlock.
	 * Reporting is therefore deferred through an alarm.
	 */
	rte_eal_alarm_set(DELAY_REPORT_LSE_US, hns3_report_lse, dev);
}

void
hns3_stop_report_lse(struct rte_eth_dev *dev)
{
	rte_eal_alarm_cancel(hns3_report_lse, dev);
}