/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#ifndef _HNS3_ETHDEV_H_
#define _HNS3_ETHDEV_H_

#include <pthread.h>
#include <sys/time.h>
#include <ethdev_driver.h>
#include <rte_byteorder.h>
#include <rte_io.h>
#include <rte_spinlock.h>

#include "hns3_cmd.h"
#include "hns3_mbx.h"
#include "hns3_rss.h"
#include "hns3_fdir.h"
#include "hns3_stats.h"
#include "hns3_tm.h"

/* Vendor ID */
#define PCI_VENDOR_ID_HUAWEI		0x19e5

/* Device IDs */
#define HNS3_DEV_ID_GE			0xA220
#define HNS3_DEV_ID_25GE		0xA221
#define HNS3_DEV_ID_25GE_RDMA		0xA222
#define HNS3_DEV_ID_50GE_RDMA		0xA224
#define HNS3_DEV_ID_100G_RDMA_MACSEC	0xA226
#define HNS3_DEV_ID_200G_RDMA		0xA228
#define HNS3_DEV_ID_100G_VF		0xA22E
#define HNS3_DEV_ID_100G_RDMA_PFC_VF	0xA22F

/* PCI Config offsets */
#define HNS3_PCI_REVISION_ID		0x08
#define HNS3_PCI_REVISION_ID_LEN	1

/* PCI revision IDs of known network engine versions */
#define PCI_REVISION_ID_HIP08_B		0x21
#define PCI_REVISION_ID_HIP09_A		0x30

/* Function IDs: the PF is function 0, VFs start at function 1 */
#define HNS3_PF_FUNC_ID			0
#define HNS3_1ST_VF_FUNC_ID		1

#define HNS3_DEFAULT_PORT_CONF_BURST_SIZE	32
#define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM	1

/* PVID shift-and-discard handling, see hns3_hw::vlan_mode */
#define HNS3_SW_SHIFT_AND_DISCARD_MODE		0
#define HNS3_HW_SHIFT_AND_DISCARD_MODE		1

/* Promiscuous behaviour, see hns3_hw::promisc_mode */
#define HNS3_UNLIMIT_PROMISC_MODE	0
#define HNS3_LIMIT_PROMISC_MODE		1

/* UDP checksum handling of special ports, see hns3_hw::udp_cksum_mode */
#define HNS3_SPECIAL_PORT_SW_CKSUM_MODE		0
#define HNS3_SPECIAL_PORT_HW_CKSUM_MODE		1

/* MAC address table sizes */
#define HNS3_UC_MACADDR_NUM		128
#define HNS3_VF_UC_MACADDR_NUM		48
#define HNS3_MC_MACADDR_NUM		128

#define HNS3_MAX_BD_SIZE		65535
#define HNS3_MAX_NON_TSO_BD_PER_PKT	8
#define HNS3_MAX_TSO_BD_PER_PKT		63
#define HNS3_MAX_FRAME_LEN		9728
#define HNS3_VLAN_TAG_SIZE		4
#define HNS3_DEFAULT_RX_BUF_LEN		2048
#define HNS3_MAX_BD_PAYLEN		(1024 * 1024 - 1)
#define HNS3_MAX_TSO_HDR_SIZE		512
#define HNS3_MAX_TSO_HDR_BD_NUM		3
#define HNS3_MAX_LRO_SIZE		64512

/* L2 overhead: Ethernet header + CRC + two VLAN tags */
#define HNS3_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + HNS3_VLAN_TAG_SIZE * 2)
#define HNS3_PKTLEN_TO_MTU(pktlen)	((pktlen) - HNS3_ETH_OVERHEAD)
#define HNS3_MAX_MTU	(HNS3_MAX_FRAME_LEN - HNS3_ETH_OVERHEAD)
#define HNS3_DEFAULT_MTU		1500UL
#define HNS3_DEFAULT_FRAME_LEN		(HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD)
/* Minimum Tx packet length accepted by each hardware generation */
#define HNS3_HIP08_MIN_TX_PKT_LEN	33
#define HNS3_HIP09_MIN_TX_PKT_LEN	9

#define HNS3_BITS_PER_BYTE	8

#define HNS3_4_TCS		4
#define HNS3_8_TCS		8

#define HNS3_MAX_PF_NUM		8
#define HNS3_UMV_TBL_SIZE	3072
#define HNS3_DEFAULT_UMV_SPACE_PER_PF \
	(HNS3_UMV_TBL_SIZE / HNS3_MAX_PF_NUM)

#define HNS3_PF_CFG_BLOCK_SIZE	32
#define HNS3_PF_CFG_DESC_NUM \
	(HNS3_PF_CFG_BLOCK_SIZE / HNS3_CFG_RD_LEN_BYTES)

#define HNS3_DEFAULT_ENABLE_PFC_NUM	0

#define HNS3_INTR_UNREG_FAIL_RETRY_CNT	5
#define HNS3_INTR_UNREG_FAIL_DELAY_MS	500

#define HNS3_QUIT_RESET_CNT		10
#define HNS3_QUIT_RESET_DELAY_MS	100

/*
 * Poll interval in milliseconds.
 * NOTE(review): the "RESPONE" misspelling is kept intentionally - the macro
 * name is part of this public header and renaming it would break users.
 */
#define HNS3_POLL_RESPONE_MS	1

#define HNS3_MAX_USER_PRIO	8
#define HNS3_PG_NUM		4
/* Flow control (pause frame) modes */
enum hns3_fc_mode {
	HNS3_FC_NONE,
	HNS3_FC_RX_PAUSE,
	HNS3_FC_TX_PAUSE,
	HNS3_FC_FULL,
	HNS3_FC_DEFAULT
};

#define HNS3_SCH_MODE_SP	0
#define HNS3_SCH_MODE_DWRR	1
/* Priority group configuration */
struct hns3_pg_info {
	uint8_t pg_id;
	uint8_t pg_sch_mode;	/* 0: sp; 1: dwrr */
	uint8_t tc_bit_map;
	uint32_t bw_limit;
	uint8_t tc_dwrr[HNS3_MAX_TC_NUM];
};

/* Traffic class configuration */
struct hns3_tc_info {
	uint8_t tc_id;
	uint8_t tc_sch_mode;	/* 0: sp; 1: dwrr */
	uint8_t pgid;
	uint32_t bw_limit;
	uint8_t up_to_tc_map;	/* user priority mapping on the TC */
};

struct hns3_dcb_info {
	uint8_t num_tc;
	uint8_t num_pg;		/* It must be 1 if vNET-Base schd */
	uint8_t pg_dwrr[HNS3_PG_NUM];
	uint8_t prio_tc[HNS3_MAX_USER_PRIO];
	struct hns3_pg_info pg_info[HNS3_PG_NUM];
	struct hns3_tc_info tc_info[HNS3_MAX_TC_NUM];
	uint8_t hw_pfc_map;	/* Allow for packet drop or not on this TC */
	uint8_t pfc_en;		/* PFC enabled or not for user priority */
};

/* Current flow control status of the port */
enum hns3_fc_status {
	HNS3_FC_STATUS_NONE,
	HNS3_FC_STATUS_MAC_PAUSE,
	HNS3_FC_STATUS_PFC,
};

struct hns3_tc_queue_info {
	uint16_t tqp_offset;	/* TQP offset from base TQP */
	uint16_t tqp_count;	/* Total TQPs */
	uint8_t tc;		/* TC index */
	bool enable;		/* Whether this TC is enabled or not */
};

/* Static configuration read from firmware at init time */
struct hns3_cfg {
	uint8_t tc_num;
	uint16_t tqp_desc_num;
	uint16_t rx_buf_len;
	uint16_t rss_size_max;
	uint8_t phy_addr;
	uint8_t media_type;
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	uint8_t default_speed;
	uint32_t numa_node_map;
	uint8_t speed_ability;
	uint16_t umv_space;
};

/* User-requested link speed/duplex/autoneg settings */
struct hns3_set_link_speed_cfg {
	uint32_t speed;
	uint8_t duplex : 1;
	uint8_t autoneg : 1;
};

/* mac media type */
enum hns3_media_type {
	HNS3_MEDIA_TYPE_UNKNOWN,
	HNS3_MEDIA_TYPE_FIBER,
	HNS3_MEDIA_TYPE_COPPER,
	HNS3_MEDIA_TYPE_BACKPLANE,
	HNS3_MEDIA_TYPE_NONE,
};

/* SFP information query modes, see hns3_mac::query_type */
#define HNS3_DEFAULT_QUERY		0
#define HNS3_ACTIVE_QUERY		1

struct hns3_mac {
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
	uint8_t media_type;
	uint8_t phy_addr;
	uint8_t link_duplex : 1;  /* ETH_LINK_[HALF/FULL]_DUPLEX */
	uint8_t link_autoneg : 1; /* ETH_LINK_[AUTONEG/FIXED] */
	uint8_t link_status : 1;  /* ETH_LINK_[DOWN/UP] */
	uint32_t link_speed;	  /* ETH_SPEED_NUM_ */
	/*
	 * Some firmware versions support only the SFP speed query. In addition
	 * to the SFP speed query, some firmware supports the query of the speed
	 * capability, auto-negotiation capability, and FEC mode, which can be
	 * selected by the 'query_type' field in the HNS3_OPC_GET_SFP_INFO CMD.
	 * This field is used to record the SFP information query mode.
	 * Value range:
	 *	HNS3_DEFAULT_QUERY/HNS3_ACTIVE_QUERY
	 *
	 * - HNS3_DEFAULT_QUERY
	 * Speed obtained is from SFP. When the queried speed changes, the MAC
	 * speed needs to be reconfigured.
	 *
	 * - HNS3_ACTIVE_QUERY
	 * Speed obtained is from MAC. At this time, it is unnecessary for
	 * driver to reconfigure the MAC speed. In addition, more information,
	 * such as, the speed capability, auto-negotiation capability and FEC
	 * mode, can be obtained by the HNS3_OPC_GET_SFP_INFO CMD.
	 */
	uint8_t query_type;
	uint32_t supported_speed; /* supported speed for current media type */
	uint32_t advertising;	  /* advertised capability in the local part */
	uint32_t lp_advertising;  /* advertised capability in the link partner */
	uint8_t support_autoneg;
};

struct hns3_fake_queue_data {
	void **rx_queues;	/* Array of pointers to fake RX queues. */
	void **tx_queues;	/* Array of pointers to fake TX queues. */
	uint16_t nb_fake_rx_queues; /* Number of fake RX queues. */
	uint16_t nb_fake_tx_queues; /* Number of fake TX queues. */
};

#define HNS3_PORT_BASE_VLAN_DISABLE	0
#define HNS3_PORT_BASE_VLAN_ENABLE	1
/* Port based VLAN (PVID) state */
struct hns3_port_base_vlan_config {
	uint16_t state;
	uint16_t pvid;
};

/* Primary process maintains driver state in main thread.
 *
 * +---------------+
 * | UNINITIALIZED |<-----------+
 * +---------------+            |
 *     |.eth_dev_init           |.eth_dev_uninit
 *     V                        |
 * +---------------+------------+
 * |  INITIALIZED  |
 * +---------------+<-----------<---------------+
 *     |.dev_configure          |               |
 *     V                        |failed         |
 * +---------------+------------+               |
 * |  CONFIGURING  |                            |
 * +---------------+----+                       |
 *     |success         |                       |
 *     |                |       +---------------+
 *     |                |       |    CLOSING    |
 *     |                |       +---------------+
 *     |                |               ^
 *     V                |.dev_configure |
 * +---------------+----+               |.dev_close
 * |   CONFIGURED  |--------------------+
 * +---------------+<-----------+
 *     |.dev_start              |
 *     V                        |
 * +---------------+            |
 * |   STARTING    |------------^
 * +---------------+    failed  |
 *     |success                 |
 *     |        +---------------+
 *     |        |   STOPPING    |
 *     |        +---------------+
 *     |                ^
 *     V                |.dev_stop
 * +---------------+----+
 * |    STARTED    |
 * +---------------+
 */
enum hns3_adapter_state {
	HNS3_NIC_UNINITIALIZED = 0,
	HNS3_NIC_INITIALIZED,
	HNS3_NIC_CONFIGURING,
	HNS3_NIC_CONFIGURED,
	HNS3_NIC_STARTING,
	HNS3_NIC_STARTED,
	HNS3_NIC_STOPPING,
	HNS3_NIC_CLOSING,
	HNS3_NIC_CLOSED,
	HNS3_NIC_REMOVED,
	HNS3_NIC_NSTATES
};

/* Reset various stages, execute in order */
enum hns3_reset_stage {
	/* Stop query services, stop transceiver, disable MAC */
	RESET_STAGE_DOWN,
	/* Clear reset completion flags, disable send command */
	RESET_STAGE_PREWAIT,
	/* Inform IMP to start resetting */
	RESET_STAGE_REQ_HW_RESET,
	/* Waiting for hardware reset to complete */
	RESET_STAGE_WAIT,
	/* Reinitialize hardware */
	RESET_STAGE_DEV_INIT,
	/* Restore user settings and enable MAC */
	RESET_STAGE_RESTORE,
	/* Restart query services, start transceiver */
	RESET_STAGE_DONE,
	/* Not in reset state */
	RESET_STAGE_NONE,
};

enum hns3_reset_level {
	HNS3_FLR_RESET,     /* A VF performs FLR reset */
	HNS3_VF_FUNC_RESET, /* A VF function reset */

	/*
	 * All VFs under a PF perform function reset.
	 * Kernel PF driver use mailbox to inform DPDK VF to do reset, the value
	 * of the reset level and the one defined in kernel driver should be
	 * same.
	 */
	HNS3_VF_PF_FUNC_RESET = 2,

	/*
	 * All VFs under a PF perform FLR reset.
	 * Kernel PF driver use mailbox to inform DPDK VF to do reset, the value
	 * of the reset level and the one defined in kernel driver should be
	 * same.
	 *
	 * According to the protocol of PCIe, FLR to a PF resets the PF state as
	 * well as the SR-IOV extended capability including VF Enable which
	 * means that VFs no longer exist.
	 *
	 * In PF FLR, the register state of VF is not reliable, VF's driver
	 * should not access the registers of the VF device.
	 */
	HNS3_VF_FULL_RESET,

	/* All VFs under the rootport perform a global or IMP reset */
	HNS3_VF_RESET,

	/*
	 * The enumeration value of HNS3_FUNC_RESET/HNS3_GLOBAL_RESET/
	 * HNS3_IMP_RESET/HNS3_NONE_RESET are also used by firmware, and
	 * can not be changed.
	 */

	HNS3_FUNC_RESET = 5, /* A PF function reset */

	/* All PFs under the rootport perform a global reset */
	HNS3_GLOBAL_RESET,
	HNS3_IMP_RESET, /* All PFs under the rootport perform a IMP reset */
	HNS3_NONE_RESET,
	HNS3_MAX_RESET
};

/* Outcome of an asynchronous wait, see struct hns3_wait_data */
enum hns3_wait_result {
	HNS3_WAIT_UNKNOWN,
	HNS3_WAIT_REQUEST,
	HNS3_WAIT_SUCCESS,
	HNS3_WAIT_TIMEOUT
};

#define HNS3_RESET_SYNC_US	100000

struct hns3_reset_stats {
	uint64_t request_cnt;	/* Total request reset times */
	uint64_t global_cnt;	/* Total GLOBAL reset times */
	uint64_t imp_cnt;	/* Total IMP reset times */
	uint64_t exec_cnt;	/* Total reset executive times */
	uint64_t success_cnt;	/* Total reset successful times */
	uint64_t fail_cnt;	/* Total reset failed times */
	uint64_t merge_cnt;	/* Total merged in high reset times */
};

/* Completion predicate polled while waiting on a reset step */
typedef bool (*check_completion_func)(struct hns3_hw *hw);

struct hns3_wait_data {
	void *hns;
	uint64_t end_ms;
	uint64_t interval;
	int16_t count;
	enum hns3_wait_result result;
	check_completion_func check_completion;
};

/* Reset procedure callbacks */
struct hns3_reset_ops {
	void (*reset_service)(void *arg);
	int (*stop_service)(struct hns3_adapter *hns);
	int (*prepare_reset)(struct hns3_adapter *hns);
	int (*wait_hardware_ready)(struct hns3_adapter *hns);
	int (*reinit_dev)(struct hns3_adapter *hns);
	int (*restore_conf)(struct hns3_adapter *hns);
	int (*start_service)(struct hns3_adapter *hns);
};

/* Scheduling state of the reset service */
enum hns3_schedule {
	SCHEDULE_NONE,
	SCHEDULE_PENDING,
	SCHEDULE_REQUESTED,
	SCHEDULE_DEFERRED,
};

struct hns3_reset_data {
	enum hns3_reset_stage stage;
	uint16_t schedule;
	/* Reset flag, covering the entire reset process */
	uint16_t resetting;
	/* Used to disable sending cmds during reset */
	uint16_t disable_cmd;
	/* The reset level being processed */
	enum hns3_reset_level level;
	/* Reset level
	   set, each bit represents a reset level */
	uint64_t pending;
	/* Request reset level set, from interrupt or mailbox */
	uint64_t request;
	int attempts; /* Reset failure retry */
	int retries;  /* Timeout failure retry in reset_post */
	/*
	 * At the time of global or IMP reset, the command cannot be sent to
	 * stop the tx/rx queues. Tx/Rx queues may be access mbuf during the
	 * reset process, so the mbuf is required to be released after the reset
	 * is completed. The mbuf_deferred_free is used to mark whether mbuf
	 * needs to be released.
	 */
	bool mbuf_deferred_free;
	struct timeval start_time;
	struct hns3_reset_stats stats;
	const struct hns3_reset_ops *ops;
	struct hns3_wait_data *wait_data;
};

/* Queue-to-vector interrupt mapping modes, see hns3_queue_intr::mapping_mode */
#define HNS3_INTR_MAPPING_VEC_RSV_ONE	0
#define HNS3_INTR_MAPPING_VEC_ALL	1

/* Units of the GL (gap limiter) coalescing setting, see hns3_queue_intr::gl_unit */
#define HNS3_INTR_COALESCE_GL_UINT_2US	0
#define HNS3_INTR_COALESCE_GL_UINT_1US	1

#define HNS3_INTR_QL_NONE		0

struct hns3_queue_intr {
	/*
	 * interrupt mapping mode.
	 * value range:
	 *	HNS3_INTR_MAPPING_VEC_RSV_ONE/HNS3_INTR_MAPPING_VEC_ALL
	 *
	 * - HNS3_INTR_MAPPING_VEC_RSV_ONE
	 * For some versions of hardware network engine, because of the
	 * hardware constraint, we need implement clearing the mapping
	 * relationship configurations by binding all queues to the last
	 * interrupt vector and reserving the last interrupt vector. This
	 * method results in a decrease of the maximum queues when upper
	 * applications call the rte_eth_dev_configure API function to
	 * enable Rx interrupt.
	 *
	 * - HNS3_INTR_MAPPING_VEC_ALL
	 * PMD driver can map/unmap all interrupt vectors with queues when
	 * Rx interrupt is enabled.
	 */
	uint8_t mapping_mode;
	/*
	 * The unit of GL(gap limiter) configuration for interrupt coalesce of
	 * queue's interrupt.
	 * value range:
	 *	HNS3_INTR_COALESCE_GL_UINT_2US/HNS3_INTR_COALESCE_GL_UINT_1US
	 */
	uint8_t gl_unit;
	/* The max QL(quantity limiter) value */
	uint16_t int_ql_max;
};

/* TSO pseudo-header checksum handling, see hns3_hw::tso_mode */
#define HNS3_TSO_SW_CAL_PSEUDO_H_CSUM	0
#define HNS3_TSO_HW_CAL_PSEUDO_H_CSUM	1

/* Packet drop statistics modes, see hns3_hw::drop_stats_mode */
#define HNS3_PKTS_DROP_STATS_MODE1	0
#define HNS3_PKTS_DROP_STATS_MODE2	1

/* Hardware/firmware state shared by the PF and VF driver paths. */
struct hns3_hw {
	struct rte_eth_dev_data *data;
	void *io_base;
	uint8_t revision;	/* PCI revision, low byte of class word */
	struct hns3_cmq cmq;
	struct hns3_mbx_resp_status mbx_resp;	/* mailbox response */
	struct hns3_mac mac;
	/*
	 * This flag indicates dev_set_link_down() API is called, and is cleared
	 * by dev_set_link_up() or dev_start().
	 */
	bool set_link_down;
	unsigned int secondary_cnt;	/* Number of secondary processes init'd. */
	struct hns3_tqp_stats tqp_stats;
	/* Include Mac stats | Rx stats | Tx stats */
	struct hns3_mac_stats mac_stats;
	struct hns3_rx_missed_stats imissed_stats;
	uint64_t oerror_stats;
	uint32_t fw_version;
	uint16_t pf_vf_if_version;	/* version of communication interface */

	uint16_t num_msi;
	uint16_t total_tqps_num;	/* total task queue pairs of this PF */
	uint16_t tqps_num;	/* num task queue pairs of this function */
	uint16_t intr_tqps_num;	/* num queue pairs mapping interrupt */
	uint16_t rss_size_max;	/* HW defined max RSS task queue */
	uint16_t rx_buf_len;	/* hold min hardware rx buf len */
	uint16_t num_tx_desc;	/* desc num of per tx queue */
	uint16_t num_rx_desc;	/* desc num of per rx queue */
	uint32_t mng_entry_num;	/* number of manager table entry */
	uint32_t mac_entry_num;	/* number of mac-vlan table entry */

	struct rte_ether_addr mc_addrs[HNS3_MC_MACADDR_NUM];
	int mc_addrs_num;	/* Multicast mac addresses number */

	/* The configuration info of RSS */
	struct hns3_rss_conf rss_info;
	bool rss_dis_flag;	/* disable rss flag.
				   true: disable, false: enable */
	uint16_t rss_ind_tbl_size;
	uint16_t rss_key_size;

	uint8_t num_tc;		/* Total number of enabled TCs */
	uint8_t hw_tc_map;
	enum hns3_fc_mode requested_fc_mode;	/* FC mode requested by user */
	struct hns3_dcb_info dcb_info;
	enum hns3_fc_status current_fc_status;	/* current flow control status */
	struct hns3_tc_queue_info tc_queue[HNS3_MAX_TC_NUM];
	uint16_t used_rx_queues;
	uint16_t used_tx_queues;

	/* Config max queue numbers between rx and tx queues from user */
	uint16_t cfg_max_queues;
	struct hns3_fake_queue_data fkq_data;	/* fake queue data */
	uint16_t alloc_rss_size;	/* RX queue number per TC */
	uint16_t tx_qnum_per_tc;	/* TX queue number per TC */

	uint32_t capability;
	uint32_t max_tm_rate;
	/*
	 * The minimum length of the packet supported by hardware in the Tx
	 * direction.
	 */
	uint32_t min_tx_pkt_len;

	struct hns3_queue_intr intr;
	/*
	 * tso mode.
	 * value range:
	 *	HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *
	 * - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
	 * In this mode, because of the hardware constraint, network driver
	 * software need erase the L4 len value of the TCP pseudo header
	 * and recalculate the TCP pseudo header checksum of packets that
	 * need TSO.
	 *
	 * - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 * In this mode, hardware support recalculate the TCP pseudo header
	 * checksum of packets that need TSO, so network driver software
	 * not need to recalculate it.
	 */
	uint8_t tso_mode;
	/*
	 * vlan mode.
	 * value range:
	 *	HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE
	 *
	 * - HNS3_SW_SHIFT_AND_DISCARD_MODE
	 * For some versions of hardware network engine, because of the
	 * hardware limitation, PMD driver needs to detect the PVID status
	 * to work with hardware to implement PVID-related functions.
	 * For example, driver need discard the stripped PVID tag to ensure
	 * the PVID will not report to mbuf and shift the inserted VLAN tag
	 * to avoid port based VLAN covering it.
	 *
	 * - HNS3_HW_SHIFT_AND_DISCARD_MODE
	 * PMD driver does not need to process PVID-related functions in
	 * I/O process, Hardware will adjust the sequence between port based
	 * VLAN tag and BD VLAN tag automatically and VLAN tag stripped by
	 * PVID will be invisible to driver. And in this mode, hns3 is able
	 * to send a multi-layer VLAN packets when hw VLAN insert offload
	 * is enabled.
	 */
	uint8_t vlan_mode;
	/*
	 * promisc mode.
	 * value range:
	 *	HNS3_UNLIMIT_PROMISC_MODE/HNS3_LIMIT_PROMISC_MODE
	 *
	 * - HNS3_UNLIMIT_PROMISC_MODE
	 * In this mode, TX unicast promisc will be configured when promisc
	 * is set, driver can receive all the ingress and outgoing traffic.
	 * In other words, all the ingress packets, all the packets sent from
	 * the PF and other VFs on the same physical port.
	 *
	 * - HNS3_LIMIT_PROMISC_MODE
	 * In this mode, TX unicast promisc is shutdown when promisc mode
	 * is set. So, driver will only receive all the ingress traffic.
	 * The packets sent from the PF and other VFs on the same physical
	 * port won't be copied to the function which has set promisc mode.
	 */
	uint8_t promisc_mode;

	/*
	 * drop_stats_mode mode.
	 * value range:
	 *	HNS3_PKTS_DROP_STATS_MODE1/HNS3_PKTS_DROP_STATS_MODE2
	 *
	 * - HNS3_PKTS_DROP_STATS_MODE1
	 * This mode for kunpeng920. In this mode, port level imissed stats
	 * is supported. It only includes RPU drop stats.
	 *
	 * - HNS3_PKTS_DROP_STATS_MODE2
	 * This mode for kunpeng930. In this mode, imissed stats and oerrors
	 * stats is supported. Function level imissed stats is supported. It
	 * includes RPU drop stats in VF, and includes both RPU drop stats
	 * and SSU drop stats in PF. Oerror stats is also supported in PF.
	 */
	uint8_t drop_stats_mode;

	uint8_t max_non_tso_bd_num;	/* max BD number of one non-TSO packet */
	/*
	 * udp checksum mode.
	 * value range:
	 *	HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 *
	 * - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 * In this mode, HW can not do checksum for special UDP port like
	 * 4789, 4790, 6081 for non-tunnel UDP packets and UDP tunnel
	 * packets without the PKT_TX_TUNNEL_MASK in the mbuf. So, PMD need
	 * do the checksum for these packets to avoid a checksum error.
	 *
	 * - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
	 * In this mode, HW does not have the preceding problems and can
	 * directly calculate the checksum of these UDP packets.
	 */
	uint8_t udp_cksum_mode;

	struct hns3_port_base_vlan_config port_base_vlan_cfg;

	pthread_mutex_t flows_lock;	/* rte_flow ops lock */
	struct hns3_fdir_rule_list flow_fdir_list; /* flow fdir rule list */
	struct hns3_rss_filter_list flow_rss_list; /* flow RSS rule list */
	struct hns3_flow_mem_list flow_list;

	/*
	 * PMD setup and configuration is not thread safe. Since it is not
	 * performance sensitive, it is better to guarantee thread-safety
	 * and add device level lock. Adapter control operations which
	 * change its state should acquire the lock.
	 */
	rte_spinlock_t lock;
	enum hns3_adapter_state adapter_state;
	struct hns3_reset_data reset;
};

/* Scheduling modes (NOTE(review): presumably values of hns3_pf::tx_sch_mode - confirm) */
#define HNS3_FLAG_TC_BASE_SCH_MODE	1
#define HNS3_FLAG_VNET_BASE_SCH_MODE	2

/* vlan entry information.
 */
struct hns3_user_vlan_table {
	LIST_ENTRY(hns3_user_vlan_table) next;
	bool hd_tbl_status;
	uint16_t vlan_id;
};

/* Vlan tag configuration for RX direction */
struct hns3_rx_vtag_cfg {
	bool rx_vlan_offload_en;	/* Whether enable rx vlan offload */
	bool strip_tag1_en;		/* Whether strip inner vlan tag */
	bool strip_tag2_en;		/* Whether strip outer vlan tag */
	/*
	 * If strip_tag_en is enabled, this bit decide whether to map the vlan
	 * tag to descriptor.
	 */
	bool strip_tag1_discard_en;
	bool strip_tag2_discard_en;
	/*
	 * If this bit is enabled, only map inner/outer priority to descriptor
	 * and the vlan tag is always 0.
	 */
	bool vlan1_vlan_prionly;
	bool vlan2_vlan_prionly;
};

/* Vlan tag configuration for TX direction */
struct hns3_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether insert outer vlan tag */
	bool insert_tag2_en;	/* Whether insert inner vlan tag */
	/*
	 * In shift mode, hw will shift the sequence of port based VLAN and
	 * BD VLAN.
	 */
	bool tag_shift_mode_en;	/* hw shift vlan tag automatically */
	uint16_t default_tag1;	/* The default outer vlan tag to insert */
	uint16_t default_tag2;	/* The default inner vlan tag to insert */
};

struct hns3_vtag_cfg {
	struct hns3_rx_vtag_cfg rx_vcfg;
	struct hns3_tx_vtag_cfg tx_vcfg;
};

/* Request types for IPC. */
enum hns3_mp_req_type {
	HNS3_MP_REQ_START_RXTX = 1,
	HNS3_MP_REQ_STOP_RXTX,
	HNS3_MP_REQ_START_TX,
	HNS3_MP_REQ_STOP_TX,
	HNS3_MP_REQ_MAX
};

/* Parameters for IPC. */
struct hns3_mp_param {
	enum hns3_mp_req_type type;
	int port_id;
	int result;
};

/* Request timeout for IPC. */
#define HNS3_MP_REQ_TIMEOUT_SEC	5

/* Key string for IPC. */
#define HNS3_MP_NAME	"net_hns3_mp"

/* Sizes of the packet-type lookup tables below */
#define HNS3_L2TBL_NUM	4
#define HNS3_L3TBL_NUM	16
#define HNS3_L4TBL_NUM	16
#define HNS3_OL2TBL_NUM	4
#define HNS3_OL3TBL_NUM	16
#define HNS3_OL4TBL_NUM	16
#define HNS3_PTYPE_NUM	256

struct hns3_ptype_table {
	/*
	 * The next fields used to calc packet-type by the
	 * L3_ID/L4_ID/OL3_ID/OL4_ID from the Rx descriptor.
	 */
	uint32_t l3table[HNS3_L3TBL_NUM];
	uint32_t l4table[HNS3_L4TBL_NUM];
	uint32_t inner_l3table[HNS3_L3TBL_NUM];
	uint32_t inner_l4table[HNS3_L4TBL_NUM];
	uint32_t ol3table[HNS3_OL3TBL_NUM];
	uint32_t ol4table[HNS3_OL4TBL_NUM];

	/*
	 * The next field used to calc packet-type by the PTYPE from the Rx
	 * descriptor, it functions only when firmware report the capability of
	 * HNS3_CAPS_RXD_ADV_LAYOUT_B and driver enabled it.
	 */
	uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_aligned;
};

/* Max TQP number modes, see hns3_pf::tqp_config_mode */
#define HNS3_FIXED_MAX_TQP_NUM_MODE	0
#define HNS3_FLEX_MAX_TQP_NUM_MODE	1

struct hns3_pf {
	struct hns3_adapter *adapter;
	bool is_main_pf;
	uint16_t func_num; /* num functions of this pf, include pf and vfs */

	/*
	 * tqp_config mode
	 * tqp_config_mode value range:
	 *	HNS3_FIXED_MAX_TQP_NUM_MODE,
	 *	HNS3_FLEX_MAX_TQP_NUM_MODE
	 *
	 * - HNS3_FIXED_MAX_TQP_NUM_MODE
	 * There is a limitation on the number of pf interrupts available for
	 * on some versions of network engines. In this case, the maximum
	 * queue number of pf can not be greater than the interrupt number,
	 * such as pf of network engine with revision_id 0x21. So the maximum
	 * number of queues must be fixed.
	 *
	 * - HNS3_FLEX_MAX_TQP_NUM_MODE
	 * In this mode, the maximum queue number of pf has not any constraint
	 * and comes from the macro RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF
	 * in the config file. Users can modify the macro according to their
	 * own application scenarios, which is more flexible to use.
	 */
	uint8_t tqp_config_mode;

	uint32_t pkt_buf_size;	/* Total pf buf size for tx/rx */
	uint32_t tx_buf_size;	/* Tx buffer size for each TC */
	uint32_t dv_buf_size;	/* Dv buffer size for each TC */

	uint16_t mps;		/* Max packet size */

	uint8_t tx_sch_mode;
	uint8_t tc_max;		/* max number of tc driver supported */
	uint8_t local_max_tc;	/* max number of local tc */
	uint8_t pfc_max;
	uint8_t prio_tc[HNS3_MAX_USER_PRIO];	/* TC indexed by prio */
	uint16_t pause_time;
	bool support_fc_autoneg;	/* support FC autonegotiate */
	bool support_multi_tc_pause;

	uint16_t wanted_umv_size;
	uint16_t max_umv_size;
	uint16_t used_umv_size;

	bool support_sfp_query;
	uint32_t fec_mode;	/* current FEC mode for ethdev */

	bool ptp_enable;

	/* Stores timestamp of last received packet on dev */
	uint64_t rx_timestamp;

	struct hns3_vtag_cfg vtag_config;
	LIST_HEAD(vlan_tbl, hns3_user_vlan_table) vlan_list;

	struct hns3_fdir_info fdir;	/* flow director info */
	LIST_HEAD(counters, hns3_flow_counter) flow_counters;

	struct hns3_tm_conf tm_conf;
};

/* Whether the PF can push link status changes to the VF, see hns3_vf */
enum {
	HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED,
	HNS3_PF_PUSH_LSC_CAP_SUPPORTED,
	HNS3_PF_PUSH_LSC_CAP_UNKNOWN
};

struct hns3_vf {
	struct hns3_adapter *adapter;

	/* Whether PF support push link status change to VF */
	uint16_t pf_push_lsc_cap;

	/*
	 * If PF support push link status change, VF still need send request to
	 * get link status in some cases (such as reset recover stage), so use
	 * the req_link_info_cnt to control max request count.
	 */
	uint16_t req_link_info_cnt;

	uint16_t poll_job_started; /* whether poll job is started */
};

/* Per-port private data: the shared hns3_hw plus the PF- or VF-only part */
struct hns3_adapter {
	struct hns3_hw hw;

	/* Specific for PF or VF */
	bool is_vf; /* false - PF, true - VF */
	union {
		struct hns3_pf pf;
		struct hns3_vf vf;
	};

	uint32_t rx_func_hint;
	uint32_t tx_func_hint;

	uint64_t dev_caps_mask;

	struct hns3_ptype_table ptype_tbl __rte_cache_aligned;
};

/* Values of the rx_func_hint/tx_func_hint devargs */
enum {
	HNS3_IO_FUNC_HINT_NONE = 0,
	HNS3_IO_FUNC_HINT_VEC,
	HNS3_IO_FUNC_HINT_SVE,
	HNS3_IO_FUNC_HINT_SIMPLE,
	HNS3_IO_FUNC_HINT_COMMON
};

#define HNS3_DEVARG_RX_FUNC_HINT	"rx_func_hint"
#define HNS3_DEVARG_TX_FUNC_HINT	"tx_func_hint"

#define HNS3_DEVARG_DEV_CAPS_MASK	"dev_caps_mask"

/* Bit positions inside hns3_hw::capability */
enum {
	HNS3_DEV_SUPPORT_DCB_B,
	HNS3_DEV_SUPPORT_COPPER_B,
	HNS3_DEV_SUPPORT_FD_QUEUE_REGION_B,
	HNS3_DEV_SUPPORT_PTP_B,
	HNS3_DEV_SUPPORT_TX_PUSH_B,
	HNS3_DEV_SUPPORT_INDEP_TXRX_B,
	HNS3_DEV_SUPPORT_STASH_B,
	HNS3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
	HNS3_DEV_SUPPORT_OUTER_UDP_CKSUM_B,
	HNS3_DEV_SUPPORT_RAS_IMP_B,
	HNS3_DEV_SUPPORT_TM_B,
	HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B,
};

/* Test a capability bit, e.g. hns3_dev_get_support(hw, PTP) */
#define hns3_dev_get_support(hw, _name) \
	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_##_name##_B)

/* Converters between eth_dev private data and the driver structures */
#define HNS3_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct hns3_adapter *)adapter)->hw)
#define HNS3_DEV_PRIVATE_TO_PF(adapter) \
	(&((struct hns3_adapter *)adapter)->pf)
#define HNS3_DEV_PRIVATE_TO_VF(adapter) \
	(&((struct hns3_adapter *)adapter)->vf)
#define HNS3_DEV_HW_TO_ADAPTER(hw) \
	container_of(hw, struct hns3_adapter, hw)

/* Return the PF-specific part of the adapter that owns 'hw' */
static inline struct hns3_pf *HNS3_DEV_HW_TO_PF(struct hns3_hw *hw)
{
	struct hns3_adapter *adapter = HNS3_DEV_HW_TO_ADAPTER(hw);
	return &adapter->pf;
}

/* Return the VF-specific part of the adapter that owns 'hw' */
static inline struct hns3_vf *HNS3_DEV_HW_TO_VF(struct hns3_hw *hw)
{
	struct hns3_adapter *adapter = HNS3_DEV_HW_TO_ADAPTER(hw);
	return &adapter->vf;
}

/* Bit-field helpers: 'mask' selects the field, 'shift' is its LSB position */
#define hns3_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= (~(mask)); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)
#define hns3_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))
#define hns3_set_bit(origin, shift, val) \
	hns3_set_field((origin), (0x1UL << (shift)), (shift), (val))
#define hns3_get_bit(origin, shift) \
	hns3_get_field((origin), (0x1UL << (shift)), (shift))

#define hns3_gen_field_val(mask, shift, val) (((val) << (shift)) & (mask))

/*
 * upper_32_bits - return bits 32-63 of a number
 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

/* lower_32_bits - return bits 0-31 of a number */
#define lower_32_bits(n) ((uint32_t)(n))

#define BIT(nr) (1UL << (nr))

#define BIT_ULL(x) (1ULL << (x))

#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
/* Contiguous bitmask from bit 'l' to bit 'h', inclusive */
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) ((x) - ((x) % (y)))

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Because hardware always access register in little-endian mode based on hns3
 * network engine, so driver should also call rte_cpu_to_le_32 to convert data
 * in little-endian mode before writing register and call rte_le_to_cpu_32 to
 * convert data after reading from register.
 *
 * Here the driver encapsulates the data conversion operation in the register
 * read/write operation function as below:
 *	hns3_write_reg
 *	hns3_write_reg_opt
 *	hns3_read_reg
 * Therefore, when calling these functions, conversion is not required again.
960 */ 961 static inline void hns3_write_reg(void *base, uint32_t reg, uint32_t value) 962 { 963 rte_write32(rte_cpu_to_le_32(value), 964 (volatile void *)((char *)base + reg)); 965 } 966 967 /* 968 * The optimized function for writing registers reduces one address addition 969 * calculation, it was used in the '.rx_pkt_burst' and '.tx_pkt_burst' ops 970 * implementation function. 971 */ 972 static inline void hns3_write_reg_opt(volatile void *addr, uint32_t value) 973 { 974 rte_write32(rte_cpu_to_le_32(value), addr); 975 } 976 977 static inline uint32_t hns3_read_reg(void *base, uint32_t reg) 978 { 979 uint32_t read_val = rte_read32((volatile void *)((char *)base + reg)); 980 return rte_le_to_cpu_32(read_val); 981 } 982 983 #define hns3_write_dev(a, reg, value) \ 984 hns3_write_reg((a)->io_base, (reg), (value)) 985 986 #define hns3_read_dev(a, reg) \ 987 hns3_read_reg((a)->io_base, (reg)) 988 989 #define NEXT_ITEM_OF_ACTION(act, actions, index) \ 990 do { \ 991 act = (actions) + (index); \ 992 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ 993 (index)++; \ 994 act = actions + index; \ 995 } \ 996 } while (0) 997 998 #define MSEC_PER_SEC 1000L 999 #define USEC_PER_MSEC 1000L 1000 1001 void hns3_clock_gettime(struct timeval *tv); 1002 uint64_t hns3_clock_calctime_ms(struct timeval *tv); 1003 uint64_t hns3_clock_gettime_ms(void); 1004 1005 static inline uint64_t 1006 hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) 1007 { 1008 uint64_t res; 1009 1010 res = (__atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr)) != 0; 1011 return res; 1012 } 1013 1014 static inline void 1015 hns3_atomic_set_bit(unsigned int nr, volatile uint64_t *addr) 1016 { 1017 __atomic_fetch_or(addr, (1UL << nr), __ATOMIC_RELAXED); 1018 } 1019 1020 static inline void 1021 hns3_atomic_clear_bit(unsigned int nr, volatile uint64_t *addr) 1022 { 1023 __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED); 1024 } 1025 1026 static inline int64_t 1027 hns3_test_and_clear_bit(unsigned 
int nr, volatile uint64_t *addr) 1028 { 1029 uint64_t mask = (1UL << nr); 1030 1031 return __atomic_fetch_and(addr, ~mask, __ATOMIC_RELAXED) & mask; 1032 } 1033 1034 int hns3_buffer_alloc(struct hns3_hw *hw); 1035 int hns3_dev_flow_ops_get(struct rte_eth_dev *dev, 1036 const struct rte_flow_ops **ops); 1037 bool hns3_is_reset_pending(struct hns3_adapter *hns); 1038 bool hns3vf_is_reset_pending(struct hns3_adapter *hns); 1039 void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); 1040 void hns3_ether_format_addr(char *buf, uint16_t size, 1041 const struct rte_ether_addr *ether_addr); 1042 int hns3_dev_infos_get(struct rte_eth_dev *eth_dev, 1043 struct rte_eth_dev_info *info); 1044 void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, 1045 uint32_t link_speed, uint8_t link_duplex); 1046 void hns3_parse_devargs(struct rte_eth_dev *dev); 1047 void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); 1048 int hns3_restore_ptp(struct hns3_adapter *hns); 1049 int hns3_mbuf_dyn_rx_timestamp_register(struct rte_eth_dev *dev, 1050 struct rte_eth_conf *conf); 1051 int hns3_ptp_init(struct hns3_hw *hw); 1052 int hns3_timesync_enable(struct rte_eth_dev *dev); 1053 int hns3_timesync_disable(struct rte_eth_dev *dev); 1054 int hns3_timesync_read_rx_timestamp(struct rte_eth_dev *dev, 1055 struct timespec *timestamp, 1056 uint32_t flags __rte_unused); 1057 int hns3_timesync_read_tx_timestamp(struct rte_eth_dev *dev, 1058 struct timespec *timestamp); 1059 int hns3_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts); 1060 int hns3_timesync_write_time(struct rte_eth_dev *dev, 1061 const struct timespec *ts); 1062 int hns3_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta); 1063 1064 static inline bool 1065 is_reset_pending(struct hns3_adapter *hns) 1066 { 1067 bool ret; 1068 if (hns->is_vf) 1069 ret = hns3vf_is_reset_pending(hns); 1070 else 1071 ret = hns3_is_reset_pending(hns); 1072 return ret; 1073 } 1074 1075 static 
inline uint64_t 1076 hns3_txvlan_cap_get(struct hns3_hw *hw) 1077 { 1078 if (hw->port_base_vlan_cfg.state) 1079 return DEV_TX_OFFLOAD_VLAN_INSERT; 1080 else 1081 return DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT; 1082 } 1083 1084 #endif /* _HNS3_ETHDEV_H_ */ 1085