/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#ifndef _HNS3_RXTX_H_
#define _HNS3_RXTX_H_

#include <stdint.h>
#include <rte_mbuf_core.h>

#define HNS3_MIN_RING_DESC		64
#define HNS3_MAX_RING_DESC		32768
#define HNS3_DEFAULT_RING_DESC		1024
#define HNS3_ALIGN_RING_DESC		32
#define HNS3_RING_BASE_ALIGN		128
#define HNS3_BULK_ALLOC_MBUF_NUM	32

#define HNS3_DEFAULT_RX_FREE_THRESH	32
#define HNS3_DEFAULT_TX_FREE_THRESH	32
#define HNS3_DEFAULT_TX_RS_THRESH	32
#define HNS3_TX_FAST_FREE_AHEAD		64

#define HNS3_DEFAULT_RX_BURST		32
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must be <= 64\n"
#endif
#define HNS3_DEFAULT_DESCS_PER_LOOP	4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP	8
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN	HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN	HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif
#define HNS3_DEFAULT_RXQ_REARM_THRESH	64
#define HNS3_UINT8_BIT			8
#define HNS3_UINT16_BIT			16
#define HNS3_UINT32_BIT			32

#define HNS3_512_BD_BUF_SIZE	512
#define HNS3_1K_BD_BUF_SIZE	1024
#define HNS3_2K_BD_BUF_SIZE	2048
#define HNS3_4K_BD_BUF_SIZE	4096

#define HNS3_MIN_BD_BUF_SIZE	HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE	HNS3_4K_BD_BUF_SIZE

#define HNS3_BD_SIZE_512_TYPE		0
#define HNS3_BD_SIZE_1024_TYPE		1
#define HNS3_BD_SIZE_2048_TYPE		2
#define HNS3_BD_SIZE_4096_TYPE		3

#define HNS3_RX_FLAG_VLAN_PRESENT	0x1
#define HNS3_RX_FLAG_L3ID_IPV4		0x0
#define HNS3_RX_FLAG_L3ID_IPV6		0x1
#define HNS3_RX_FLAG_L4ID_UDP		0x0
#define HNS3_RX_FLAG_L4ID_TCP		0x1

#define HNS3_RXD_DMAC_S			0
#define HNS3_RXD_DMAC_M			(0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S			2
#define HNS3_RXD_VLAN_M			(0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S			4
#define HNS3_RXD_L3ID_M			(0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S			8
#define HNS3_RXD_L4ID_M			(0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B			12
#define HNS3_RXD_STRP_TAGP_S		13
#define HNS3_RXD_STRP_TAGP_M		(0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B			16
#define HNS3_RXD_L3E_B			17
#define HNS3_RXD_L4E_B			18
#define HNS3_RXD_TRUNCATE_B		19
#define HNS3_RXD_HOI_B			20
#define HNS3_RXD_DOI_B			21
#define HNS3_RXD_OL3E_B			22
#define HNS3_RXD_OL4E_B			23
#define HNS3_RXD_GRO_COUNT_S		24
#define HNS3_RXD_GRO_COUNT_M		(0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B		30
#define HNS3_RXD_GRO_ECN_B		31

#define HNS3_RXD_ODMAC_S		0
#define HNS3_RXD_ODMAC_M		(0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S		2
#define HNS3_RXD_OVLAN_M		(0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S		4
#define HNS3_RXD_OL3ID_M		(0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S		8
#define HNS3_RXD_OL4ID_M		(0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S		4
#define HNS3_RXD_PTYPE_M		(0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S			12
#define HNS3_RXD_FBHI_M			(0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S			14
#define HNS3_RXD_FBLI_M			(0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S		0
#define HNS3_RXD_BDTYPE_M		(0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B			4
#define HNS3_RXD_UDP0_B			5
#define HNS3_RXD_EXTEND_B		7
#define HNS3_RXD_FE_B			8
#define HNS3_RXD_LUM_B			9
#define HNS3_RXD_CRCP_B			10
#define HNS3_RXD_L3L4P_B		11
#define HNS3_RXD_TSIND_S		12
#define HNS3_RXD_TSIND_M		(0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B			15
#define HNS3_RXD_GRO_SIZE_S		16
#define HNS3_RXD_GRO_SIZE_M		(0x3fff << HNS3_RXD_GRO_SIZE_S)
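
/*
 * Each *_S macro above is the bit offset of a field inside a BD word, and the
 * matching *_M macro is its mask. A field is normally extracted with the
 * driver's hns3_get_field() helper, exactly as hns3_rx_calc_ptype() below
 * does. Illustrative sketch only, not a new API, e.g. for the L3 type of an
 * Rx BD:
 *
 *	l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
 */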

#define HNS3_TXD_L3T_S			0
#define HNS3_TXD_L3T_M			(0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S			2
#define HNS3_TXD_L4T_M			(0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B			4
#define HNS3_TXD_L4CS_B			5
#define HNS3_TXD_VLAN_B			6
#define HNS3_TXD_TSO_B			7

#define HNS3_TXD_L2LEN_S		8
#define HNS3_TXD_L2LEN_M		(0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S		16
#define HNS3_TXD_L3LEN_M		(0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S		24
#define HNS3_TXD_L4LEN_M		(0xffUL << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_OL3T_S			0
#define HNS3_TXD_OL3T_M			(0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B		2
#define HNS3_TXD_MACSEC_B		3
#define HNS3_TXD_TUNTYPE_S		4
#define HNS3_TXD_TUNTYPE_M		(0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_BDTYPE_S		0
#define HNS3_TXD_BDTYPE_M		(0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B			4
#define HNS3_TXD_SC_S			5
#define HNS3_TXD_SC_M			(0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B		7
#define HNS3_TXD_VLD_B			8
#define HNS3_TXD_RI_B			9
#define HNS3_TXD_RA_B			10
#define HNS3_TXD_TSYN_B			11
#define HNS3_TXD_DECTTL_S		12
#define HNS3_TXD_DECTTL_M		(0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S			0
#define HNS3_TXD_MSS_M			(0x3fff << HNS3_TXD_MSS_S)

#define HNS3_TXD_OL4CS_B		22
#define HNS3_L2_LEN_UNIT		1UL
#define HNS3_L3_LEN_UNIT		2UL
#define HNS3_L4_LEN_UNIT		2UL

#define HNS3_TXD_DEFAULT_BDTYPE		0
#define HNS3_TXD_VLD_CMD		(0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD			(0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE \
		(HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT	16

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};
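
/*
 * The enum values above are what the Tx path programs into the L3T/L4T, OL3T
 * and tunnel-type fields of the Tx BD described by the HNS3_TXD_* masks.
 * A minimal sketch, assuming the driver's hns3_set_field() helper
 * (illustrative only, not part of this header):
 *
 *	hns3_set_field(tmp, HNS3_TXD_L3T_M, HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 *	hns3_set_field(tmp, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, HNS3_L4T_TCP);
 */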

/* hardware spec ring buffer format */
struct hns3_desc {
	union {
		uint64_t addr;
		struct {
			uint32_t addr0;
			uint32_t addr1;
		};
	};
	union {
		struct {
			uint16_t vlan_tag;
			uint16_t send_size;
			union {
				/*
				 * L3T | L4T | L3CS | L4CS | VLAN | TSO |
				 * L2_LEN
				 */
				uint32_t type_cs_vlan_tso_len;
				struct {
					uint8_t type_cs_vlan_tso;
					uint8_t l2_len;
					uint8_t l3_len;
					uint8_t l4_len;
				};
			};
			uint16_t outer_vlan_tag;
			uint16_t tv;
			union {
				/* OL3T | OVLAN | MACSEC */
				uint32_t ol_type_vlan_len_msec;
				struct {
					uint8_t ol_type_vlan_msec;
					uint8_t ol2_len;
					uint8_t ol3_len;
					uint8_t ol4_len;
				};
			};

			uint32_t paylen_fd_dop_ol4cs;
			uint16_t tp_fe_sc_vld_ra_ri;
			uint16_t mss;
		} tx;

		struct {
			uint32_t l234_info;
			uint16_t pkt_len;
			uint16_t size;
			uint32_t rss_hash;
			uint16_t fd_id;
			uint16_t vlan_tag;
			union {
				uint32_t ol_info;
				struct {
					uint16_t o_dm_vlan_id_fb;
					uint16_t ot_vlan_tag;
				};
			};
			union {
				uint32_t bd_base_info;
				struct {
					uint16_t bdtype_vld_udp0;
					uint16_t fe_lum_crcp_l3l4p;
				};
			};
		} rx;
	};
} __rte_packed;

struct hns3_entry {
	struct rte_mbuf *mbuf;
};

struct hns3_rx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct hns3_rx_dfx_stats {
	uint64_t l3_csum_errors;
	uint64_t l4_csum_errors;
	uint64_t ol3_csum_errors;
	uint64_t ol4_csum_errors;
};

struct hns3_rx_bd_errors_stats {
	uint64_t l2_errors;
	uint64_t pkt_len_errors;
};

struct hns3_rx_queue {
	void *io_base;
	volatile void *io_head_reg;
	struct hns3_adapter *hns;
	struct hns3_ptype_table *ptype_tbl;
	struct rte_mempool *mb_pool;
	struct hns3_desc *rx_ring;
	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
	const struct rte_memzone *mz;
	struct hns3_entry *sw_ring;
	struct rte_mbuf *pkt_first_seg;
	struct rte_mbuf *pkt_last_seg;

	uint16_t queue_id;
	uint16_t port_id;
	uint16_t nb_rx_desc;
	uint16_t rx_buf_len;
	/*
	 * Threshold for the number of BDs waiting to be returned to hardware.
	 * When the number exceeds this threshold, the driver passes these BDs
	 * back to hardware (see the sketch after this structure).
	 */
	uint16_t rx_free_thresh;
	uint16_t next_to_use;    /* index of next BD to be polled */
	uint16_t rx_free_hold;   /* number of BDs waiting to be passed back to hardware */
	uint16_t rx_rearm_start; /* index of the BD from which the driver starts re-arming */
	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */

	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
	uint8_t crc_len;

	bool rx_deferred_start; /* don't start this queue in dev start */
	bool configured;        /* indicate if rx queue has been configured */
	/*
	 * Indicate whether to ignore the outer VLAN field reported in the Rx
	 * BD by the hardware. On hardware network engines whose VLAN mode is
	 * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the outer VLAN
	 * is the PVID when a PVID is set, and this VLAN must not be passed to
	 * the upper-layer application. On hardware network engines whose VLAN
	 * mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 930, the
	 * PVID is not reported in the BDs, so the PMD does not need to
	 * perform any PVID-related operation in Rx; in that case
	 * pvid_sw_discard_en will be false.
	 */
	bool pvid_sw_discard_en;
	bool ptype_en;          /* indicate if the ptype field is enabled */
	bool enabled;           /* indicate if Rx queue has been enabled */

	struct hns3_rx_basic_stats basic_stats;
	/* DFX statistics for which the driver does not need to discard the packet */
	struct hns3_rx_dfx_stats dfx_stats;
	/* Error statistics for which the driver needs to discard the packet */
	struct hns3_rx_bd_errors_stats err_stats;

	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
	uint16_t bulk_mbuf_num;

	/* offset_table: used by the vector Rx path to handle execution re-ordering */
	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
};
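
/*
 * Sketch of the rx_free_thresh mechanism referred to above, assuming the
 * driver's hns3_write_reg_opt() register helper (illustrative only, not the
 * literal Rx burst code): consumed BDs are accumulated in rx_free_hold, and
 * the head register is only written back once the threshold is crossed:
 *
 *	rxq->rx_free_hold += nb_rx_bd;
 *	if (rxq->rx_free_hold > rxq->rx_free_thresh) {
 *		hns3_write_reg_opt(rxq->io_head_reg, rxq->rx_free_hold);
 *		rxq->rx_free_hold = 0;
 *	}
 */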

struct hns3_tx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
};

/*
 * The following items are used for the abnormal error statistics in the Tx
 * datapath. When the upper-layer application calls the rte_eth_tx_burst API
 * to send multiple packets at a time on the hns3 network engine, some
 * abnormal conditions may cause the driver to fail to operate the hardware
 * and send the packets correctly.
 * Note: when the first abnormal error is detected within a burst, the
 * relevant error statistic is incremented by one and the transmit loop of
 * the function is exited. That is, even if abnormal errors could be detected
 * in multiple packets of the burst, the relevant error statistic in the
 * driver is only increased by one.
 * The detailed description of the Tx abnormal error statistic items is as
 * follows:
 * - over_length_pkt_cnt
 *     Total number of packets longer than HNS3_MAX_FRAME_LEN, the maximum
 *     frame length supported by the driver.
 *
 * - exceed_limit_bd_pkt_cnt
 *     Total number of packets that need more BDs than the hardware limit.
 *
 * - exceed_limit_bd_reassem_fail
 *     Total number of packets that need more BDs than the hardware limit
 *     and for which reassembly into fewer BDs failed.
 *
 * - unsupported_tunnel_pkt_cnt
 *     Total number of unsupported tunnel packets. The unsupported tunnel
 *     types are: vxlan_gpe, gtp, ipip and MPLS-in-UDP (a packet with an
 *     MPLS-in-UDP header, RFC 7510).
 *
 * - queue_full_cnt
 *     Total number of times the number of available BDs in the current BD
 *     queue was less than the number of BDs required to process the packet.
 *
 * - pkt_padding_fail_cnt
 *     Total number of packets whose length is less than the minimum packet
 *     length (struct hns3_tx_queue::min_tx_pkt_len) supported by hardware
 *     in the Tx direction and which could not be padded with zeros.
 */
struct hns3_tx_dfx_stats {
	uint64_t over_length_pkt_cnt;
	uint64_t exceed_limit_bd_pkt_cnt;
	uint64_t exceed_limit_bd_reassem_fail;
	uint64_t unsupported_tunnel_pkt_cnt;
	uint64_t queue_full_cnt;
	uint64_t pkt_padding_fail_cnt;
};

struct hns3_tx_queue {
	void *io_base;
	volatile void *io_tail_reg;
	struct hns3_adapter *hns;
	struct hns3_desc *tx_ring;
	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
	const struct rte_memzone *mz;
	struct hns3_entry *sw_ring;

	uint16_t queue_id;
	uint16_t port_id;
	uint16_t nb_tx_desc;
	/*
	 * index of next BD whose corresponding rte_mbuf can be released by
	 * the driver.
	 */
	uint16_t next_to_clean;
	/* index of next BD to be filled by the driver to send a packet */
	uint16_t next_to_use;
	/* number of remaining BDs ready to be filled by the driver to send packets */
	uint16_t tx_bd_ready;

	/* threshold to start freeing Tx buffers when available BDs fall below this value */
	uint16_t tx_free_thresh;

	/*
	 * For better performance in the Tx datapath, mbufs are released in
	 * batches.
	 * Checking only the VLD bit of the last descriptor in a batch of
	 * tx_rs_thresh descriptors does not guarantee that all of these
	 * descriptors have been sent by hardware, so the VLD bits of all
	 * descriptors in the batch must be checked to be cleared before the
	 * mbufs of the batch are freed.
	 * - tx_rs_thresh
	 *     Number of mbufs released at a time.
	 *
	 * - free
	 *     Tx mbuf free array used to temporarily preserve the addresses
	 *     of mbufs released back to the mempool when releasing mbufs in
	 *     batches.
	 */
	uint16_t tx_rs_thresh;
	struct rte_mbuf **free;

	/*
	 * tso mode.
	 * value range:
	 *      HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *
	 *  - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
	 *     In this mode, because of a hardware constraint, the driver
	 *     software must erase the L4 length in the TCP pseudo header and
	 *     recalculate the TCP pseudo header checksum of packets that
	 *     need TSO.
	 *
	 *  - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *     In this mode, the hardware can recalculate the TCP pseudo
	 *     header checksum of packets that need TSO, so the driver
	 *     software does not need to recalculate it.
	 */
	uint8_t tso_mode;
	/*
	 * udp checksum mode.
	 * value range:
	 *      HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 *
	 *  - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 *     In this mode, the hardware cannot calculate the checksum for
	 *     special UDP ports such as 4789, 4790 and 6081 for non-tunnel
	 *     UDP packets and for UDP tunnel packets without the
	 *     PKT_TX_TUNNEL_MASK in the mbuf, so the PMD calculates the
	 *     checksum for these packets to avoid a checksum error.
	 *
	 *  - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
	 *     In this mode, the hardware does not have the preceding problem
	 *     and can directly calculate the checksum of these UDP packets.
	 */
	uint8_t udp_cksum_mode;
	/*
	 * The minimum length of the packet supported by hardware in the Tx
	 * direction.
	 */
	uint32_t min_tx_pkt_len;

	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
	bool tx_deferred_start; /* don't start this queue in dev start */
	bool configured;        /* indicate if tx queue has been configured */
	/*
	 * Indicate whether to add the vlan_tci of the mbuf to the inner VLAN
	 * field of the Tx BD. Because the outer VLAN is always the PVID when
	 * a PVID is set, on hardware network engines whose VLAN mode is
	 * HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 920, the PVID would
	 * overwrite the outer VLAN field of the Tx BD. On hardware network
	 * engines whose VLAN mode is HNS3_HW_SHIFT_AND_DISCARD_MODE, such as
	 * Kunpeng 930, the hardware shifts the VLAN field automatically when
	 * a PVID is set, so the PMD does not need to do any PVID-related
	 * operation in Tx; in that case pvid_sw_shift_en will be false.
	 */
	bool pvid_sw_shift_en;
	bool enabled;           /* indicate if Tx queue has been enabled */

	struct hns3_tx_basic_stats basic_stats;
	struct hns3_tx_dfx_stats dfx_stats;
};

#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
		((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)
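
/*
 * Sketch of the batched mbuf release described in the tx_rs_thresh comment
 * above (illustrative pseudo-flow only, not the driver's actual cleanup
 * code): all tx_rs_thresh descriptors starting at next_to_clean must have
 * their VLD bit cleared before the corresponding mbufs are freed in one
 * batch. HNS3_GET_TX_QUEUE_PEND_BD_NUM() gives the number of BDs still owned
 * by hardware, since one BD is always kept unused:
 *
 *	for (i = 0; i < txq->tx_rs_thresh; i++) {
 *		idx = (txq->next_to_clean + i) % txq->nb_tx_desc;
 *		if (txq->tx_ring[idx].tx.tp_fe_sc_vld_ra_ri &
 *		    rte_cpu_to_le_16(BIT(HNS3_TXD_VLD_B)))
 *			return;    -- hardware still owns this batch
 *	}
 *	-- all VLD bits cleared: free the tx_rs_thresh mbufs in one batch
 */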

struct hns3_queue_info {
	const char *type;       /* points to the queue memory name */
	const char *ring_name;  /* points to the hardware ring name */
	uint16_t idx;
	uint16_t nb_desc;
	unsigned int socket_id;
};

#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
	PKT_TX_OUTER_UDP_CKSUM | \
	PKT_TX_OUTER_IP_CKSUM | \
	PKT_TX_IP_CKSUM | \
	PKT_TX_TCP_SEG | \
	PKT_TX_L4_MASK)

enum hns3_cksum_status {
	HNS3_CKSUM_NONE = 0,
	HNS3_L3_CKSUM_ERR = 1,
	HNS3_L4_CKSUM_ERR = 2,
	HNS3_OUTER_L3_CKSUM_ERR = 4,
	HNS3_OUTER_L4_CKSUM_ERR = 8
};

static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info,
		   uint32_t *cksum_err)
{
#define L2E_TRUNC_ERR_FLAG	(BIT(HNS3_RXD_L2E_B) | \
				 BIT(HNS3_RXD_TRUNCATE_B))
#define CHECKSUM_ERR_FLAG	(BIT(HNS3_RXD_L3E_B) | \
				 BIT(HNS3_RXD_L4E_B) | \
				 BIT(HNS3_RXD_OL3E_B) | \
				 BIT(HNS3_RXD_OL4E_B))

	uint32_t tmp = 0;

	/*
	 * If the packet length is larger than the MTU and packets are
	 * received with the non-scattered algorithm, the first n BDs do not
	 * have the FE bit set; this situation must be handled here.
	 * Note: no statistics counter needs to be added, because the last BD,
	 * which has the FE bit set, also has the HNS3_RXD_L2E_B bit set.
	 */
	if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
		return -EINVAL;

	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
		if (l234_info & BIT(HNS3_RXD_L2E_B))
			rxq->err_stats.l2_errors++;
		else
			rxq->err_stats.pkt_len_errors++;
		return -EINVAL;
	}

	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B)) {
		if (likely((l234_info & CHECKSUM_ERR_FLAG) == 0)) {
			*cksum_err = 0;
			return 0;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
			rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
			rxq->dfx_stats.l3_csum_errors++;
			tmp |= HNS3_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
			rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			rxq->dfx_stats.l4_csum_errors++;
			tmp |= HNS3_L4_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B))) {
			rxq->dfx_stats.ol3_csum_errors++;
			tmp |= HNS3_OUTER_L3_CKSUM_ERR;
		}

		if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
			rxq->dfx_stats.ol4_csum_errors++;
			tmp |= HNS3_OUTER_L4_CKSUM_ERR;
		}
	}
	*cksum_err = tmp;

	return 0;
}

static inline void
hns3_rx_set_cksum_flag(struct rte_mbuf *rxm, const uint64_t packet_type,
		       const uint32_t cksum_err)
{
	if (unlikely((packet_type & RTE_PTYPE_TUNNEL_MASK))) {
		if (likely(packet_type & RTE_PTYPE_INNER_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_INNER_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_OUTER_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
	} else {
		if (likely(packet_type & RTE_PTYPE_L3_MASK) &&
		    (cksum_err & HNS3_L3_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
		if (likely(packet_type & RTE_PTYPE_L4_MASK) &&
		    (cksum_err & HNS3_L4_CKSUM_ERR) == 0)
			rxm->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}
}

static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
		   const uint32_t ol_info)
{
	const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
	uint32_t l2id, l3id, l4id;
	uint32_t ol3id, ol4id, ol2id;
	uint32_t ptype;

	if (rxq->ptype_en) {
		ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
				       HNS3_RXD_PTYPE_S);
		return ptype_tbl->ptype[ptype];
	}

	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
	ol2id = hns3_get_field(ol_info, HNS3_RXD_OVLAN_M, HNS3_RXD_OVLAN_S);
	l2id = hns3_get_field(l234_info, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S);
	l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

	if (unlikely(ptype_tbl->ol4table[ol4id]))
		return ptype_tbl->inner_l2table[l2id] |
			ptype_tbl->inner_l3table[l3id] |
			ptype_tbl->inner_l4table[l4id] |
			ptype_tbl->ol3table[ol3id] |
			ptype_tbl->ol4table[ol4id] | ptype_tbl->ol2table[ol2id];
	else
		return ptype_tbl->l2l3table[l2id][l3id] |
			ptype_tbl->l4table[l4id];
}
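
/*
 * Typical use of the three inline helpers above in an Rx burst function
 * (a condensed sketch of the flow, not the literal datapath code): the BD
 * words are parsed first, and only on success are the packet type and the
 * checksum flags filled into the mbuf:
 *
 *	uint32_t cksum_err;
 *
 *	if (hns3_handle_bdinfo(rxq, rxm, bd_base_info, l234_info,
 *			       &cksum_err) != 0) {
 *		... drop the packet, the error statistics were updated ...
 *	}
 *	rxm->packet_type = hns3_rx_calc_ptype(rxq, l234_info, ol_info);
 *	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
 *		hns3_rx_set_cksum_flag(rxm, rxm->packet_type, cksum_err);
 */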

void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
		 int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket, const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
uint16_t hns3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
			    uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
				  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t idx);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);

#endif /* _HNS3_RXTX_H_ */