/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#ifndef HNS3_RXTX_H
#define HNS3_RXTX_H

#include <stdint.h>

#include <ethdev_driver.h>
#include <rte_mbuf_core.h>
#include <rte_ethdev.h>
#include <rte_ethdev_core.h>
#include <rte_io.h>
#include <rte_mempool.h>
#include <rte_memzone.h>

#include "hns3_ethdev.h"

#define HNS3_MIN_RING_DESC 64
#define HNS3_MAX_RING_DESC 32768
#define HNS3_DEFAULT_RING_DESC 1024
#define HNS3_ALIGN_RING_DESC 32
#define HNS3_RING_BASE_ALIGN 128
#define HNS3_BULK_ALLOC_MBUF_NUM 32

#define HNS3_DEFAULT_RX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_FREE_THRESH 32
#define HNS3_DEFAULT_TX_RS_THRESH 32
#define HNS3_TX_FAST_FREE_AHEAD 64

#define HNS3_DEFAULT_RX_BURST 64
#if (HNS3_DEFAULT_RX_BURST > 64)
#error "PMD HNS3: HNS3_DEFAULT_RX_BURST must <= 64\n"
#endif
#define HNS3_DEFAULT_DESCS_PER_LOOP 4
#define HNS3_SVE_DEFAULT_DESCS_PER_LOOP 8
#if (HNS3_DEFAULT_DESCS_PER_LOOP > HNS3_SVE_DEFAULT_DESCS_PER_LOOP)
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_DEFAULT_DESCS_PER_LOOP
#else
#define HNS3_VECTOR_RX_OFFSET_TABLE_LEN HNS3_SVE_DEFAULT_DESCS_PER_LOOP
#endif
#define HNS3_DEFAULT_RXQ_REARM_THRESH 64
#define HNS3_UINT8_BIT 8
#define HNS3_UINT16_BIT 16
#define HNS3_UINT32_BIT 32

#define HNS3_512_BD_BUF_SIZE 512
#define HNS3_1K_BD_BUF_SIZE 1024
#define HNS3_2K_BD_BUF_SIZE 2048
#define HNS3_4K_BD_BUF_SIZE 4096

#define HNS3_MIN_BD_BUF_SIZE HNS3_512_BD_BUF_SIZE
#define HNS3_MAX_BD_BUF_SIZE HNS3_4K_BD_BUF_SIZE

#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
#define HNS3_BD_SIZE_2048_TYPE 2
#define HNS3_BD_SIZE_4096_TYPE 3

#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
#define HNS3_RX_FLAG_L3ID_IPV4 0x0
#define HNS3_RX_FLAG_L3ID_IPV6 0x1
#define HNS3_RX_FLAG_L4ID_UDP 0x0
#define HNS3_RX_FLAG_L4ID_TCP 0x1

#define HNS3_RXD_DMAC_S 0
#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
#define HNS3_RXD_VLAN_S 2
#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
#define HNS3_RXD_L3ID_S 4
#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
#define HNS3_RXD_L4ID_S 8
#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
#define HNS3_RXD_FRAG_B 12
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)

#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
#define HNS3_RXD_TRUNCATE_B 19
#define HNS3_RXD_HOI_B 20
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31

#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
#define HNS3_RXD_OVLAN_S 2
#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
#define HNS3_RXD_OL3ID_S 4
#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
#define HNS3_RXD_OL4ID_S 8
#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
#define HNS3_RXD_PTYPE_S 4
#define HNS3_RXD_PTYPE_M (0xff << HNS3_RXD_PTYPE_S)
#define HNS3_RXD_FBHI_S 12
#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
#define HNS3_RXD_FBLI_S 14
#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)

#define HNS3_RXD_BDTYPE_S 0
#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
#define HNS3_RXD_VLD_B 4
#define HNS3_RXD_UDP0_B 5
#define HNS3_RXD_EXTEND_B 7
#define HNS3_RXD_FE_B 8
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11

#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)

#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
#define HNS3_TXD_L4T_S 2
#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
#define HNS3_TXD_L3CS_B 4
#define HNS3_TXD_L4CS_B 5
#define HNS3_TXD_VLAN_B 6
#define HNS3_TXD_TSO_B 7

#define HNS3_TXD_L2LEN_S 8
#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
#define HNS3_TXD_L3LEN_S 16
#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xffUL << HNS3_TXD_L4LEN_S)

#define HNS3_TXD_L4_START_S 8
#define HNS3_TXD_L4_START_M (0xffff << HNS3_TXD_L4_START_S)

#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
#define HNS3_TXD_MACSEC_B 3
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)

#define HNS3_TXD_L4_CKS_OFFSET_S 8
#define HNS3_TXD_L4_CKS_OFFSET_M (0xffff << HNS3_TXD_L4_CKS_OFFSET_S)

#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
#define HNS3_TXD_SC_S 5
#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
#define HNS3_TXD_EXTEND_B 7
#define HNS3_TXD_VLD_B 8
#define HNS3_TXD_RI_B 9
#define HNS3_TXD_RA_B 10
#define HNS3_TXD_TSYN_B 11
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)

#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)

#define HNS3_TXD_CKST_B 14

#define HNS3_TXD_OL4CS_B 22
#define HNS3_L2_LEN_UNIT 1UL
#define HNS3_L3_LEN_UNIT 2UL
#define HNS3_L4_LEN_UNIT 2UL
#define HNS3_SIMPLE_BD_UNIT 1UL

#define HNS3_TXD_DEFAULT_BDTYPE 0
#define HNS3_TXD_VLD_CMD (0x1 << HNS3_TXD_VLD_B)
#define HNS3_TXD_FE_CMD (0x1 << HNS3_TXD_FE_B)
#define HNS3_TXD_DEFAULT_VLD_FE_BDTYPE \
	(HNS3_TXD_VLD_CMD | HNS3_TXD_FE_CMD | HNS3_TXD_DEFAULT_BDTYPE)
#define HNS3_TXD_SEND_SIZE_SHIFT 16
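
/*
 * The *_S/*_M pairs above are shift/mask pairs for multi-bit descriptor
 * fields and the *_B macros are single-bit positions. As an illustrative
 * sketch only (the driver itself uses the hns3_get_field()/BIT() helpers
 * pulled in through hns3_ethdev.h; "rxdp" below stands for a pointer to the
 * struct hns3_desc defined later in this file):
 *
 *	uint32_t l234_info = rte_le_to_cpu_32(rxdp->rx.l234_info);
 *	uint32_t l3id = (l234_info & HNS3_RXD_L3ID_M) >> HNS3_RXD_L3ID_S;
 *	bool l3_err = (l234_info & (1U << HNS3_RXD_L3E_B)) != 0;
 */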

enum hns3_pkt_l2t_type {
	HNS3_L2_TYPE_UNICAST,
	HNS3_L2_TYPE_MULTICAST,
	HNS3_L2_TYPE_BROADCAST,
	HNS3_L2_TYPE_INVALID,
};

enum hns3_pkt_l3t_type {
	HNS3_L3T_NONE,
	HNS3_L3T_IPV6,
	HNS3_L3T_IPV4,
	HNS3_L3T_RESERVED
};

enum hns3_pkt_l4t_type {
	HNS3_L4T_UNKNOWN,
	HNS3_L4T_TCP,
	HNS3_L4T_UDP,
	HNS3_L4T_SCTP
};

enum hns3_pkt_ol3t_type {
	HNS3_OL3T_NONE,
	HNS3_OL3T_IPV6,
	HNS3_OL3T_IPV4_NO_CSUM,
	HNS3_OL3T_IPV4_CSUM
};

enum hns3_pkt_tun_type {
	HNS3_TUN_NONE,
	HNS3_TUN_MAC_IN_UDP,
	HNS3_TUN_NVGRE,
	HNS3_TUN_OTHER
};

/* hardware spec ring buffer format */
struct __rte_packed_begin hns3_desc {
	union {
		uint64_t addr;
		uint64_t timestamp;

		struct {
			uint32_t addr0;
			uint32_t addr1;
		};
	};
	union {
		struct {
			uint16_t vlan_tag;
			uint16_t send_size;
			union {
				/*
				 * L3T | L4T | L3CS | L4CS | VLAN | TSO |
				 * L2_LEN
				 */
				uint32_t type_cs_vlan_tso_len;
				struct {
					uint8_t type_cs_vlan_tso;
					uint8_t l2_len;
					uint8_t l3_len;
					uint8_t l4_len;
				};
			};
			uint16_t outer_vlan_tag;
			uint16_t tv;
			union {
				/* OL3T | OVLAN | MACSEC */
				uint32_t ol_type_vlan_len_msec;
				struct {
					uint8_t ol_type_vlan_msec;
					uint8_t ol2_len;
					uint8_t ol3_len;
					uint8_t ol4_len;
				};
			};

			uint32_t paylen_fd_dop_ol4cs;
			uint16_t tp_fe_sc_vld_ra_ri;
			uint16_t ckst_mss;
		} tx;

		struct {
			uint32_t l234_info;
			uint16_t pkt_len;
			uint16_t size;
			uint32_t rss_hash;
			uint16_t fd_id;
			uint16_t vlan_tag;
			union {
				uint32_t ol_info;
				struct {
					uint16_t o_dm_vlan_id_fb;
					uint16_t ot_vlan_tag;
				};
			};
			union {
				uint32_t bd_base_info;
				struct {
					uint16_t bdtype_vld_udp0;
					uint16_t fe_lum_crcp_l3l4p;
				};
			};
		} rx;
	};
} __rte_packed_end;
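
/*
 * Descriptor words are stored little-endian, so software converts them with
 * the rte_le_to_cpu_*()/rte_cpu_to_le_*() helpers before using them. A
 * minimal, illustrative sketch of inspecting a completed Rx BD (not the
 * driver's actual receive path; "rxdp" is an assumed descriptor pointer):
 *
 *	uint32_t bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
 *
 *	if (bd_base_info & (1U << HNS3_RXD_VLD_B)) {
 *		uint16_t data_len = rte_le_to_cpu_16(rxdp->rx.size);
 *		uint16_t pkt_len = rte_le_to_cpu_16(rxdp->rx.pkt_len);
 *		bool last_frag = (bd_base_info & (1U << HNS3_RXD_FE_B)) != 0;
 *	}
 */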

struct hns3_entry {
	struct rte_mbuf *mbuf;
};

struct hns3_rx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

struct hns3_rx_dfx_stats {
	uint64_t l3_csum_errors;
	uint64_t l4_csum_errors;
	uint64_t ol3_csum_errors;
	uint64_t ol4_csum_errors;
};

struct hns3_rx_bd_errors_stats {
	uint64_t l2_errors;
	uint64_t pkt_len_errors;
};

struct hns3_rx_queue {
	volatile void *io_head_reg;
	struct hns3_ptype_table *ptype_tbl;
	struct rte_mempool *mb_pool;
	struct hns3_desc *rx_ring;
	struct hns3_entry *sw_ring;

	uint16_t port_id;
	uint16_t nb_rx_desc;
	/*
	 * Threshold for the number of BDs waiting to be passed to hardware.
	 * If the number exceeds the threshold, the driver passes these BDs
	 * to hardware.
	 */
	uint16_t rx_free_thresh;
	uint16_t next_to_use;    /* index of next BD to be polled */
	uint16_t rx_free_hold;   /* num of BDs waiting to be passed to hardware */
	uint16_t rx_rearm_start; /* index of the BD that re-arming starts from */
	uint16_t rx_rearm_nb;    /* number of remaining BDs to be re-armed */

	/* 4 if RTE_ETH_RX_OFFLOAD_KEEP_CRC offload is set, 0 otherwise */
	uint8_t crc_len;

	/*
	 * Indicate whether to ignore the outer VLAN field in the Rx BD
	 * reported by the hardware. On some versions of the hardware network
	 * engine whose VLAN mode is HNS3_SW_SHIFT_AND_DISCARD_MODE, such as
	 * Kunpeng 920, the outer VLAN is the PVID if the PVID is set, and
	 * this VLAN should not be passed to the upper-layer application. On
	 * hardware network engines whose VLAN mode is
	 * HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 930, the PVID is
	 * not reported in the BDs, so the PMD does not need to perform any
	 * PVID-related operations in Rx and pvid_sw_discard_en will be
	 * false.
	 */
	uint8_t pvid_sw_discard_en:1;
	uint8_t ptype_en:1; /* indicate if the ptype field is enabled */

	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
	/* offset_table: used by vector Rx to solve the execution re-order problem */
	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];

	uint16_t bulk_mbuf_num; /* number of valid mbufs in bulk_mbuf */

	struct hns3_rx_basic_stats basic_stats;

	struct rte_mbuf *pkt_first_seg;
	struct rte_mbuf *pkt_last_seg;

	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];

	/* DFX statistics for which the driver does not need to discard the packet */
	struct hns3_rx_dfx_stats dfx_stats;
	/* Error statistics for which the driver needs to discard the packet */
	struct hns3_rx_bd_errors_stats err_stats;

	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */

	/*
	 * The following fields are not accessed in the I/O path, so they are
	 * placed at the end.
	 */
	alignas(RTE_CACHE_LINE_SIZE) void *io_base;
	struct hns3_adapter *hns;
	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
	const struct rte_memzone *mz;

	uint16_t queue_id;
	uint16_t rx_buf_len;

	bool configured;        /* indicate if rx queue has been configured */
	bool rx_deferred_start; /* don't start this queue in dev start */
	bool enabled;           /* indicate if Rx queue has been enabled */
};
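
/*
 * Most of the fields above mirror what the application requests through the
 * generic ethdev API at Rx queue setup time. An illustrative application-side
 * sketch (port_id, socket_id and mb_pool are assumed to exist in the
 * application; the values shown are only examples):
 *
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_free_thresh = HNS3_DEFAULT_RX_FREE_THRESH,
 *		.rx_deferred_start = 0,
 *	};
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, HNS3_DEFAULT_RING_DESC,
 *					 socket_id, &rxconf, mb_pool);
 */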

struct hns3_tx_basic_stats {
	uint64_t packets;
	uint64_t bytes;
};

/*
 * The following items are used for the abnormal error statistics in the Tx
 * datapath. When an upper-layer application calls the rte_eth_tx_burst API
 * to send multiple packets at a time on the hns3 network engine, some
 * abnormal conditions can cause the driver to fail to program the hardware
 * to send the packets correctly.
 * Note: when multiple packets are sent in one rte_eth_tx_burst call, the
 * relevant error statistics item is increased by one when the first abnormal
 * error is detected, and the transmit loop of the function is then exited.
 * That is, even if abnormal errors could be detected in several packets of
 * the burst, the relevant error counter in the driver is only increased by
 * one.
 * The detailed description of the Tx abnormal error statistics items is as
 * follows:
 * - over_length_pkt_cnt
 *     Total number of packets whose length exceeds the maximum frame length
 *     (HNS3_MAX_FRAME_LEN) supported by the driver.
 *
 * - exceed_limit_bd_pkt_cnt
 *     Total number of packets that need more BDs than the hardware limit.
 *
 * - exceed_limit_bd_reassem_fail
 *     Total number of packets that need more BDs than the hardware limit and
 *     for which reassembling into fewer BDs failed.
 *
 * - unsupported_tunnel_pkt_cnt
 *     Total number of packets with an unsupported tunnel type, such as
 *     vxlan_gpe, gtp, ipip and MPLS-in-UDP (a packet with an MPLS-in-UDP
 *     RFC 7510 header).
 *
 * - queue_full_cnt
 *     Total number of times the number of available BDs in the current BD
 *     queue was less than the number of BDs needed to process the packet.
 *
 * - pkt_padding_fail_cnt
 *     Total number of times a packet shorter than the minimum packet length
 *     (struct hns3_tx_queue::min_tx_pkt_len) supported by the hardware in
 *     the Tx direction failed to be padded with zeros.
 */
struct hns3_tx_dfx_stats {
	uint64_t over_length_pkt_cnt;
	uint64_t exceed_limit_bd_pkt_cnt;
	uint64_t exceed_limit_bd_reassem_fail;
	uint64_t unsupported_tunnel_pkt_cnt;
	uint64_t queue_full_cnt;
	uint64_t pkt_padding_fail_cnt;
};
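
/*
 * These per-queue counters are internal to the PMD. Assuming they are
 * exported as extended statistics (the usual way PMDs expose such counters;
 * the exact statistic names are driver-defined), an application can dump
 * them with the generic xstats API, e.g. (an illustrative sketch, error
 * handling omitted):
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xstats = malloc(nb * sizeof(*xstats));
 *	struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xstats, nb);
 *	for (int i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
 */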

struct hns3_tx_queue {
	/* The io_tail_reg is write-only if working in tx push mode */
	volatile void *io_tail_reg;
	struct hns3_desc *tx_ring;
	struct hns3_entry *sw_ring;

	uint16_t nb_tx_desc;
	/*
	 * index of the next BD whose corresponding rte_mbuf can be released
	 * by the driver.
	 */
	uint16_t next_to_clean;
	/* index of the next BD to be filled by the driver to send a packet */
	uint16_t next_to_use;
	/* number of remaining BDs ready to be filled by the driver to send packets */
	uint16_t tx_bd_ready;

	/* free Tx mbufs if the number of available BDs is less than this value */
	uint16_t tx_free_thresh;

	/*
	 * The minimum length of the packet supported by hardware in the Tx
	 * direction.
	 */
	uint8_t min_tx_pkt_len;

	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */

	/*
	 * tso mode.
	 * value range:
	 *      HNS3_TSO_SW_CAL_PSEUDO_H_CSUM/HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *
	 *  - HNS3_TSO_SW_CAL_PSEUDO_H_CSUM
	 *     In this mode, because of a hardware constraint, the driver
	 *     software needs to erase the L4 length value in the TCP pseudo
	 *     header and recalculate the TCP pseudo header checksum of
	 *     packets that need TSO.
	 *
	 *  - HNS3_TSO_HW_CAL_PSEUDO_H_CSUM
	 *     In this mode, the hardware can recalculate the TCP pseudo
	 *     header checksum of packets that need TSO, so the driver
	 *     software does not need to recalculate it.
	 */
	uint16_t tso_mode:1;
	/*
	 * udp checksum mode.
	 * value range:
	 *      HNS3_SPECIAL_PORT_HW_CKSUM_MODE/HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 *
	 *  - HNS3_SPECIAL_PORT_SW_CKSUM_MODE
	 *     In this mode, the hardware cannot compute the checksum for
	 *     special UDP ports such as 4789, 4790 and 6081 for non-tunnel
	 *     UDP packets and for UDP tunnel packets without
	 *     RTE_MBUF_F_TX_TUNNEL_MASK set in the mbuf. So the PMD needs to
	 *     compute the checksum for these packets to avoid a checksum
	 *     error.
	 *
	 *  - HNS3_SPECIAL_PORT_HW_CKSUM_MODE
	 *     In this mode, the hardware does not have the preceding problem
	 *     and can directly compute the checksum of these UDP packets.
	 */
	uint16_t udp_cksum_mode:1;
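
	/*
	 * In HNS3_SPECIAL_PORT_SW_CKSUM_MODE the PMD falls back to a software
	 * checksum for those packets. Conceptually this is the same
	 * calculation an application would do with the generic rte_ip.h
	 * helpers, e.g. for an IPv4/UDP packet (an illustrative sketch only,
	 * not the driver's code path; ipv4_hdr and udp_hdr are assumed to
	 * point into the packet):
	 *
	 *	udp_hdr->dgram_cksum = 0;
	 *	udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
	 */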

	/* indicate whether the simple BD mode is supported */
	uint16_t simple_bd_enable:1;
	uint16_t tx_push_enable:1; /* check whether the tx push is enabled */
	/*
	 * Indicate whether to add the vlan_tci of the mbuf to the inner VLAN
	 * field of the Tx BD. The outer VLAN will always be the PVID when the
	 * PVID is set, and on some versions of the hardware network engine
	 * whose VLAN mode is HNS3_SW_SHIFT_AND_DISCARD_MODE, such as Kunpeng
	 * 920, the PVID will overwrite the outer VLAN field of the Tx BD. On
	 * hardware network engines whose VLAN mode is
	 * HNS3_HW_SHIFT_AND_DISCARD_MODE, such as Kunpeng 930, the hardware
	 * shifts the VLAN field automatically if the PVID is set, so the PMD
	 * does not need to do PVID-related operations in Tx and
	 * pvid_sw_shift_en will be false at this point.
	 */
	uint16_t pvid_sw_shift_en:1;
	/* check whether the mbuf fast free offload is enabled */
	uint16_t mbuf_fast_free_en:1;

	/*
	 * For better performance in the Tx datapath, mbufs are released in
	 * batches.
	 * Checking only the VLD bit of the last descriptor in a batch of
	 * tx_rs_thresh descriptors does not guarantee that all of these
	 * descriptors have been sent by hardware, so the driver checks that
	 * the VLD bits of all descriptors in the batch are cleared and only
	 * then frees all mbufs in the batch.
	 * - tx_rs_thresh
	 *     Number of mbufs released at a time.
	 *
	 * - free
	 *     Tx mbuf free array used to temporarily hold the addresses of
	 *     mbufs released back to the mempool when releasing mbufs in
	 *     batches.
	 */
	uint16_t tx_rs_thresh;
	struct rte_mbuf **free;

	struct hns3_tx_basic_stats basic_stats;
	struct hns3_tx_dfx_stats dfx_stats;

	/*
	 * The following fields are not accessed in the I/O path, so they are
	 * placed at the end.
	 */
	alignas(RTE_CACHE_LINE_SIZE) void *io_base;
	struct hns3_adapter *hns;
	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
	const struct rte_memzone *mz;

	uint16_t port_id;
	uint16_t queue_id;

	bool configured;        /* indicate if tx queue has been configured */
	bool tx_deferred_start; /* don't start this queue in dev start */
	bool enabled;           /* indicate if Tx queue has been enabled */
};
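
/*
 * tx_rs_thresh, tx_free_thresh and mbuf_fast_free_en mirror what the
 * application requests at Tx queue setup time. An illustrative
 * application-side sketch (port_id and socket_id are assumed; the fast-free
 * offload must be supported and enabled at the appropriate level):
 *
 *	struct rte_eth_txconf txconf = {
 *		.tx_rs_thresh = HNS3_DEFAULT_TX_RS_THRESH,
 *		.tx_free_thresh = HNS3_DEFAULT_TX_FREE_THRESH,
 *		.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 *	};
 *	int ret = rte_eth_tx_queue_setup(port_id, 0, HNS3_DEFAULT_RING_DESC,
 *					 socket_id, &txconf);
 */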

#define RX_BD_LOG(hw, level, rxdp) \
	PMD_RX_LOG(hw, level, "Rx descriptor: " \
		"l234_info=%#x pkt_len=%u size=%u rss_hash=%#x fd_id=%u vlan_tag=%u " \
		"o_dm_vlan_id_fb=%#x ot_vlan_tag=%u bd_base_info=%#x", \
		rte_le_to_cpu_32((rxdp)->rx.l234_info), \
		rte_le_to_cpu_16((rxdp)->rx.pkt_len), \
		rte_le_to_cpu_16((rxdp)->rx.size), \
		rte_le_to_cpu_32((rxdp)->rx.rss_hash), \
		rte_le_to_cpu_16((rxdp)->rx.fd_id), \
		rte_le_to_cpu_16((rxdp)->rx.vlan_tag), \
		rte_le_to_cpu_16((rxdp)->rx.o_dm_vlan_id_fb), \
		rte_le_to_cpu_16((rxdp)->rx.ot_vlan_tag), \
		rte_le_to_cpu_32((rxdp)->rx.bd_base_info))

#define TX_BD_LOG(hw, level, txdp) \
	PMD_TX_LOG(hw, level, "Tx descriptor: " \
		"vlan_tag=%u send_size=%u type_cs_vlan_tso_len=%#x outer_vlan_tag=%u " \
		"tv=%#x ol_type_vlan_len_msec=%#x paylen_fd_dop_ol4cs=%#x " \
		"tp_fe_sc_vld_ra_ri=%#x ckst_mss=%u", \
		rte_le_to_cpu_16((txdp)->tx.vlan_tag), \
		rte_le_to_cpu_16((txdp)->tx.send_size), \
		rte_le_to_cpu_32((txdp)->tx.type_cs_vlan_tso_len), \
		rte_le_to_cpu_16((txdp)->tx.outer_vlan_tag), \
		rte_le_to_cpu_16((txdp)->tx.tv), \
		rte_le_to_cpu_32((txdp)->tx.ol_type_vlan_len_msec), \
		rte_le_to_cpu_32((txdp)->tx.paylen_fd_dop_ol4cs), \
		rte_le_to_cpu_16((txdp)->tx.tp_fe_sc_vld_ra_ri), \
		rte_le_to_cpu_16((txdp)->tx.ckst_mss))

#define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
	((txq)->nb_tx_desc - 1 - (txq)->tx_bd_ready)

struct hns3_queue_info {
	const char *type;      /* points to the queue memory name */
	const char *ring_name; /* points to the hardware ring name */
	uint16_t idx;
	uint16_t nb_desc;
	unsigned int socket_id;
};

#define HNS3_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
	RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
	RTE_MBUF_F_TX_IP_CKSUM | \
	RTE_MBUF_F_TX_TCP_SEG | \
	RTE_MBUF_F_TX_L4_MASK)
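
/*
 * HNS3_TX_CKSUM_OFFLOAD_MASK collects the per-mbuf Tx offload request flags
 * that the checksum/TSO handling cares about. An application requests these
 * offloads per packet in the standard DPDK way, e.g. for an IPv4/UDP packet
 * (an illustrative sketch; "m" is an assumed struct rte_mbuf pointer and the
 * corresponding port/queue offloads must be enabled):
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_UDP_CKSUM;
 */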

enum hns3_cksum_status {
	HNS3_CKSUM_NONE = 0,
	HNS3_L3_CKSUM_ERR = 1,
	HNS3_L4_CKSUM_ERR = 2,
	HNS3_OUTER_L3_CKSUM_ERR = 4,
	HNS3_OUTER_L4_CKSUM_ERR = 8
};

extern uint64_t hns3_timestamp_rx_dynflag;
extern int hns3_timestamp_dynfield_offset;

static inline void
hns3_rx_set_cksum_flag(struct hns3_rx_queue *rxq,
		       struct rte_mbuf *rxm,
		       uint32_t l234_info)
{
#define HNS3_RXD_CKSUM_ERR_MASK (BIT(HNS3_RXD_L3E_B) | \
				 BIT(HNS3_RXD_L4E_B) | \
				 BIT(HNS3_RXD_OL3E_B) | \
				 BIT(HNS3_RXD_OL4E_B))

	if (likely((l234_info & HNS3_RXD_CKSUM_ERR_MASK) == 0)) {
		rxm->ol_flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD);
		return;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_L3E_B))) {
		rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		rxq->dfx_stats.l3_csum_errors++;
	} else {
		rxm->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_L4E_B))) {
		rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		rxq->dfx_stats.l4_csum_errors++;
	} else {
		rxm->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	}

	if (unlikely(l234_info & BIT(HNS3_RXD_OL3E_B)))
		rxq->dfx_stats.ol3_csum_errors++;

	if (unlikely(l234_info & BIT(HNS3_RXD_OL4E_B))) {
		rxm->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
		rxq->dfx_stats.ol4_csum_errors++;
	}
}

static inline int
hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
		   uint32_t bd_base_info, uint32_t l234_info)
{
#define L2E_TRUNC_ERR_FLAG (BIT(HNS3_RXD_L2E_B) | \
			    BIT(HNS3_RXD_TRUNCATE_B))

	/*
	 * If the packet length is bigger than the MTU when receiving with
	 * the non-scattered algorithm, the first n BDs will not have the FE
	 * bit set and this situation must be handled here.
	 * Note: no statistics counter needs to be added because the latest
	 * BD with the FE bit set will have the HNS3_RXD_L2E_B bit marked.
	 */
	if (unlikely((bd_base_info & BIT(HNS3_RXD_FE_B)) == 0))
		return -EINVAL;

	if (unlikely((l234_info & L2E_TRUNC_ERR_FLAG) || rxm->pkt_len == 0)) {
		if (l234_info & BIT(HNS3_RXD_L2E_B))
			rxq->err_stats.l2_errors++;
		else
			rxq->err_stats.pkt_len_errors++;
		return -EINVAL;
	}

	if (bd_base_info & BIT(HNS3_RXD_L3L4P_B))
		hns3_rx_set_cksum_flag(rxq, rxm, l234_info);

	return 0;
}

static inline uint32_t
hns3_rx_calc_ptype(struct hns3_rx_queue *rxq, const uint32_t l234_info,
		   const uint32_t ol_info)
{
	const struct hns3_ptype_table * const ptype_tbl = rxq->ptype_tbl;
	uint32_t ol3id, ol4id;
	uint32_t l3id, l4id;
	uint32_t ptype;

	if (rxq->ptype_en) {
		ptype = hns3_get_field(ol_info, HNS3_RXD_PTYPE_M,
				       HNS3_RXD_PTYPE_S);
		return ptype_tbl->ptype[ptype];
	}

	ol4id = hns3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	ol3id = hns3_get_field(ol_info, HNS3_RXD_OL3ID_M, HNS3_RXD_OL3ID_S);
	l3id = hns3_get_field(l234_info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
	l4id = hns3_get_field(l234_info, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S);

	if (unlikely(ptype_tbl->ol4table[ol4id]))
		return ptype_tbl->inner_l3table[l3id] |
			ptype_tbl->inner_l4table[l4id] |
			ptype_tbl->ol3table[ol3id] |
			ptype_tbl->ol4table[ol4id];
	else
		return ptype_tbl->l3table[l3id] | ptype_tbl->l4table[l4id];
}

/*
 * If the Tx push feature is enabled and the device supports it, use the
 * quick doorbell (bar45) to inform the hardware.
 *
 * In the other cases (the device does not support it or the user does not
 * enable it), use the normal doorbell (bar23) to inform the hardware.
 */
static inline void
hns3_write_txq_tail_reg(struct hns3_tx_queue *txq, uint32_t value)
{
	rte_io_wmb();
	if (txq->tx_push_enable)
		rte_write64_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
	else
		rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
}

void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_enable_all_queues(struct hns3_hw *hw, bool en);
int hns3_init_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_start_tqps(struct hns3_hw *hw);
void hns3_stop_tqps(struct hns3_hw *hw);
int hns3_rxq_iterate(struct rte_eth_dev *dev,
		     int (*callback)(struct hns3_rx_queue *, void *), void *arg);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_rxconf *conf,
			struct rte_mempool *mp);
int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *conf);
uint32_t hns3_rx_queue_count(void *rx_queue);
int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int hns3_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int hns3_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
uint16_t hns3_recv_pkts_simple(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec(void *__restrict rx_queue,
			    struct rte_mbuf **__restrict rx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_recv_pkts_vec_sve(void *__restrict rx_queue,
				struct rte_mbuf **__restrict rx_pkts,
				uint16_t nb_pkts);
int hns3_rx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
uint16_t hns3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts);
uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t hns3_xmit_pkts_vec_sve(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);
int hns3_tx_burst_mode_get(struct rte_eth_dev *dev,
			   __rte_unused uint16_t queue_id,
			   struct rte_eth_burst_mode *mode);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev,
					      size_t *no_of_elements);
void hns3_init_rx_ptype_tble(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
uint32_t hns3_get_tqp_intr_reg_offset(uint16_t tqp_intr_id);
void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
			    uint8_t gl_idx, uint16_t gl_value);
void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t rl_value);
void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
			    uint16_t ql_value);
int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
				  uint16_t nb_tx_q);
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_proc_en(struct hns3_hw *hw);
void hns3_rx_scattered_reset(struct rte_eth_dev *dev);
void hns3_rx_scattered_calc(struct rte_eth_dev *dev);
int hns3_rx_check_vec_support(struct rte_eth_dev *dev);
int hns3_tx_check_vec_support(struct rte_eth_dev *dev);
void hns3_rxq_vec_setup(struct hns3_rx_queue *rxq);
void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_rxq_info *qinfo);
void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		       struct rte_eth_txq_info *qinfo);
uint32_t hns3_get_tqp_reg_offset(uint16_t queue_id);
int hns3_start_all_txqs(struct rte_eth_dev *dev);
int hns3_start_all_rxqs(struct rte_eth_dev *dev);
void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
void hns3_tx_push_init(struct rte_eth_dev *dev);
void hns3_stop_tx_datapath(struct rte_eth_dev *dev);
void hns3_start_tx_datapath(struct rte_eth_dev *dev);
void hns3_stop_rxtx_datapath(struct rte_eth_dev *dev);
void hns3_start_rxtx_datapath(struct rte_eth_dev *dev);
int hns3_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);

#endif /* HNS3_RXTX_H */