/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <rte_mbuf.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
#include <rte_ip.h>
#ifdef RTE_ARCH_ARM64
#include <arm_neon.h>
#endif

#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_nicio.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"
#include "hinic_pmd_tx.h"

/* packet header and tx offload info */
#define ETHER_LEN_NO_VLAN		14
#define ETHER_LEN_WITH_VLAN		18
#define VXLANLEN			8
#define MAX_PLD_OFFSET			221
#define MAX_SINGLE_SGE_SIZE		65536
#define TSO_ENABLE			1
#define TX_MSS_DEFAULT			0x3E00
#define TX_MSS_MIN			0x50

#define HINIC_NONTSO_PKT_MAX_SGE		17	/* non-tso max sge 17 */
#define HINIC_NONTSO_SEG_NUM_INVALID(num)	\
	((num) > HINIC_NONTSO_PKT_MAX_SGE)

#define HINIC_TSO_PKT_MAX_SGE			127	/* tso max sge 127 */
#define HINIC_TSO_SEG_NUM_INVALID(num)	((num) > HINIC_TSO_PKT_MAX_SGE)

/* sizeof(struct hinic_sq_bufdesc) == 16, shift 4 */
#define HINIC_BUF_DESC_SIZE(nr_descs)	(SIZE_8BYTES(((u32)nr_descs) << 4))

#define MASKED_SQ_IDX(sq, idx)		((idx) & (sq)->wq->mask)

/* SQ_CTRL */
#define SQ_CTRL_BUFDESC_SECT_LEN_SHIFT		0
#define SQ_CTRL_TASKSECT_LEN_SHIFT		16
#define SQ_CTRL_DATA_FORMAT_SHIFT		22
#define SQ_CTRL_LEN_SHIFT			29
#define SQ_CTRL_OWNER_SHIFT			31

#define SQ_CTRL_BUFDESC_SECT_LEN_MASK		0xFFU
#define SQ_CTRL_TASKSECT_LEN_MASK		0x1FU
#define SQ_CTRL_DATA_FORMAT_MASK		0x1U
#define SQ_CTRL_LEN_MASK			0x3U
#define SQ_CTRL_OWNER_MASK			0x1U

#define SQ_CTRL_SET(val, member)	\
	(((val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT)

#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT		2
#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT		10
#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT		11
#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT	12
#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT		13
#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT		27
#define SQ_CTRL_QUEUE_INFO_UC_SHIFT		28
#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT		29

#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK		0xFFU
#define SQ_CTRL_QUEUE_INFO_UFO_MASK		0x1U
#define SQ_CTRL_QUEUE_INFO_TSO_MASK		0x1U
#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK	0x1U
#define SQ_CTRL_QUEUE_INFO_MSS_MASK		0x3FFFU
#define SQ_CTRL_QUEUE_INFO_SCTP_MASK		0x1U
#define SQ_CTRL_QUEUE_INFO_UC_MASK		0x1U
#define SQ_CTRL_QUEUE_INFO_PRI_MASK		0x7U

#define SQ_CTRL_QUEUE_INFO_SET(val, member)	\
	(((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) <<	\
	 SQ_CTRL_QUEUE_INFO_##member##_SHIFT)

#define SQ_CTRL_QUEUE_INFO_GET(val, member)	\
	(((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) &	\
	 SQ_CTRL_QUEUE_INFO_##member##_MASK)

#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member)	\
	((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK <<	\
	 SQ_CTRL_QUEUE_INFO_##member##_SHIFT)))

#define SQ_TASK_INFO0_L2HDR_LEN_SHIFT		0
#define SQ_TASK_INFO0_L4OFFLOAD_SHIFT		8
#define SQ_TASK_INFO0_INNER_L3TYPE_SHIFT	10
#define SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT	12
#define SQ_TASK_INFO0_PARSE_FLAG_SHIFT		13
#define SQ_TASK_INFO0_UFO_AVD_SHIFT		14
#define SQ_TASK_INFO0_TSO_UFO_SHIFT		15
#define SQ_TASK_INFO0_VLAN_TAG_SHIFT		16

#define SQ_TASK_INFO0_L2HDR_LEN_MASK		0xFFU
#define SQ_TASK_INFO0_L4OFFLOAD_MASK		0x3U
#define SQ_TASK_INFO0_INNER_L3TYPE_MASK		0x3U
#define SQ_TASK_INFO0_VLAN_OFFLOAD_MASK		0x1U
#define SQ_TASK_INFO0_PARSE_FLAG_MASK		0x1U
#define SQ_TASK_INFO0_UFO_AVD_MASK		0x1U
#define SQ_TASK_INFO0_TSO_UFO_MASK		0x1U
#define SQ_TASK_INFO0_VLAN_TAG_MASK		0xFFFFU

#define SQ_TASK_INFO0_SET(val, member)	\
	(((u32)(val) & SQ_TASK_INFO0_##member##_MASK) <<	\
	 SQ_TASK_INFO0_##member##_SHIFT)

#define SQ_TASK_INFO1_MD_TYPE_SHIFT		8
#define SQ_TASK_INFO1_INNER_L4LEN_SHIFT		16
#define SQ_TASK_INFO1_INNER_L3LEN_SHIFT		24

#define SQ_TASK_INFO1_MD_TYPE_MASK		0xFFU
#define SQ_TASK_INFO1_INNER_L4LEN_MASK		0xFFU
#define SQ_TASK_INFO1_INNER_L3LEN_MASK		0xFFU

#define SQ_TASK_INFO1_SET(val, member)	\
	(((val) & SQ_TASK_INFO1_##member##_MASK) <<	\
	 SQ_TASK_INFO1_##member##_SHIFT)

#define SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT	0
#define SQ_TASK_INFO2_OUTER_L3LEN_SHIFT		8
#define SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT	16
#define SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT	24

#define SQ_TASK_INFO2_TUNNEL_L4LEN_MASK		0xFFU
#define SQ_TASK_INFO2_OUTER_L3LEN_MASK		0xFFU
#define SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK	0x7U
#define SQ_TASK_INFO2_OUTER_L3TYPE_MASK		0x3U

#define SQ_TASK_INFO2_SET(val, member)	\
	(((val) & SQ_TASK_INFO2_##member##_MASK) <<	\
	 SQ_TASK_INFO2_##member##_SHIFT)

#define SQ_TASK_INFO4_L2TYPE_SHIFT		31

#define SQ_TASK_INFO4_L2TYPE_MASK		0x1U

#define SQ_TASK_INFO4_SET(val, member)	\
	(((u32)(val) & SQ_TASK_INFO4_##member##_MASK) <<	\
	 SQ_TASK_INFO4_##member##_SHIFT)

/* SQ_DB */
#define SQ_DB_OFF				0x00000800
#define SQ_DB_INFO_HI_PI_SHIFT			0
#define SQ_DB_INFO_QID_SHIFT			8
#define SQ_DB_INFO_CFLAG_SHIFT			23
#define SQ_DB_INFO_COS_SHIFT			24
#define SQ_DB_INFO_TYPE_SHIFT			27

#define SQ_DB_INFO_HI_PI_MASK			0xFFU
#define SQ_DB_INFO_QID_MASK			0x3FFU
#define SQ_DB_INFO_CFLAG_MASK			0x1U
#define SQ_DB_INFO_COS_MASK			0x7U
#define SQ_DB_INFO_TYPE_MASK			0x1FU
#define SQ_DB_INFO_SET(val, member)	\
	(((u32)(val) & SQ_DB_INFO_##member##_MASK) <<	\
	 SQ_DB_INFO_##member##_SHIFT)

#define SQ_DB					1
#define SQ_CFLAG_DP				0 /* CFLAG_DATA_PATH */

#define SQ_DB_PI_LOW_MASK			0xFF
#define SQ_DB_PI_LOW(pi)			((pi) & SQ_DB_PI_LOW_MASK)
#define SQ_DB_PI_HI_SHIFT			8
#define SQ_DB_PI_HIGH(pi)			((pi) >> SQ_DB_PI_HI_SHIFT)
#define SQ_DB_ADDR(sq, pi)	\
	((u64 *)((u8 __iomem *)((sq)->db_addr) + SQ_DB_OFF) + SQ_DB_PI_LOW(pi))

/* txq wq operations */
#define HINIC_GET_SQ_WQE_MASK(txq)	((txq)->wq->mask)

#define HINIC_GET_SQ_HW_CI(txq)	\
	((be16_to_cpu(*(txq)->cons_idx_addr)) & HINIC_GET_SQ_WQE_MASK(txq))

#define HINIC_GET_SQ_LOCAL_CI(txq)	\
	(((txq)->wq->cons_idx) & HINIC_GET_SQ_WQE_MASK(txq))

#define HINIC_UPDATE_SQ_LOCAL_CI(txq, wqebb_cnt)	\
	do {						\
		(txq)->wq->cons_idx += wqebb_cnt;	\
		(txq)->wq->delta += wqebb_cnt;		\
	} while (0)

#define HINIC_GET_SQ_FREE_WQEBBS(txq)	((txq)->wq->delta - 1)

#define HINIC_IS_SQ_EMPTY(txq)	(((txq)->wq->delta) == ((txq)->q_depth))

#define BUF_DESC_SIZE_SHIFT		4

#define HINIC_SQ_WQE_SIZE(num_sge)	\
	(sizeof(struct hinic_sq_ctrl) + sizeof(struct hinic_sq_task) +	\
	 (unsigned int)((num_sge) << BUF_DESC_SIZE_SHIFT))

#define HINIC_SQ_WQEBB_CNT(num_sge)	\
	(int)(ALIGN(HINIC_SQ_WQE_SIZE((u32)num_sge), \
		HINIC_SQ_WQEBB_SIZE) >> HINIC_SQ_WQEBB_SHIFT)


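/*
 * Byte-order helpers: WQEs are assembled in CPU byte order and converted to
 * big endian right before the doorbell is rung. Each 64B WQEBB is swapped as
 * four 16B lanes, using an SSE byte shuffle on x86_64, a NEON table lookup
 * on arm64, or the generic hinic_cpu_to_be32() helper otherwise.
 */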
static inline void hinic_sq_wqe_cpu_to_be32(void *data, int nr_wqebb)
{
#if defined(RTE_ARCH_X86_64)
	int i;
	__m128i *wqe_line = (__m128i *)data;
	__m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
					11, 4, 5, 6, 7, 0, 1, 2, 3);

	for (i = 0; i < nr_wqebb; i++) {
		/* convert 64B wqebb using 4 SSE instructions */
		wqe_line[0] = _mm_shuffle_epi8(wqe_line[0], shuf_mask);
		wqe_line[1] = _mm_shuffle_epi8(wqe_line[1], shuf_mask);
		wqe_line[2] = _mm_shuffle_epi8(wqe_line[2], shuf_mask);
		wqe_line[3] = _mm_shuffle_epi8(wqe_line[3], shuf_mask);
		wqe_line += 4;
	}
#elif defined(RTE_ARCH_ARM64)
	int i;
	uint8x16_t *wqe_line = (uint8x16_t *)data;
	const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
					9, 8, 15, 14, 13, 12};

	for (i = 0; i < nr_wqebb; i++) {
		wqe_line[0] = vqtbl1q_u8(wqe_line[0], shuf_mask);
		wqe_line[1] = vqtbl1q_u8(wqe_line[1], shuf_mask);
		wqe_line[2] = vqtbl1q_u8(wqe_line[2], shuf_mask);
		wqe_line[3] = vqtbl1q_u8(wqe_line[3], shuf_mask);
		wqe_line += 4;
	}
#else
	hinic_cpu_to_be32(data, nr_wqebb * HINIC_SQ_WQEBB_SIZE);
#endif
}

static inline void hinic_sge_cpu_to_be32(void *data, int nr_sge)
{
#if defined(RTE_ARCH_X86_64)
	int i;
	__m128i *sge_line = (__m128i *)data;
	__m128i shuf_mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10,
					11, 4, 5, 6, 7, 0, 1, 2, 3);

	for (i = 0; i < nr_sge; i++) {
		/* convert 16B sge using 1 SSE instruction */
		*sge_line = _mm_shuffle_epi8(*sge_line, shuf_mask);
		sge_line++;
	}
#elif defined(RTE_ARCH_ARM64)
	int i;
	uint8x16_t *sge_line = (uint8x16_t *)data;
	const uint8x16_t shuf_mask = {3, 2, 1, 0, 7, 6, 5, 4, 11, 10,
					9, 8, 15, 14, 13, 12};

	for (i = 0; i < nr_sge; i++) {
		*sge_line = vqtbl1q_u8(*sge_line, shuf_mask);
		sge_line++;
	}
#else
	hinic_cpu_to_be32(data, nr_sge * sizeof(struct hinic_sq_bufdesc));
#endif
}

void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	if (!txq || !stats) {
		PMD_DRV_LOG(ERR, "Txq or stats is NULL");
		return;
	}

	memcpy(stats, &txq->txq_stats, sizeof(txq->txq_stats));
}

void hinic_txq_stats_reset(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats;

	if (txq == NULL)
		return;

	txq_stats = &txq->txq_stats;
	memset(txq_stats, 0, sizeof(*txq_stats));
}

static inline struct rte_mbuf *hinic_copy_tx_mbuf(struct hinic_nic_dev *nic_dev,
						  struct rte_mbuf *mbuf,
						  u16 sge_cnt)
{
	struct rte_mbuf *dst_mbuf;
	u32 offset = 0;
	u16 i;

	if (unlikely(!nic_dev->cpy_mpool))
		return NULL;

	dst_mbuf = rte_pktmbuf_alloc(nic_dev->cpy_mpool);
	if (unlikely(!dst_mbuf))
		return NULL;

	dst_mbuf->data_off = 0;
	for (i = 0; i < sge_cnt; i++) {
		rte_memcpy((char *)dst_mbuf->buf_addr + offset,
			   (char *)mbuf->buf_addr + mbuf->data_off,
			   mbuf->data_len);
		dst_mbuf->data_len += mbuf->data_len;
		offset += mbuf->data_len;
		mbuf = mbuf->next;
	}

	dst_mbuf->pkt_len = dst_mbuf->data_len;

	return dst_mbuf;
}

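/*
 * Fill the WQE buffer descriptors with one SGE per mbuf segment. When the
 * WQE wraps past the end of the SQ ring (sqe_info->around), the tail SGEs
 * are written at the SQ head and converted to big endian separately. If
 * sqe_info->cpy_mbuf_cnt is set, the trailing segments are coalesced into a
 * single bounce mbuf so that the SGE limit is not exceeded.
 */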
static inline bool hinic_mbuf_dma_map_sge(struct hinic_txq *txq,
					  struct rte_mbuf *mbuf,
					  struct hinic_sq_bufdesc *sges,
					  struct hinic_wqe_info *sqe_info)
{
	dma_addr_t dma_addr;
	u16 i, around_sges;
	u16 nb_segs = sqe_info->sge_cnt - sqe_info->cpy_mbuf_cnt;
	u16 real_nb_segs = mbuf->nb_segs;
	struct hinic_sq_bufdesc *sge_idx = sges;

	if (unlikely(sqe_info->around)) {
		/* part of the wqe is at the sq bottom while the
		 * rest is at the sq head
		 */
		i = 0;
		for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr;
		     sge_idx++) {
			if (unlikely(mbuf == NULL)) {
				txq->txq_stats.mbuf_null++;
				return false;
			}

			dma_addr = rte_mbuf_data_iova(mbuf);
			if (unlikely(mbuf->data_len == 0)) {
				txq->txq_stats.sge_len0++;
				return false;
			}
			hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
				      mbuf->data_len);
			mbuf = mbuf->next;
			i++;
		}

		around_sges = nb_segs - i;
		sge_idx = (struct hinic_sq_bufdesc *)
				((void *)txq->sq_head_addr);
		for (; i < nb_segs; i++) {
			if (unlikely(mbuf == NULL)) {
				txq->txq_stats.mbuf_null++;
				return false;
			}

			dma_addr = rte_mbuf_data_iova(mbuf);
			if (unlikely(mbuf->data_len == 0)) {
				txq->txq_stats.sge_len0++;
				return false;
			}
			hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
				      mbuf->data_len);
			mbuf = mbuf->next;
			sge_idx++;
		}

		/* convert sges at head to big endian */
		hinic_sge_cpu_to_be32((void *)txq->sq_head_addr, around_sges);
	} else {
		/* wqe is in continuous space */
		for (i = 0; i < nb_segs; i++) {
			if (unlikely(mbuf == NULL)) {
				txq->txq_stats.mbuf_null++;
				return false;
			}

			dma_addr = rte_mbuf_data_iova(mbuf);
			if (unlikely(mbuf->data_len == 0)) {
				txq->txq_stats.sge_len0++;
				return false;
			}
			hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
				      mbuf->data_len);
			mbuf = mbuf->next;
			sge_idx++;
		}
	}

	/* for now: support non-tso packets over 17 sges by copying the trailing mbufs */
	if (unlikely(sqe_info->cpy_mbuf_cnt != 0)) {
		/* copy the excess mbuf segs into one valid buffer, at a performance cost */
		txq->txq_stats.cpy_pkts += 1;
		mbuf = hinic_copy_tx_mbuf(txq->nic_dev, mbuf,
					  real_nb_segs - nb_segs);
		if (unlikely(!mbuf))
			return false;

		txq->tx_info[sqe_info->pi].cpy_mbuf = mbuf;

		/* deal with the last mbuf */
		dma_addr = rte_mbuf_data_iova(mbuf);
		if (unlikely(mbuf->data_len == 0)) {
			txq->txq_stats.sge_len0++;
			return false;
		}
		hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
			      mbuf->data_len);
		if (unlikely(sqe_info->around))
			hinic_sge_cpu_to_be32((void *)sge_idx, 1);
	}

	return true;
}

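/*
 * Build the WQE control section: section lengths, data format and owner bit.
 * The MSS field of queue_info is forced to TX_MSS_DEFAULT when unset and
 * raised to TX_MSS_MIN (0x50) when a smaller value was requested.
 */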
static inline void hinic_fill_sq_wqe_header(struct hinic_sq_ctrl *ctrl,
					    u32 queue_info, int nr_descs,
					    u8 owner)
{
	u32 ctrl_size, task_size, bufdesc_size;

	ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
	task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
	bufdesc_size = HINIC_BUF_DESC_SIZE(nr_descs);

	ctrl->ctrl_fmt = SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
			SQ_CTRL_SET(task_size, TASKSECT_LEN)	|
			SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT)	|
			SQ_CTRL_SET(ctrl_size, LEN)		|
			SQ_CTRL_SET(owner, OWNER);

	ctrl->queue_info = queue_info;
	ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC);

	if (!SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS)) {
		ctrl->queue_info |=
			SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS);
	} else if (SQ_CTRL_QUEUE_INFO_GET(ctrl->queue_info, MSS) < TX_MSS_MIN) {
		/* mss should not be less than 80 */
		ctrl->queue_info =
			SQ_CTRL_QUEUE_INFO_CLEAR(ctrl->queue_info, MSS);
		ctrl->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS);
	}
}

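/*
 * TSO SGE validation: every run of up to HINIC_NONTSO_PKT_MAX_SGE consecutive
 * segments must carry at least one adjusted MSS of payload (plus the header
 * offset for the first run). If a run falls short, the remaining tail is
 * marked for copying into a single bounce mbuf; tails larger than
 * HINIC_COPY_MBUF_SIZE are rejected.
 */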
static inline bool hinic_is_tso_sge_valid(struct rte_mbuf *mbuf,
					  struct hinic_tx_offload_info
					  *poff_info,
					  struct hinic_wqe_info *sqe_info)
{
	u32 total_len, limit_len, checked_len, left_len, adjust_mss;
	u32 i, first_mss_sges, left_sges;
	struct rte_mbuf *mbuf_head, *mbuf_pre;

	left_sges = mbuf->nb_segs;
	mbuf_head = mbuf;

	/* tso sge number validation */
	if (unlikely(left_sges >= HINIC_NONTSO_PKT_MAX_SGE)) {
		checked_len = 0;
		adjust_mss = mbuf->tso_segsz >= TX_MSS_MIN ?
				mbuf->tso_segsz : TX_MSS_MIN;
		limit_len = adjust_mss + poff_info->payload_offset;
		first_mss_sges = HINIC_NONTSO_PKT_MAX_SGE;

		/* each contiguous run of 17 mbuf segs must pass one check */
		while (left_sges >= HINIC_NONTSO_PKT_MAX_SGE) {
			/* total len of the first first_mss_sges mbufs must be
			 * equal to or greater than limit_len
			 */
			total_len = 0;
			for (i = 0; i < first_mss_sges; i++) {
				total_len += mbuf->data_len;
				mbuf_pre = mbuf;
				mbuf = mbuf->next;
				if (total_len >= limit_len) {
					limit_len = adjust_mss;
					break;
				}
			}

			checked_len += total_len;

			/* try to copy if not valid */
			if (unlikely(first_mss_sges == i)) {
				left_sges -= first_mss_sges;
				checked_len -= mbuf_pre->data_len;

				left_len = mbuf_head->pkt_len - checked_len;
				if (left_len > HINIC_COPY_MBUF_SIZE)
					return false;

				sqe_info->sge_cnt = mbuf_head->nb_segs -
							left_sges;
				sqe_info->cpy_mbuf_cnt = 1;

				return true;
			}
			first_mss_sges = (HINIC_NONTSO_PKT_MAX_SGE - 1);

			/* continue with the next 16 mbufs */
			left_sges -= (i + 1);
		} /* end of while */
	}

	sqe_info->sge_cnt = mbuf_head->nb_segs;
	return true;
}

static inline void
hinic_set_l4_csum_info(struct hinic_sq_task *task,
		u32 *queue_info, struct hinic_tx_offload_info *poff_info)
{
	u32 tcp_udp_cs, sctp = 0;
	u16 l2hdr_len;

	if (unlikely(poff_info->inner_l4_type == SCTP_OFFLOAD_ENABLE))
		sctp = 1;

	tcp_udp_cs = poff_info->inner_l4_tcp_udp;

	if (poff_info->tunnel_type == TUNNEL_UDP_CSUM ||
	    poff_info->tunnel_type == TUNNEL_UDP_NO_CSUM) {
		l2hdr_len = poff_info->outer_l2_len;

		task->pkt_info2 |=
		SQ_TASK_INFO2_SET(poff_info->outer_l3_type, OUTER_L3TYPE) |
		SQ_TASK_INFO2_SET(poff_info->outer_l3_len, OUTER_L3LEN);
		task->pkt_info2 |=
		SQ_TASK_INFO2_SET(poff_info->tunnel_type, TUNNEL_L4TYPE) |
		SQ_TASK_INFO2_SET(poff_info->tunnel_length, TUNNEL_L4LEN);
	} else {
		l2hdr_len = poff_info->inner_l2_len;
	}

	task->pkt_info0 |= SQ_TASK_INFO0_SET(l2hdr_len, L2HDR_LEN);
	task->pkt_info1 |=
		SQ_TASK_INFO1_SET(poff_info->inner_l3_len, INNER_L3LEN);
	task->pkt_info0 |=
		SQ_TASK_INFO0_SET(poff_info->inner_l3_type, INNER_L3TYPE);
	task->pkt_info1 |=
		SQ_TASK_INFO1_SET(poff_info->inner_l4_len, INNER_L4LEN);
	task->pkt_info0 |=
		SQ_TASK_INFO0_SET(poff_info->inner_l4_type, L4OFFLOAD);
	*queue_info |=
		SQ_CTRL_QUEUE_INFO_SET(poff_info->payload_offset, PLDOFF) |
		SQ_CTRL_QUEUE_INFO_SET(tcp_udp_cs, TCPUDP_CS) |
		SQ_CTRL_QUEUE_INFO_SET(sctp, SCTP);
}

static inline void
hinic_set_tso_info(struct hinic_sq_task *task,
		u32 *queue_info, struct rte_mbuf *mbuf,
		struct hinic_tx_offload_info *poff_info)
{
	hinic_set_l4_csum_info(task, queue_info, poff_info);

	/* wqe for tso */
	task->pkt_info0 |=
		SQ_TASK_INFO0_SET(poff_info->inner_l3_type, INNER_L3TYPE);
	task->pkt_info0 |= SQ_TASK_INFO0_SET(TSO_ENABLE, TSO_UFO);
	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(TSO_ENABLE, TSO);
	/* qsf was initialized in prepare_sq_wqe */
	*queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS);
	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(mbuf->tso_segsz, MSS);
}

static inline void
hinic_set_vlan_tx_offload(struct hinic_sq_task *task,
			u32 *queue_info, u16 vlan_tag, u16 vlan_pri)
{
	task->pkt_info0 |= SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
				SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

	*queue_info |= SQ_CTRL_QUEUE_INFO_SET(vlan_pri, PRI);
}

static inline void
hinic_fill_tx_offload_info(struct rte_mbuf *mbuf,
		struct hinic_sq_task *task, u32 *queue_info,
		struct hinic_tx_offload_info *tx_off_info)
{
	u16 vlan_tag;
	uint64_t ol_flags = mbuf->ol_flags;

	/* clear DW0~2 of task section for offload */
	task->pkt_info0 = 0;
	task->pkt_info1 = 0;
	task->pkt_info2 = 0;

	/* Base VLAN */
	if (unlikely(ol_flags & RTE_MBUF_F_TX_VLAN)) {
		vlan_tag = mbuf->vlan_tci;
		hinic_set_vlan_tx_offload(task, queue_info, vlan_tag,
					  vlan_tag >> VLAN_PRIO_SHIFT);
	}

	/* non checksum or tso */
	if (unlikely(!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK)))
		return;

	if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG))
		/* set tso info for task and qsf */
		hinic_set_tso_info(task, queue_info, mbuf, tx_off_info);
	else /* just support l4 checksum offload */
		hinic_set_l4_csum_info(task, queue_info, tx_off_info);
}

static inline void hinic_xmit_mbuf_cleanup(struct hinic_txq *txq)
{
	struct hinic_tx_info *tx_info;
	struct rte_mbuf *mbuf, *m, *mbuf_free[HINIC_MAX_TX_FREE_BULK];
	int i, nb_free = 0;
	u16 hw_ci, sw_ci, sq_mask;
	int wqebb_cnt = 0;

	hw_ci = HINIC_GET_SQ_HW_CI(txq);
	sw_ci = HINIC_GET_SQ_LOCAL_CI(txq);
	sq_mask = HINIC_GET_SQ_WQE_MASK(txq);

	for (i = 0; i < txq->tx_free_thresh; ++i) {
		tx_info = &txq->tx_info[sw_ci];
		if (hw_ci == sw_ci ||
			(((hw_ci - sw_ci) & sq_mask) < tx_info->wqebb_cnt))
			break;

		sw_ci = (sw_ci + tx_info->wqebb_cnt) & sq_mask;

		if (unlikely(tx_info->cpy_mbuf != NULL)) {
			rte_pktmbuf_free(tx_info->cpy_mbuf);
			tx_info->cpy_mbuf = NULL;
		}

		wqebb_cnt += tx_info->wqebb_cnt;
		mbuf = tx_info->mbuf;

		if (likely(mbuf->nb_segs == 1)) {
			m = rte_pktmbuf_prefree_seg(mbuf);
			tx_info->mbuf = NULL;

			if (unlikely(m == NULL))
				continue;

			mbuf_free[nb_free++] = m;
			if (unlikely(m->pool != mbuf_free[0]->pool ||
				nb_free >= HINIC_MAX_TX_FREE_BULK)) {
				rte_mempool_put_bulk(mbuf_free[0]->pool,
					(void **)mbuf_free, (nb_free - 1));
				nb_free = 0;
				mbuf_free[nb_free++] = m;
			}
		} else {
			rte_pktmbuf_free(mbuf);
			tx_info->mbuf = NULL;
		}
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(mbuf_free[0]->pool, (void **)mbuf_free,
				     nb_free);

	HINIC_UPDATE_SQ_LOCAL_CI(txq, wqebb_cnt);
}

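/*
 * Reserve wqebb_cnt WQEBBs and return the WQE address at the current
 * producer index. The owner bit is toggled whenever the producer index
 * reaches the end of the ring, and wqe_info->around/seq_wqebbs describe how
 * much of the WQE fits before the ring wraps back to the head.
 */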
static inline struct hinic_sq_wqe *
hinic_get_sq_wqe(struct hinic_txq *txq, int wqebb_cnt,
		 struct hinic_wqe_info *wqe_info)
{
	u32 cur_pi, end_pi;
	u16 remain_wqebbs;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = txq->wq;

	/* record current pi */
	cur_pi = MASKED_WQE_IDX(wq, wq->prod_idx);
	end_pi = cur_pi + wqebb_cnt;

	/* update next pi and delta */
	wq->prod_idx += wqebb_cnt;
	wq->delta -= wqebb_cnt;

	/* return current pi and owner */
	wqe_info->pi = cur_pi;
	wqe_info->owner = sq->owner;
	wqe_info->around = 0;
	wqe_info->seq_wqebbs = wqebb_cnt;

	if (unlikely(end_pi >= txq->q_depth)) {
		/* update owner of next prod_idx */
		sq->owner = !sq->owner;

		/* turn around to head */
		if (unlikely(end_pi > txq->q_depth)) {
			wqe_info->around = 1;
			remain_wqebbs = txq->q_depth - cur_pi;
			wqe_info->seq_wqebbs = remain_wqebbs;
		}
	}

	return (struct hinic_sq_wqe *)WQ_WQE_ADDR(wq, cur_pi);
}

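/*
 * Pseudo-header checksum helpers: the inner L4 checksum field is seeded with
 * the IPv4/IPv6 pseudo-header checksum before the frame is handed to the
 * NIC. For TSO (RTE_MBUF_F_TX_TCP_SEG) the L4 length is excluded from the
 * pseudo-header.
 */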
static inline uint16_t
hinic_ipv4_phdr_cksum(const struct rte_ipv4_hdr *ipv4_hdr, uint64_t ol_flags)
{
	struct ipv4_psd_header {
		uint32_t src_addr; /* IP address of source host. */
		uint32_t dst_addr; /* IP address of destination host. */
		uint8_t zero; /* zero. */
		uint8_t proto; /* L4 protocol type. */
		uint16_t len; /* L4 length. */
	} psd_hdr;

	psd_hdr.src_addr = ipv4_hdr->src_addr;
	psd_hdr.dst_addr = ipv4_hdr->dst_addr;
	psd_hdr.zero = 0;
	psd_hdr.proto = ipv4_hdr->next_proto_id;
	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		psd_hdr.len = 0;
	} else {
		psd_hdr.len =
		rte_cpu_to_be_16(rte_be_to_cpu_16(ipv4_hdr->total_length) -
				 rte_ipv4_hdr_len(ipv4_hdr));
	}
	return rte_raw_cksum(&psd_hdr, sizeof(psd_hdr));
}

static inline uint16_t
hinic_ipv6_phdr_cksum(const struct rte_ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
{
	uint32_t sum;
	struct {
		uint32_t len;   /* L4 length. */
		uint32_t proto; /* L4 protocol - top 3 bytes must be zero */
	} psd_hdr;

	psd_hdr.proto = (ipv6_hdr->proto << 24);
	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
		psd_hdr.len = 0;
	else
		psd_hdr.len = ipv6_hdr->payload_len;

	sum = __rte_raw_cksum(&ipv6_hdr->src_addr,
		sizeof(ipv6_hdr->src_addr) + sizeof(ipv6_hdr->dst_addr), 0);
	sum = __rte_raw_cksum(&psd_hdr, sizeof(psd_hdr), sum);
	return __rte_raw_cksum_reduce(sum);
}

static inline void hinic_get_outer_cs_pld_offset(struct rte_mbuf *m,
					struct hinic_tx_offload_info *off_info)
{
	uint64_t ol_flags = m->ol_flags;

	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM)
		off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
					   m->l2_len + m->l3_len;
	else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
		off_info->payload_offset = m->outer_l2_len + m->outer_l3_len +
					   m->l2_len + m->l3_len + m->l4_len;
}

static inline void hinic_get_pld_offset(struct rte_mbuf *m,
					struct hinic_tx_offload_info *off_info)
{
	uint64_t ol_flags = m->ol_flags;

	if (((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) ||
	    ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_SCTP_CKSUM))
		off_info->payload_offset = m->l2_len + m->l3_len;
	else if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) || (ol_flags & RTE_MBUF_F_TX_TCP_SEG))
		off_info->payload_offset = m->l2_len + m->l3_len +
					   m->l4_len;
}

static inline void hinic_analyze_tx_info(struct rte_mbuf *mbuf,
					 struct hinic_tx_offload_info *off_info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_vlan_hdr *vlan_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	u16 eth_type;

	eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (eth_type == RTE_ETHER_TYPE_VLAN) {
		off_info->outer_l2_len = ETHER_LEN_WITH_VLAN;
		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
		eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	} else {
		off_info->outer_l2_len = ETHER_LEN_NO_VLAN;
	}

	if (eth_type == RTE_ETHER_TYPE_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   off_info->outer_l2_len);
		off_info->outer_l3_len = rte_ipv4_hdr_len(ipv4_hdr);
	} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
		/* ipv6 extension headers are not supported */
		off_info->outer_l3_len = sizeof(struct rte_ipv6_hdr);
	}
}

static inline void hinic_analyze_outer_ip_vxlan(struct rte_mbuf *mbuf,
					struct hinic_tx_offload_info *off_info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_vlan_hdr *vlan_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	u16 eth_type = 0;

	eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
	eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (eth_type == RTE_ETHER_TYPE_VLAN) {
		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
		eth_type = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	if (eth_type == RTE_ETHER_TYPE_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   mbuf->outer_l2_len);
		off_info->outer_l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		ipv4_hdr->hdr_checksum = 0;

		udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
						 mbuf->outer_l3_len);
		udp_hdr->dgram_cksum = 0;
	} else if (eth_type == RTE_ETHER_TYPE_IPV6) {
		off_info->outer_l3_type = IPV6_PKT;

		udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
						  (mbuf->outer_l2_len +
						   mbuf->outer_l3_len));
		udp_hdr->dgram_cksum = 0;
	}
}

static inline uint8_t hinic_analyze_l3_type(struct rte_mbuf *mbuf)
{
	uint8_t l3_type;
	uint64_t ol_flags = mbuf->ol_flags;

	if (ol_flags & RTE_MBUF_F_TX_IPV4)
		l3_type = (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
			  IPV4_PKT_WITH_CHKSUM_OFFLOAD :
			  IPV4_PKT_NO_CHKSUM_OFFLOAD;
	else if (ol_flags & RTE_MBUF_F_TX_IPV6)
		l3_type = IPV6_PKT;
	else
		l3_type = UNKNOWN_L3TYPE;

	return l3_type;
}

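/*
 * Per-protocol checksum preparation: clear the IPv4 header checksum when IP
 * checksum offload is requested, seed the TCP/UDP checksum with the matching
 * pseudo-header checksum, and record the inner L4 type for the task section.
 */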
static inline void hinic_calculate_tcp_checksum(struct rte_mbuf *mbuf,
					struct hinic_tx_offload_info *off_info,
					uint64_t inner_l3_offset)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	uint64_t ol_flags = mbuf->ol_flags;

	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   inner_l3_offset);

		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;

		tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
						 mbuf->l3_len);
		tcp_hdr->cksum = hinic_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
	} else {
		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
						   inner_l3_offset);
		tcp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_tcp_hdr *,
						  (inner_l3_offset +
						   mbuf->l3_len));
		tcp_hdr->cksum = hinic_ipv6_phdr_cksum(ipv6_hdr, ol_flags);
	}

	off_info->inner_l4_type = TCP_OFFLOAD_ENABLE;
	off_info->inner_l4_tcp_udp = 1;
}

static inline void hinic_calculate_udp_checksum(struct rte_mbuf *mbuf,
					struct hinic_tx_offload_info *off_info,
					uint64_t inner_l3_offset)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint64_t ol_flags = mbuf->ol_flags;

	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *,
						   inner_l3_offset);

		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;

		udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
						 mbuf->l3_len);
		udp_hdr->dgram_cksum = hinic_ipv4_phdr_cksum(ipv4_hdr,
							     ol_flags);
	} else {
		ipv6_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv6_hdr *,
						   inner_l3_offset);

		udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *,
						  (inner_l3_offset +
						   mbuf->l3_len));
		udp_hdr->dgram_cksum = hinic_ipv6_phdr_cksum(ipv6_hdr,
							     ol_flags);
	}

	off_info->inner_l4_type = UDP_OFFLOAD_ENABLE;
	off_info->inner_l4_tcp_udp = 1;
}

static inline void
hinic_calculate_sctp_checksum(struct hinic_tx_offload_info *off_info)
{
	off_info->inner_l4_type = SCTP_OFFLOAD_ENABLE;
	off_info->inner_l4_tcp_udp = 0;
	off_info->inner_l4_len = sizeof(struct rte_sctp_hdr);
}

static inline void hinic_calculate_checksum(struct rte_mbuf *mbuf,
					struct hinic_tx_offload_info *off_info,
					uint64_t inner_l3_offset)
{
	uint64_t ol_flags = mbuf->ol_flags;

	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_UDP_CKSUM:
		hinic_calculate_udp_checksum(mbuf, off_info, inner_l3_offset);
		break;

	case RTE_MBUF_F_TX_TCP_CKSUM:
		hinic_calculate_tcp_checksum(mbuf, off_info, inner_l3_offset);
		break;

	case RTE_MBUF_F_TX_SCTP_CKSUM:
		hinic_calculate_sctp_checksum(off_info);
		break;

	default:
		if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			hinic_calculate_tcp_checksum(mbuf, off_info,
						     inner_l3_offset);
		break;
	}
}

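/*
 * Parse the mbuf offload requests into hinic_tx_offload_info. Only VXLAN
 * tunnelling is supported; the payload offset derived from the header
 * lengths must not exceed MAX_PLD_OFFSET, otherwise the packet is rejected.
 */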
static inline int hinic_tx_offload_pkt_prepare(struct rte_mbuf *m,
					struct hinic_tx_offload_info *off_info)
{
	uint64_t inner_l3_offset;
	uint64_t ol_flags = m->ol_flags;

	/* Check whether the packet requests any supported offload */
	if (!(ol_flags & HINIC_TX_CKSUM_OFFLOAD_MASK))
		return 0;

	/* Support only vxlan offload */
	if (unlikely((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
		     !(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN)))
		return -ENOTSUP;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (rte_validate_tx_offload(m) != 0)
		return -EINVAL;
#endif

	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) {
		off_info->tunnel_type = TUNNEL_UDP_NO_CSUM;

		/* inner_l4_tcp_udp must be set so that the outer udp
		 * checksum is calculated for vxlan packets without
		 * inner l3 and l4 headers
		 */
		off_info->inner_l4_tcp_udp = 1;

		if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		    (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) ||
		    (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
			inner_l3_offset = m->l2_len + m->outer_l2_len +
					  m->outer_l3_len;
			off_info->outer_l2_len = m->outer_l2_len;
			off_info->outer_l3_len = m->outer_l3_len;
			/* only vxlan tunnelled pkts are supported */
			off_info->inner_l2_len = m->l2_len - VXLANLEN -
						 sizeof(struct rte_udp_hdr);
			off_info->tunnel_length = m->l2_len;

			hinic_analyze_outer_ip_vxlan(m, off_info);

			hinic_get_outer_cs_pld_offset(m, off_info);
		} else {
			inner_l3_offset = m->l2_len;
			hinic_analyze_tx_info(m, off_info);
			/* only vxlan tunnelled pkts are supported */
			off_info->inner_l2_len = m->l2_len - VXLANLEN -
						 sizeof(struct rte_udp_hdr) -
						 off_info->outer_l2_len -
						 off_info->outer_l3_len;
			off_info->tunnel_length = m->l2_len -
						  off_info->outer_l2_len -
						  off_info->outer_l3_len;
			off_info->outer_l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;

			hinic_get_pld_offset(m, off_info);
		}
	} else {
		inner_l3_offset = m->l2_len;
		off_info->inner_l2_len = m->l2_len;
		off_info->tunnel_type = NOT_TUNNEL;

		hinic_get_pld_offset(m, off_info);
	}

	/* invalid udp or tcp header */
	if (unlikely(off_info->payload_offset > MAX_PLD_OFFSET))
		return -EINVAL;

	off_info->inner_l3_len = m->l3_len;
	off_info->inner_l4_len = m->l4_len;
	off_info->inner_l3_type = hinic_analyze_l3_type(m);

	/* Process the pseudo-header checksum */
	hinic_calculate_checksum(m, off_info, inner_l3_offset);

	return 0;
}

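/*
 * Validate the segment layout and prepare the offload info for one packet:
 * non-TSO packets are limited to 64KB and HINIC_NONTSO_PKT_MAX_SGE segments
 * (longer chains fall back to copying the tail into one bounce mbuf), while
 * TSO packets are limited to HINIC_TSO_PKT_MAX_SGE segments and must pass
 * hinic_is_tso_sge_valid().
 */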
static inline bool hinic_get_sge_txoff_info(struct rte_mbuf *mbuf_pkt,
					    struct hinic_wqe_info *sqe_info,
					    struct hinic_tx_offload_info
					    *off_info)
{
	u16 i, total_len, sge_cnt = mbuf_pkt->nb_segs;
	struct rte_mbuf *mbuf;
	int ret;

	memset(off_info, 0, sizeof(*off_info));

	ret = hinic_tx_offload_pkt_prepare(mbuf_pkt, off_info);
	if (unlikely(ret))
		return false;

	sqe_info->cpy_mbuf_cnt = 0;

	/* non tso mbuf */
	if (likely(!(mbuf_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG))) {
		if (unlikely(mbuf_pkt->pkt_len > MAX_SINGLE_SGE_SIZE)) {
			/* non tso packet len must be less than 64KB */
			return false;
		} else if (unlikely(HINIC_NONTSO_SEG_NUM_INVALID(sge_cnt))) {
			/* a non tso packet must not use more than 17 buffers;
			 * chains with more than 17 mbuf segs have their tail
			 * copied into one buffer
			 */
			total_len = 0;
			mbuf = mbuf_pkt;
			for (i = 0; i < (HINIC_NONTSO_PKT_MAX_SGE - 1) ; i++) {
				total_len += mbuf->data_len;
				mbuf = mbuf->next;
			}

			/* by default up to 4KB of trailing mbuf segs can be copied */
			if ((u32)(total_len + (u16)HINIC_COPY_MBUF_SIZE) <
			    mbuf_pkt->pkt_len)
				return false;

			sqe_info->sge_cnt = HINIC_NONTSO_PKT_MAX_SGE;
			sqe_info->cpy_mbuf_cnt = 1;
			return true;
		}

		/* valid non tso mbuf */
		sqe_info->sge_cnt = sge_cnt;
	} else {
		/* tso mbuf */
		if (unlikely(HINIC_TSO_SEG_NUM_INVALID(sge_cnt)))
			/* too many mbuf segs */
			return false;

		/* check whether the tso mbuf segs are valid */
		if (unlikely(!hinic_is_tso_sge_valid(mbuf_pkt,
						     off_info, sqe_info)))
			return false;
	}

	return true;
}

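/*
 * Ring the SQ doorbell: the doorbell word carries the high bits of the
 * producer index, the queue id, CoS and doorbell type; it is converted to
 * big endian and written only after a write barrier so that all WQE stores
 * are visible to the device first.
 */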
u16 hinic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, u16 nb_pkts)
{
	int free_wqebb_cnt, wqe_wqebb_cnt;
	u32 queue_info, tx_bytes = 0;
	u16 nb_tx;
	struct hinic_wqe_info sqe_info;
	struct hinic_tx_offload_info off_info;
	struct rte_mbuf *mbuf_pkt;
	struct hinic_txq *txq = tx_queue;
	struct hinic_tx_info *tx_info;
	struct hinic_sq_wqe *sq_wqe;
	struct hinic_sq_task *task;

	/* reclaim tx mbufs before transmitting new packets */
	if (HINIC_GET_SQ_FREE_WQEBBS(txq) < txq->tx_free_thresh)
		hinic_xmit_mbuf_cleanup(txq);

	/* tx loop routine */
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		mbuf_pkt = *tx_pkts++;
		queue_info = 0;

		/* 1. parse sge and tx offload info from mbuf */
		if (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt,
						       &sqe_info, &off_info))) {
			txq->txq_stats.off_errs++;
			break;
		}

		/* 2. try to get enough wqebbs */
		wqe_wqebb_cnt = HINIC_SQ_WQEBB_CNT(sqe_info.sge_cnt);
		free_wqebb_cnt = HINIC_GET_SQ_FREE_WQEBBS(txq);
		if (unlikely(wqe_wqebb_cnt > free_wqebb_cnt)) {
			/* reclaim again */
			hinic_xmit_mbuf_cleanup(txq);
			free_wqebb_cnt = HINIC_GET_SQ_FREE_WQEBBS(txq);
			if (unlikely(wqe_wqebb_cnt > free_wqebb_cnt)) {
				txq->txq_stats.tx_busy += (nb_pkts - nb_tx);
				break;
			}
		}

		/* 3. get the sq tail wqe address from the wqe page;
		 * the sq has enough wqebbs for this packet
		 */
		sq_wqe = hinic_get_sq_wqe(txq, wqe_wqebb_cnt, &sqe_info);

		/* 4. fill sq wqe sge section */
		if (unlikely(!hinic_mbuf_dma_map_sge(txq, mbuf_pkt,
						     sq_wqe->buf_descs,
						     &sqe_info))) {
			hinic_return_sq_wqe(txq->nic_dev->hwdev, txq->q_id,
					    wqe_wqebb_cnt, sqe_info.owner);
			txq->txq_stats.off_errs++;
			break;
		}

		/* 5. fill sq wqe task section and queue info */
		task = &sq_wqe->task;

		/* configure tx packet offload */
		hinic_fill_tx_offload_info(mbuf_pkt, task, &queue_info,
					   &off_info);

		/* 6. record tx info */
		tx_info = &txq->tx_info[sqe_info.pi];
		tx_info->mbuf = mbuf_pkt;
		tx_info->wqebb_cnt = wqe_wqebb_cnt;
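		/* note: the mbuf recorded here is released only after the
		 * hardware reports its WQEBBs as consumed, either by the
		 * reclaim path through hinic_xmit_mbuf_cleanup() or, on
		 * queue stop, by hinic_free_all_tx_mbufs() below.
		 */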
		/* 7. fill sq wqe header section */
		hinic_fill_sq_wqe_header(&sq_wqe->ctrl, queue_info,
					 sqe_info.sge_cnt, sqe_info.owner);

		/* 8. convert continuation or bottom wqe byte order to
		 * big endian
		 */
		hinic_sq_wqe_cpu_to_be32(sq_wqe, sqe_info.seq_wqebbs);

		tx_bytes += mbuf_pkt->pkt_len;
	}

	/* 9. write sq doorbell in burst mode */
	if (nb_tx) {
		hinic_sq_write_db(txq->sq, txq->cos);

		txq->txq_stats.packets += nb_tx;
		txq->txq_stats.bytes += tx_bytes;
	}
	txq->txq_stats.burst_pkts = nb_tx;

	return nb_tx;
}

void hinic_free_all_tx_mbufs(struct hinic_txq *txq)
{
	u16 ci;
	struct hinic_nic_dev *nic_dev = txq->nic_dev;
	struct hinic_tx_info *tx_info;
	int free_wqebbs = hinic_get_sq_free_wqebbs(nic_dev->hwdev,
						   txq->q_id) + 1;

	while (free_wqebbs < txq->q_depth) {
		ci = hinic_get_sq_local_ci(nic_dev->hwdev, txq->q_id);

		tx_info = &txq->tx_info[ci];

		if (unlikely(tx_info->cpy_mbuf != NULL)) {
			rte_pktmbuf_free(tx_info->cpy_mbuf);
			tx_info->cpy_mbuf = NULL;
		}

		rte_pktmbuf_free(tx_info->mbuf);
		hinic_update_sq_local_ci(nic_dev->hwdev, txq->q_id,
					 tx_info->wqebb_cnt);

		free_wqebbs += tx_info->wqebb_cnt;
		tx_info->mbuf = NULL;
	}
}

void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev)
{
	u16 q_id;
	struct hinic_nic_dev *nic_dev =
				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);

	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
		if (eth_dev->data->tx_queues != NULL)
			eth_dev->data->tx_queues[q_id] = NULL;

		if (nic_dev->txqs[q_id] == NULL)
			continue;

		/* stop the tx queue and free its pending tx mbufs */
		hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
		hinic_free_tx_resources(nic_dev->txqs[q_id]);

		/* free txq */
		kfree(nic_dev->txqs[q_id]);
		nic_dev->txqs[q_id] = NULL;
	}
}
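/*
 * Release the tx mbufs still pending on every SQ without tearing down the
 * queue structures themselves; hinic_free_all_tx_resources() above is the
 * variant that additionally frees the per-queue tx_info array and the txq.
 */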
void hinic_free_all_tx_mbuf(struct rte_eth_dev *eth_dev)
{
	u16 q_id;
	struct hinic_nic_dev *nic_dev =
				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);

	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
		/* stop the tx queue and free its pending tx mbufs */
		hinic_free_all_tx_mbufs(nic_dev->txqs[q_id]);
}

int hinic_setup_tx_resources(struct hinic_txq *txq)
{
	u64 tx_info_sz;

	tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
	txq->tx_info = rte_zmalloc_socket("tx_info", tx_info_sz,
					  RTE_CACHE_LINE_SIZE, txq->socket_id);
	if (!txq->tx_info)
		return -ENOMEM;

	return HINIC_OK;
}

void hinic_free_tx_resources(struct hinic_txq *txq)
{
	if (txq->tx_info == NULL)
		return;

	rte_free(txq->tx_info);
	txq->tx_info = NULL;
}

int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id,
		    u16 sq_depth, unsigned int socket_id)
{
	int err;
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_qp *qp = &nic_io->qps[q_id];
	struct hinic_sq *sq = &qp->sq;
	void __iomem *db_addr;
	volatile u32 *ci_addr;

	sq->sq_depth = sq_depth;
	nic_io->sq_depth = sq_depth;

	/* allocate the work queue */
	err = hinic_wq_allocate(nic_io->hwdev, &nic_io->sq_wq[q_id],
				HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth,
				socket_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ");
		return err;
	}

	/* allocate sq doorbell space */
	err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init db addr");
		goto alloc_db_err;
	}

	/* clear the hardware consumer index */
	ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id);
	*ci_addr = 0;

	sq->q_id = q_id;
	sq->wq = &nic_io->sq_wq[q_id];
	sq->owner = 1;
	sq->cons_idx_addr = (volatile u16 *)ci_addr;
	sq->db_addr = db_addr;

	return HINIC_OK;

alloc_db_err:
	hinic_wq_free(nic_io->hwdev, &nic_io->sq_wq[q_id]);

	return err;
}
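/*
 * Undo hinic_create_sq(): release the doorbell address and the work queue.
 * The NULL check on sq.wq makes it safe to call for a queue that was never
 * created or has already been destroyed.
 */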
void hinic_destroy_sq(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io;
	struct hinic_qp *qp;

	nic_io = hwdev->nic_io;
	qp = &nic_io->qps[q_id];

	if (qp->sq.wq == NULL)
		return;

	hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr);
	hinic_wq_free(nic_io->hwdev, qp->sq.wq);
	qp->sq.wq = NULL;
}
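/*
 * Usage sketch (illustrative): hinic_xmit_pkts() is expected to be installed
 * as the device's tx_pkt_burst handler during ethdev setup, so an application
 * normally reaches it through rte_eth_tx_burst(), e.g.:
 *
 *     struct rte_mbuf *pkts[32];
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 *
 * port_id, queue_id, and nb are placeholder values; mbufs not accepted in a
 * given call remain owned by the caller and may be retried or freed by it.
 */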