/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "../common/rx.h"
#include "ice_rxtx.h"

static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
			  uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /* finished pkts */
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

static inline int
ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
}

static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (unlikely(!rxq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
		return;
	}

	if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

#define ICE_TX_NO_VECTOR_FLAGS ( \
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_TSO | \
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
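
/*
 * Any Tx offload in ICE_TX_NO_VECTOR_FLAGS above disqualifies a queue from
 * the vector Tx path altogether, while the offloads in ICE_TX_VECTOR_OFFLOAD
 * and ICE_RX_VECTOR_OFFLOAD below are supported, but only by the
 * offload-enabled vector paths (ICE_VECTOR_OFFLOAD_PATH). See
 * ice_tx_vec_queue_default() and ice_rx_vec_queue_default() for the
 * per-queue selection.
 */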

#define ICE_TX_VECTOR_OFFLOAD ( \
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define ICE_RX_VECTOR_OFFLOAD ( \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
		RTE_ETH_RX_OFFLOAD_VLAN | \
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define ICE_VECTOR_PATH		0
#define ICE_VECTOR_OFFLOAD_PATH	1

static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!ci_rxq_vec_capable(rxq->nb_rx_desc, rxq->rx_free_thresh, rxq->offloads))
		return -1;

	if (rxq->proto_xtr != PROTO_XTR_NONE)
		return -1;

	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

static inline int
ice_tx_vec_queue_default(struct ci_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
		return -1;

	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1;

	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ice_rx_queue *rxq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = ice_rx_vec_queue_default(rxq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ci_tx_queue *txq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = ice_tx_vec_queue_default(txq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}
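
/*
 * Illustrative sketch only (not part of this header): a vector Tx routine is
 * expected to build the data descriptor's high quad-word and, on the offload
 * path, let ice_txd_enable_offload() below fold the checksum/VLAN fields into
 * it before writing the descriptor. The variable names and the surrounding
 * qword assembly shown here are assumptions for the example, not definitions
 * from this file.
 *
 *	uint64_t hi_qw = ICE_TX_DESC_DTYPE_DATA |
 *		((uint64_t)ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S) |
 *		((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
 *
 *	if (do_offload)
 *		ice_txd_enable_offload(pkt, &hi_qw);
 *
 *	txdp->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(pkt));
 *	txdp->cmd_type_offset_bsz = rte_cpu_to_le_64(hi_qw);
 */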

static inline void
ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
		       uint64_t *txd_hi)
{
	uint64_t ol_flags = tx_pkt->ol_flags;
	uint32_t td_cmd = 0;
	uint32_t td_offset = 0;

	/* Tx Checksum Offload */
	/* SET MACLEN */
	td_offset |= (tx_pkt->l2_len >> 1) <<
			ICE_TX_DESC_LEN_MACLEN_S;

	/* Enable L3 checksum offload */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				ICE_TX_DESC_LEN_IPLEN_S;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
				ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
				ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
				ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;

	/* Tx VLAN/QINQ insertion Offload */
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
				ICE_TXD_QW1_L2TAG1_S);
	}

	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}
#endif
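
/*
 * Illustrative sketch only (not part of this header): the device-level check
 * helpers above are expected to drive burst-function selection roughly as
 * below; the select_*() names are hypothetical placeholders, not symbols
 * defined by the driver.
 *
 *	int path = ice_rx_vec_dev_check_default(dev);
 *
 *	if (path < 0)
 *		select_scalar_rx(dev);
 *	else if (path == ICE_VECTOR_OFFLOAD_PATH)
 *		select_vector_rx_offload(dev);
 *	else		// ICE_VECTOR_PATH
 *		select_vector_rx(dev);
 */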