/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef _ICE_RXTX_VEC_COMMON_H_
#define _ICE_RXTX_VEC_COMMON_H_

#include "../common/rx.h"
#include "ice_rxtx.h"

static inline uint16_t
ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
			  uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[ICE_VPMD_RX_BURST] = {0}; /*finished pkts*/
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

static inline int
ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
}

static __rte_always_inline int
ice_tx_free_bufs_vec(struct ci_tx_queue *txq)
{
	return ci_tx_free_bufs(txq, ice_tx_desc_done);
}

static inline void
_ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (unlikely(!rxq->sw_ring)) {
		PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
		return;
	}

	if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i].mbuf)
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

static inline int
ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
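/*
 * Illustrative sketch only, not part of the driver: the 64-bit
 * mbuf_initializer built in ice_rxq_vec_setup_default() is meant to be
 * copied over the rearm_data area of every freshly allocated mbuf on the
 * rearm path, initializing data_off, refcnt, nb_segs and port with a single
 * store. The helper name below is hypothetical.
 */
static inline void
ice_rearm_one_mbuf_sketch(struct ice_rx_queue *rxq, struct rte_mbuf *mb)
{
	uintptr_t p = (uintptr_t)&mb->rearm_data;

	/* one 64-bit store covers the four 16-bit rearm fields */
	*(uint64_t *)p = rxq->mbuf_initializer;
}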
#define ICE_TX_NO_VECTOR_FLAGS (			\
		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |	\
		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)

#define ICE_TX_VECTOR_OFFLOAD (				\
		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |	\
		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)

#define ICE_RX_VECTOR_OFFLOAD (				\
		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
		RTE_ETH_RX_OFFLOAD_VLAN |		\
		RTE_ETH_RX_OFFLOAD_RSS_HASH)

#define ICE_VECTOR_PATH		0
#define ICE_VECTOR_OFFLOAD_PATH	1

/* Return -1 if the queue cannot use the vector Rx path, otherwise the path
 * variant it qualifies for (ICE_VECTOR_PATH or ICE_VECTOR_OFFLOAD_PATH).
 */
static inline int
ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;

	if (rxq->rx_free_thresh < ICE_VPMD_RX_BURST)
		return -1;

	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;

	if (rxq->proto_xtr != PROTO_XTR_NONE)
		return -1;

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		return -1;

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
		return -1;

	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

/* Same check as above, for the vector Tx path. */
static inline int
ice_tx_vec_queue_default(struct ci_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
		return -1;

	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
		return -1;

	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
		return ICE_VECTOR_OFFLOAD_PATH;

	return ICE_VECTOR_PATH;
}

static inline int
ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ice_rx_queue *rxq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = ice_rx_vec_queue_default(rxq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

static inline int
ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ci_tx_queue *txq;
	int ret = 0;
	int result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = ice_tx_vec_queue_default(txq);
		if (ret < 0)
			return -1;
		if (ret == ICE_VECTOR_OFFLOAD_PATH)
			result = ret;
	}

	return result;
}

static inline void
ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
		       uint64_t *txd_hi)
{
	uint64_t ol_flags = tx_pkt->ol_flags;
	uint32_t td_cmd = 0;
	uint32_t td_offset = 0;

	/* Tx Checksum Offload */
	/* SET MACLEN */
	td_offset |= (tx_pkt->l2_len >> 1) <<
			ICE_TX_DESC_LEN_MACLEN_S;

	/* Enable L3 checksum offload */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				ICE_TX_DESC_LEN_IPLEN_S;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				ICE_TX_DESC_LEN_IPLEN_S;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
				ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
				ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
				ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;

	/* Tx VLAN/QINQ insertion Offload */
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
				ICE_TXD_QW1_L2TAG1_S);
	}

	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
}
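/*
 * Illustrative sketch only, not part of the driver: on the offload Tx path
 * the vector code typically builds the descriptor high qword per packet and
 * then lets ice_txd_enable_offload() merge in the checksum/VLAN command and
 * offset fields. The helper name is hypothetical; the ICE_TX_DESC_xxx and
 * ICE_TXD_QW1_xxx macros are assumed to come from the base code headers
 * pulled in through ice_rxtx.h.
 */
static inline uint64_t
ice_build_txd_qw1_sketch(struct rte_mbuf *pkt)
{
	/* data descriptor with end-of-packet set and the buffer length */
	uint64_t hi_qw = ICE_TX_DESC_DTYPE_DATA |
		((uint64_t)ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S) |
		((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);

	/* fold in per-packet checksum and VLAN insertion requests */
	ice_txd_enable_offload(pkt, &hi_qw);
	return hi_qw;
}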
#endif /* _ICE_RXTX_VEC_COMMON_H_ */