1c1d14583SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 2c1d14583SBruce Richardson * Copyright(c) 2018 Intel Corporation 3c1d14583SBruce Richardson */ 4c1d14583SBruce Richardson 5c1d14583SBruce Richardson #include <ethdev_driver.h> 6c1d14583SBruce Richardson #include <rte_net.h> 7c1d14583SBruce Richardson #include <rte_vect.h> 8c1d14583SBruce Richardson 9c1d14583SBruce Richardson #include "ice_rxtx.h" 10c1d14583SBruce Richardson #include "ice_rxtx_vec_common.h" 11c1d14583SBruce Richardson 12c1d14583SBruce Richardson #define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \ 13c1d14583SBruce Richardson RTE_MBUF_F_TX_L4_MASK | \ 14c1d14583SBruce Richardson RTE_MBUF_F_TX_TCP_SEG | \ 15c1d14583SBruce Richardson RTE_MBUF_F_TX_UDP_SEG | \ 16c1d14583SBruce Richardson RTE_MBUF_F_TX_OUTER_IP_CKSUM) 17c1d14583SBruce Richardson 18c1d14583SBruce Richardson /** 19c1d14583SBruce Richardson * The mbuf dynamic field pointer for protocol extraction metadata. 20c1d14583SBruce Richardson */ 21c1d14583SBruce Richardson #define ICE_DYNF_PROTO_XTR_METADATA(m, n) \ 22c1d14583SBruce Richardson RTE_MBUF_DYNFIELD((m), (n), uint32_t *) 23c1d14583SBruce Richardson 24c1d14583SBruce Richardson static int 25c1d14583SBruce Richardson ice_monitor_callback(const uint64_t value, 26c1d14583SBruce Richardson const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused) 27c1d14583SBruce Richardson { 28c1d14583SBruce Richardson const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S); 29c1d14583SBruce Richardson /* 30c1d14583SBruce Richardson * we expect the DD bit to be set to 1 if this descriptor was already 31c1d14583SBruce Richardson * written to. 32c1d14583SBruce Richardson */ 33c1d14583SBruce Richardson return (value & m) == m ? -1 : 0; 34c1d14583SBruce Richardson } 35c1d14583SBruce Richardson 36c1d14583SBruce Richardson int 37c1d14583SBruce Richardson ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc) 38c1d14583SBruce Richardson { 39c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 40c1d14583SBruce Richardson struct ice_rx_queue *rxq = rx_queue; 41c1d14583SBruce Richardson uint16_t desc; 42c1d14583SBruce Richardson 43c1d14583SBruce Richardson desc = rxq->rx_tail; 44c1d14583SBruce Richardson rxdp = &rxq->rx_ring[desc]; 45c1d14583SBruce Richardson /* watch for changes in status bit */ 46c1d14583SBruce Richardson pmc->addr = &rxdp->wb.status_error0; 47c1d14583SBruce Richardson 48c1d14583SBruce Richardson /* comparison callback */ 49c1d14583SBruce Richardson pmc->fn = ice_monitor_callback; 50c1d14583SBruce Richardson 51c1d14583SBruce Richardson /* register is 16-bit */ 52c1d14583SBruce Richardson pmc->size = sizeof(uint16_t); 53c1d14583SBruce Richardson 54c1d14583SBruce Richardson return 0; 55c1d14583SBruce Richardson } 56c1d14583SBruce Richardson 57c1d14583SBruce Richardson 58c1d14583SBruce Richardson static inline uint8_t 59c1d14583SBruce Richardson ice_proto_xtr_type_to_rxdid(uint8_t xtr_type) 60c1d14583SBruce Richardson { 61c1d14583SBruce Richardson static uint8_t rxdid_map[] = { 62c1d14583SBruce Richardson [PROTO_XTR_NONE] = ICE_RXDID_COMMS_OVS, 63c1d14583SBruce Richardson [PROTO_XTR_VLAN] = ICE_RXDID_COMMS_AUX_VLAN, 64c1d14583SBruce Richardson [PROTO_XTR_IPV4] = ICE_RXDID_COMMS_AUX_IPV4, 65c1d14583SBruce Richardson [PROTO_XTR_IPV6] = ICE_RXDID_COMMS_AUX_IPV6, 66c1d14583SBruce Richardson [PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW, 67c1d14583SBruce Richardson [PROTO_XTR_TCP] = ICE_RXDID_COMMS_AUX_TCP, 68c1d14583SBruce Richardson [PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET, 69c1d14583SBruce Richardson }; 70c1d14583SBruce Richardson 71c1d14583SBruce Richardson return xtr_type < RTE_DIM(rxdid_map) ? 72c1d14583SBruce Richardson rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS; 73c1d14583SBruce Richardson } 74c1d14583SBruce Richardson 75c1d14583SBruce Richardson static inline void 76c1d14583SBruce Richardson ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq, 77c1d14583SBruce Richardson struct rte_mbuf *mb, 78c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp) 79c1d14583SBruce Richardson { 80c1d14583SBruce Richardson volatile struct ice_32b_rx_flex_desc_comms *desc = 81c1d14583SBruce Richardson (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 82c1d14583SBruce Richardson uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0); 83c1d14583SBruce Richardson 84c1d14583SBruce Richardson if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 85c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 86c1d14583SBruce Richardson mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 87c1d14583SBruce Richardson } 88c1d14583SBruce Richardson 89c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 90c1d14583SBruce Richardson if (desc->flow_id != 0xFFFFFFFF) { 91c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 92c1d14583SBruce Richardson mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 93c1d14583SBruce Richardson } 94c1d14583SBruce Richardson #endif 95c1d14583SBruce Richardson } 96c1d14583SBruce Richardson 97c1d14583SBruce Richardson static inline void 98c1d14583SBruce Richardson ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq, 99c1d14583SBruce Richardson struct rte_mbuf *mb, 100c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp) 101c1d14583SBruce Richardson { 102c1d14583SBruce Richardson volatile struct ice_32b_rx_flex_desc_comms_ovs *desc = 103c1d14583SBruce Richardson (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp; 104c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 105c1d14583SBruce Richardson uint16_t stat_err; 106c1d14583SBruce Richardson #endif 107c1d14583SBruce Richardson 108c1d14583SBruce Richardson if (desc->flow_id != 0xFFFFFFFF) { 109c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 110c1d14583SBruce Richardson mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 111c1d14583SBruce Richardson } 112c1d14583SBruce Richardson 113c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 114c1d14583SBruce Richardson stat_err = rte_le_to_cpu_16(desc->status_error0); 115c1d14583SBruce Richardson if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 116c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 117c1d14583SBruce Richardson mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 118c1d14583SBruce Richardson } 119c1d14583SBruce Richardson #endif 120c1d14583SBruce Richardson } 121c1d14583SBruce Richardson 122c1d14583SBruce Richardson static inline void 123c1d14583SBruce Richardson ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq, 124c1d14583SBruce Richardson struct rte_mbuf *mb, 125c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp) 126c1d14583SBruce Richardson { 127c1d14583SBruce Richardson volatile struct ice_32b_rx_flex_desc_comms *desc = 128c1d14583SBruce Richardson (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 129c1d14583SBruce Richardson uint16_t stat_err; 130c1d14583SBruce Richardson 131c1d14583SBruce Richardson stat_err = rte_le_to_cpu_16(desc->status_error0); 132c1d14583SBruce Richardson if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 133c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 134c1d14583SBruce Richardson mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 135c1d14583SBruce Richardson } 136c1d14583SBruce Richardson 137c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 138c1d14583SBruce Richardson if (desc->flow_id != 0xFFFFFFFF) { 139c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 140c1d14583SBruce Richardson mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 141c1d14583SBruce Richardson } 142c1d14583SBruce Richardson 143c1d14583SBruce Richardson if (rxq->xtr_ol_flag) { 144c1d14583SBruce Richardson uint32_t metadata = 0; 145c1d14583SBruce Richardson 146c1d14583SBruce Richardson stat_err = rte_le_to_cpu_16(desc->status_error1); 147c1d14583SBruce Richardson 148c1d14583SBruce Richardson if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S)) 149c1d14583SBruce Richardson metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); 150c1d14583SBruce Richardson 151c1d14583SBruce Richardson if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S)) 152c1d14583SBruce Richardson metadata |= 153c1d14583SBruce Richardson rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16; 154c1d14583SBruce Richardson 155c1d14583SBruce Richardson if (metadata) { 156c1d14583SBruce Richardson mb->ol_flags |= rxq->xtr_ol_flag; 157c1d14583SBruce Richardson 158c1d14583SBruce Richardson *ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata; 159c1d14583SBruce Richardson } 160c1d14583SBruce Richardson } 161c1d14583SBruce Richardson #else 162c1d14583SBruce Richardson RTE_SET_USED(rxq); 163c1d14583SBruce Richardson #endif 164c1d14583SBruce Richardson } 165c1d14583SBruce Richardson 166c1d14583SBruce Richardson static inline void 167c1d14583SBruce Richardson ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq, 168c1d14583SBruce Richardson struct rte_mbuf *mb, 169c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp) 170c1d14583SBruce Richardson { 171c1d14583SBruce Richardson volatile struct ice_32b_rx_flex_desc_comms *desc = 172c1d14583SBruce Richardson (volatile struct ice_32b_rx_flex_desc_comms *)rxdp; 173c1d14583SBruce Richardson uint16_t stat_err; 174c1d14583SBruce Richardson 175c1d14583SBruce Richardson stat_err = rte_le_to_cpu_16(desc->status_error0); 176c1d14583SBruce Richardson if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) { 177c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; 178c1d14583SBruce Richardson mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash); 179c1d14583SBruce Richardson } 180c1d14583SBruce Richardson 181c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 182c1d14583SBruce Richardson if (desc->flow_id != 0xFFFFFFFF) { 183c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID; 184c1d14583SBruce Richardson mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); 185c1d14583SBruce Richardson } 186c1d14583SBruce Richardson 187c1d14583SBruce Richardson if (rxq->xtr_ol_flag) { 188c1d14583SBruce Richardson uint32_t metadata = 0; 189c1d14583SBruce Richardson 190c1d14583SBruce Richardson if (desc->flex_ts.flex.aux0 != 0xFFFF) 191c1d14583SBruce Richardson metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0); 192c1d14583SBruce Richardson else if (desc->flex_ts.flex.aux1 != 0xFFFF) 193c1d14583SBruce Richardson metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1); 194c1d14583SBruce Richardson 195c1d14583SBruce Richardson if (metadata) { 196c1d14583SBruce Richardson mb->ol_flags |= rxq->xtr_ol_flag; 197c1d14583SBruce Richardson 198c1d14583SBruce Richardson *ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata; 199c1d14583SBruce Richardson } 200c1d14583SBruce Richardson } 201c1d14583SBruce Richardson #else 202c1d14583SBruce Richardson RTE_SET_USED(rxq); 203c1d14583SBruce Richardson #endif 204c1d14583SBruce Richardson } 205c1d14583SBruce Richardson 206c1d14583SBruce Richardson static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = { 207c1d14583SBruce Richardson [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 208c1d14583SBruce Richardson [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 209c1d14583SBruce Richardson [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 210c1d14583SBruce Richardson [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 211c1d14583SBruce Richardson [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1, 212c1d14583SBruce Richardson [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2, 213c1d14583SBruce Richardson [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic, 214c1d14583SBruce Richardson [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs, 215c1d14583SBruce Richardson }; 216c1d14583SBruce Richardson 217c1d14583SBruce Richardson void 218c1d14583SBruce Richardson ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid) 219c1d14583SBruce Richardson { 220c1d14583SBruce Richardson rxq->rxdid = rxdid; 221c1d14583SBruce Richardson 222c1d14583SBruce Richardson switch (rxdid) { 223c1d14583SBruce Richardson case ICE_RXDID_COMMS_AUX_VLAN: 224c1d14583SBruce Richardson case ICE_RXDID_COMMS_AUX_IPV4: 225c1d14583SBruce Richardson case ICE_RXDID_COMMS_AUX_IPV6: 226c1d14583SBruce Richardson case ICE_RXDID_COMMS_AUX_IPV6_FLOW: 227c1d14583SBruce Richardson case ICE_RXDID_COMMS_AUX_TCP: 228c1d14583SBruce Richardson case ICE_RXDID_COMMS_AUX_IP_OFFSET: 229c1d14583SBruce Richardson break; 230c1d14583SBruce Richardson case ICE_RXDID_COMMS_GENERIC: 231c1d14583SBruce Richardson /* fallthrough */ 232c1d14583SBruce Richardson case ICE_RXDID_COMMS_OVS: 233c1d14583SBruce Richardson break; 234c1d14583SBruce Richardson 235c1d14583SBruce Richardson default: 236c1d14583SBruce Richardson /* update this according to the RXDID for PROTO_XTR_NONE */ 237c1d14583SBruce Richardson rxq->rxdid = ICE_RXDID_COMMS_OVS; 238c1d14583SBruce Richardson break; 239c1d14583SBruce Richardson } 240c1d14583SBruce Richardson 241c1d14583SBruce Richardson if (rxq->xtr_field_offs == -1) 242c1d14583SBruce Richardson rxq->xtr_ol_flag = 0; 243c1d14583SBruce Richardson } 244c1d14583SBruce Richardson 245c1d14583SBruce Richardson static int 246c1d14583SBruce Richardson ice_program_hw_rx_queue(struct ice_rx_queue *rxq) 247c1d14583SBruce Richardson { 248c1d14583SBruce Richardson struct ice_vsi *vsi = rxq->vsi; 249c1d14583SBruce Richardson struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 250c1d14583SBruce Richardson struct ice_pf *pf = ICE_VSI_TO_PF(vsi); 251c1d14583SBruce Richardson struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data; 252c1d14583SBruce Richardson struct ice_rlan_ctx rx_ctx; 253c1d14583SBruce Richardson uint16_t buf_size; 254c1d14583SBruce Richardson uint32_t rxdid = ICE_RXDID_COMMS_OVS; 255c1d14583SBruce Richardson uint32_t regval; 256c1d14583SBruce Richardson struct ice_adapter *ad = rxq->vsi->adapter; 257c1d14583SBruce Richardson uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD; 258c1d14583SBruce Richardson int err; 259c1d14583SBruce Richardson 260c1d14583SBruce Richardson /* Set buffer size as the head split is disabled. */ 261c1d14583SBruce Richardson buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) - 262c1d14583SBruce Richardson RTE_PKTMBUF_HEADROOM); 263c1d14583SBruce Richardson rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S)); 264c1d14583SBruce Richardson rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE); 265c1d14583SBruce Richardson rxq->max_pkt_len = 266c1d14583SBruce Richardson RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len, 267c1d14583SBruce Richardson frame_size); 268c1d14583SBruce Richardson 269c1d14583SBruce Richardson if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN || 270c1d14583SBruce Richardson rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) { 271c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "maximum packet length must " 272c1d14583SBruce Richardson "be larger than %u and smaller than %u", 273c1d14583SBruce Richardson (uint32_t)RTE_ETHER_MIN_LEN, 274c1d14583SBruce Richardson (uint32_t)ICE_FRAME_SIZE_MAX); 275c1d14583SBruce Richardson return -EINVAL; 276c1d14583SBruce Richardson } 277c1d14583SBruce Richardson 278c1d14583SBruce Richardson if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 279c1d14583SBruce Richardson /* Register mbuf field and flag for Rx timestamp */ 280c1d14583SBruce Richardson err = rte_mbuf_dyn_rx_timestamp_register( 281c1d14583SBruce Richardson &ice_timestamp_dynfield_offset, 282c1d14583SBruce Richardson &ice_timestamp_dynflag); 283c1d14583SBruce Richardson if (err) { 284c1d14583SBruce Richardson PMD_DRV_LOG(ERR, 285c1d14583SBruce Richardson "Cannot register mbuf field/flag for timestamp"); 286c1d14583SBruce Richardson return -EINVAL; 287c1d14583SBruce Richardson } 288c1d14583SBruce Richardson rxq->ts_enable = true; 289c1d14583SBruce Richardson } 290c1d14583SBruce Richardson 291c1d14583SBruce Richardson memset(&rx_ctx, 0, sizeof(rx_ctx)); 292c1d14583SBruce Richardson 293c1d14583SBruce Richardson if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 294c1d14583SBruce Richardson uint32_t proto_hdr; 295c1d14583SBruce Richardson proto_hdr = rxq->rxseg[0].proto_hdr; 296c1d14583SBruce Richardson 297c1d14583SBruce Richardson if (proto_hdr == RTE_PTYPE_UNKNOWN) { 298c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Buffer split protocol must be configured"); 299c1d14583SBruce Richardson return -EINVAL; 300c1d14583SBruce Richardson } 301c1d14583SBruce Richardson 302c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_L4_MASK) { 303c1d14583SBruce Richardson case RTE_PTYPE_L4_TCP: 304c1d14583SBruce Richardson case RTE_PTYPE_L4_UDP: 305c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 306c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP; 307c1d14583SBruce Richardson goto set_hsplit_finish; 308c1d14583SBruce Richardson case RTE_PTYPE_L4_SCTP: 309c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 310c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; 311c1d14583SBruce Richardson goto set_hsplit_finish; 312c1d14583SBruce Richardson } 313c1d14583SBruce Richardson 314c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_L3_MASK) { 315c1d14583SBruce Richardson case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN: 316c1d14583SBruce Richardson case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN: 317c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 318c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP; 319c1d14583SBruce Richardson goto set_hsplit_finish; 320c1d14583SBruce Richardson } 321c1d14583SBruce Richardson 322c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_L2_MASK) { 323c1d14583SBruce Richardson case RTE_PTYPE_L2_ETHER: 324c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 325c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2; 326c1d14583SBruce Richardson rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2; 327c1d14583SBruce Richardson goto set_hsplit_finish; 328c1d14583SBruce Richardson } 329c1d14583SBruce Richardson 330c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) { 331c1d14583SBruce Richardson case RTE_PTYPE_INNER_L4_TCP: 332c1d14583SBruce Richardson case RTE_PTYPE_INNER_L4_UDP: 333c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 334c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP; 335c1d14583SBruce Richardson goto set_hsplit_finish; 336c1d14583SBruce Richardson case RTE_PTYPE_INNER_L4_SCTP: 337c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 338c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP; 339c1d14583SBruce Richardson goto set_hsplit_finish; 340c1d14583SBruce Richardson } 341c1d14583SBruce Richardson 342c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) { 343c1d14583SBruce Richardson case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN: 344c1d14583SBruce Richardson case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN: 345c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 346c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP; 347c1d14583SBruce Richardson goto set_hsplit_finish; 348c1d14583SBruce Richardson } 349c1d14583SBruce Richardson 350c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) { 351c1d14583SBruce Richardson case RTE_PTYPE_INNER_L2_ETHER: 352c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 353c1d14583SBruce Richardson rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2; 354c1d14583SBruce Richardson goto set_hsplit_finish; 355c1d14583SBruce Richardson } 356c1d14583SBruce Richardson 357c1d14583SBruce Richardson switch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) { 358c1d14583SBruce Richardson case RTE_PTYPE_TUNNEL_GRENAT: 359c1d14583SBruce Richardson rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT; 360c1d14583SBruce Richardson rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS; 361c1d14583SBruce Richardson goto set_hsplit_finish; 362c1d14583SBruce Richardson } 363c1d14583SBruce Richardson 364c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Buffer split protocol is not supported"); 365c1d14583SBruce Richardson return -EINVAL; 366c1d14583SBruce Richardson 367c1d14583SBruce Richardson set_hsplit_finish: 368c1d14583SBruce Richardson rxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE; 369c1d14583SBruce Richardson } else { 370c1d14583SBruce Richardson rxq->rx_hdr_len = 0; 371c1d14583SBruce Richardson rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */ 372c1d14583SBruce Richardson } 373c1d14583SBruce Richardson 374c1d14583SBruce Richardson rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 375c1d14583SBruce Richardson rx_ctx.qlen = rxq->nb_rx_desc; 376c1d14583SBruce Richardson rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 377c1d14583SBruce Richardson rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; 378c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 379c1d14583SBruce Richardson rx_ctx.dsize = 1; /* 32B descriptors */ 380c1d14583SBruce Richardson #endif 381c1d14583SBruce Richardson rx_ctx.rxmax = rxq->max_pkt_len; 382c1d14583SBruce Richardson /* TPH: Transaction Layer Packet (TLP) processing hints */ 383c1d14583SBruce Richardson rx_ctx.tphrdesc_ena = 1; 384c1d14583SBruce Richardson rx_ctx.tphwdesc_ena = 1; 385c1d14583SBruce Richardson rx_ctx.tphdata_ena = 1; 386c1d14583SBruce Richardson rx_ctx.tphhead_ena = 1; 387c1d14583SBruce Richardson /* Low Receive Queue Threshold defined in 64 descriptors units. 388c1d14583SBruce Richardson * When the number of free descriptors goes below the lrxqthresh, 389c1d14583SBruce Richardson * an immediate interrupt is triggered. 390c1d14583SBruce Richardson */ 391c1d14583SBruce Richardson rx_ctx.lrxqthresh = 2; 392c1d14583SBruce Richardson /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ 393c1d14583SBruce Richardson rx_ctx.l2tsel = 1; 394c1d14583SBruce Richardson rx_ctx.showiv = 0; 395c1d14583SBruce Richardson rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; 396c1d14583SBruce Richardson 397c1d14583SBruce Richardson rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr); 398c1d14583SBruce Richardson 399c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u", 400c1d14583SBruce Richardson rxq->port_id, rxq->queue_id, rxdid); 401c1d14583SBruce Richardson 402c1d14583SBruce Richardson if (!(pf->supported_rxdid & BIT(rxdid))) { 403c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)", 404c1d14583SBruce Richardson rxdid); 405c1d14583SBruce Richardson return -EINVAL; 406c1d14583SBruce Richardson } 407c1d14583SBruce Richardson 408c1d14583SBruce Richardson rxq->rxdid = rxdid; 409c1d14583SBruce Richardson 410c1d14583SBruce Richardson /* Enable Flexible Descriptors in the queue context which 411c1d14583SBruce Richardson * allows this driver to select a specific receive descriptor format 412c1d14583SBruce Richardson */ 413c1d14583SBruce Richardson regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 414c1d14583SBruce Richardson QRXFLXP_CNTXT_RXDID_IDX_M; 415c1d14583SBruce Richardson 416c1d14583SBruce Richardson /* increasing context priority to pick up profile ID; 417c1d14583SBruce Richardson * default is 0x01; setting to 0x03 to ensure profile 418c1d14583SBruce Richardson * is programming if prev context is of same priority 419c1d14583SBruce Richardson */ 420c1d14583SBruce Richardson regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 421c1d14583SBruce Richardson QRXFLXP_CNTXT_RXDID_PRIO_M; 422c1d14583SBruce Richardson 423c1d14583SBruce Richardson if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 424c1d14583SBruce Richardson regval |= QRXFLXP_CNTXT_TS_M; 425c1d14583SBruce Richardson 426c1d14583SBruce Richardson ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); 427c1d14583SBruce Richardson 428c1d14583SBruce Richardson err = ice_clear_rxq_ctx(hw, rxq->reg_idx); 429c1d14583SBruce Richardson if (err) { 430c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", 431c1d14583SBruce Richardson rxq->queue_id); 432c1d14583SBruce Richardson return -EINVAL; 433c1d14583SBruce Richardson } 434c1d14583SBruce Richardson err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); 435c1d14583SBruce Richardson if (err) { 436c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", 437c1d14583SBruce Richardson rxq->queue_id); 438c1d14583SBruce Richardson return -EINVAL; 439c1d14583SBruce Richardson } 440c1d14583SBruce Richardson 441c1d14583SBruce Richardson /* Check if scattered RX needs to be used. */ 442c1d14583SBruce Richardson if (frame_size > buf_size) 443c1d14583SBruce Richardson dev_data->scattered_rx = 1; 444c1d14583SBruce Richardson 445c1d14583SBruce Richardson rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); 446c1d14583SBruce Richardson 447c1d14583SBruce Richardson /* Init the Rx tail register*/ 448c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 449c1d14583SBruce Richardson 450c1d14583SBruce Richardson return 0; 451c1d14583SBruce Richardson } 452c1d14583SBruce Richardson 453c1d14583SBruce Richardson /* Allocate mbufs for all descriptors in rx queue */ 454c1d14583SBruce Richardson static int 455c1d14583SBruce Richardson ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq) 456c1d14583SBruce Richardson { 457c1d14583SBruce Richardson struct ice_rx_entry *rxe = rxq->sw_ring; 458c1d14583SBruce Richardson uint64_t dma_addr; 459c1d14583SBruce Richardson uint16_t i; 460c1d14583SBruce Richardson 461c1d14583SBruce Richardson for (i = 0; i < rxq->nb_rx_desc; i++) { 462c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxd; 463c1d14583SBruce Richardson rxd = &rxq->rx_ring[i]; 464c1d14583SBruce Richardson struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); 465c1d14583SBruce Richardson 466c1d14583SBruce Richardson if (unlikely(!mbuf)) { 467c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); 468c1d14583SBruce Richardson return -ENOMEM; 469c1d14583SBruce Richardson } 470c1d14583SBruce Richardson 471c1d14583SBruce Richardson mbuf->data_off = RTE_PKTMBUF_HEADROOM; 472c1d14583SBruce Richardson mbuf->nb_segs = 1; 473c1d14583SBruce Richardson mbuf->port = rxq->port_id; 474c1d14583SBruce Richardson 475c1d14583SBruce Richardson dma_addr = 476c1d14583SBruce Richardson rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); 477c1d14583SBruce Richardson 478c1d14583SBruce Richardson if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 479c1d14583SBruce Richardson rte_mbuf_refcnt_set(mbuf, 1); 480c1d14583SBruce Richardson mbuf->next = NULL; 481c1d14583SBruce Richardson rxd->read.hdr_addr = 0; 482c1d14583SBruce Richardson rxd->read.pkt_addr = dma_addr; 483c1d14583SBruce Richardson } else { 484c1d14583SBruce Richardson struct rte_mbuf *mbuf_pay; 485c1d14583SBruce Richardson mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp); 486c1d14583SBruce Richardson if (unlikely(!mbuf_pay)) { 487c1d14583SBruce Richardson rte_pktmbuf_free(mbuf); 488c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX"); 489c1d14583SBruce Richardson return -ENOMEM; 490c1d14583SBruce Richardson } 491c1d14583SBruce Richardson 492c1d14583SBruce Richardson mbuf_pay->next = NULL; 493c1d14583SBruce Richardson mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM; 494c1d14583SBruce Richardson mbuf_pay->nb_segs = 1; 495c1d14583SBruce Richardson mbuf_pay->port = rxq->port_id; 496c1d14583SBruce Richardson mbuf->next = mbuf_pay; 497c1d14583SBruce Richardson 498c1d14583SBruce Richardson rxd->read.hdr_addr = dma_addr; 499c1d14583SBruce Richardson /* The LS bit should be set to zero regardless of 500c1d14583SBruce Richardson * buffer split enablement. 501c1d14583SBruce Richardson */ 502c1d14583SBruce Richardson rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay)); 503c1d14583SBruce Richardson } 504c1d14583SBruce Richardson 505c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 506c1d14583SBruce Richardson rxd->read.rsvd1 = 0; 507c1d14583SBruce Richardson rxd->read.rsvd2 = 0; 508c1d14583SBruce Richardson #endif 509c1d14583SBruce Richardson rxe[i].mbuf = mbuf; 510c1d14583SBruce Richardson } 511c1d14583SBruce Richardson 512c1d14583SBruce Richardson return 0; 513c1d14583SBruce Richardson } 514c1d14583SBruce Richardson 515c1d14583SBruce Richardson /* Free all mbufs for descriptors in rx queue */ 516c1d14583SBruce Richardson static void 517c1d14583SBruce Richardson _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq) 518c1d14583SBruce Richardson { 519c1d14583SBruce Richardson uint16_t i; 520c1d14583SBruce Richardson 521c1d14583SBruce Richardson if (!rxq || !rxq->sw_ring) { 522c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL"); 523c1d14583SBruce Richardson return; 524c1d14583SBruce Richardson } 525c1d14583SBruce Richardson 526c1d14583SBruce Richardson for (i = 0; i < rxq->nb_rx_desc; i++) { 527c1d14583SBruce Richardson if (rxq->sw_ring[i].mbuf) { 528c1d14583SBruce Richardson rte_pktmbuf_free(rxq->sw_ring[i].mbuf); 529c1d14583SBruce Richardson rxq->sw_ring[i].mbuf = NULL; 530c1d14583SBruce Richardson } 531c1d14583SBruce Richardson } 532c1d14583SBruce Richardson if (rxq->rx_nb_avail == 0) 533c1d14583SBruce Richardson return; 534c1d14583SBruce Richardson for (i = 0; i < rxq->rx_nb_avail; i++) 535c1d14583SBruce Richardson rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]); 536c1d14583SBruce Richardson 537c1d14583SBruce Richardson rxq->rx_nb_avail = 0; 538c1d14583SBruce Richardson } 539c1d14583SBruce Richardson 540c1d14583SBruce Richardson /* turn on or off rx queue 541c1d14583SBruce Richardson * @q_idx: queue index in pf scope 542c1d14583SBruce Richardson * @on: turn on or off the queue 543c1d14583SBruce Richardson */ 544c1d14583SBruce Richardson static int 545c1d14583SBruce Richardson ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on) 546c1d14583SBruce Richardson { 547c1d14583SBruce Richardson uint32_t reg; 548c1d14583SBruce Richardson uint16_t j; 549c1d14583SBruce Richardson 550c1d14583SBruce Richardson /* QRX_CTRL = QRX_ENA */ 551c1d14583SBruce Richardson reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); 552c1d14583SBruce Richardson 553c1d14583SBruce Richardson if (on) { 554c1d14583SBruce Richardson if (reg & QRX_CTRL_QENA_STAT_M) 555c1d14583SBruce Richardson return 0; /* Already on, skip */ 556c1d14583SBruce Richardson reg |= QRX_CTRL_QENA_REQ_M; 557c1d14583SBruce Richardson } else { 558c1d14583SBruce Richardson if (!(reg & QRX_CTRL_QENA_STAT_M)) 559c1d14583SBruce Richardson return 0; /* Already off, skip */ 560c1d14583SBruce Richardson reg &= ~QRX_CTRL_QENA_REQ_M; 561c1d14583SBruce Richardson } 562c1d14583SBruce Richardson 563c1d14583SBruce Richardson /* Write the register */ 564c1d14583SBruce Richardson ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg); 565c1d14583SBruce Richardson /* Check the result. It is said that QENA_STAT 566c1d14583SBruce Richardson * follows the QENA_REQ not more than 10 use. 567c1d14583SBruce Richardson * TODO: need to change the wait counter later 568c1d14583SBruce Richardson */ 569c1d14583SBruce Richardson for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) { 570c1d14583SBruce Richardson rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US); 571c1d14583SBruce Richardson reg = ICE_READ_REG(hw, QRX_CTRL(q_idx)); 572c1d14583SBruce Richardson if (on) { 573c1d14583SBruce Richardson if ((reg & QRX_CTRL_QENA_REQ_M) && 574c1d14583SBruce Richardson (reg & QRX_CTRL_QENA_STAT_M)) 575c1d14583SBruce Richardson break; 576c1d14583SBruce Richardson } else { 577c1d14583SBruce Richardson if (!(reg & QRX_CTRL_QENA_REQ_M) && 578c1d14583SBruce Richardson !(reg & QRX_CTRL_QENA_STAT_M)) 579c1d14583SBruce Richardson break; 580c1d14583SBruce Richardson } 581c1d14583SBruce Richardson } 582c1d14583SBruce Richardson 583c1d14583SBruce Richardson /* Check if it is timeout */ 584c1d14583SBruce Richardson if (j >= ICE_CHK_Q_ENA_COUNT) { 585c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]", 586c1d14583SBruce Richardson (on ? "enable" : "disable"), q_idx); 587c1d14583SBruce Richardson return -ETIMEDOUT; 588c1d14583SBruce Richardson } 589c1d14583SBruce Richardson 590c1d14583SBruce Richardson return 0; 591c1d14583SBruce Richardson } 592c1d14583SBruce Richardson 593c1d14583SBruce Richardson static inline int 594c1d14583SBruce Richardson ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq) 595c1d14583SBruce Richardson { 596c1d14583SBruce Richardson int ret = 0; 597c1d14583SBruce Richardson 598c1d14583SBruce Richardson if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) { 599c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 600c1d14583SBruce Richardson "rxq->rx_free_thresh=%d, " 601c1d14583SBruce Richardson "ICE_RX_MAX_BURST=%d", 602c1d14583SBruce Richardson rxq->rx_free_thresh, ICE_RX_MAX_BURST); 603c1d14583SBruce Richardson ret = -EINVAL; 604c1d14583SBruce Richardson } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) { 605c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 606c1d14583SBruce Richardson "rxq->rx_free_thresh=%d, " 607c1d14583SBruce Richardson "rxq->nb_rx_desc=%d", 608c1d14583SBruce Richardson rxq->rx_free_thresh, rxq->nb_rx_desc); 609c1d14583SBruce Richardson ret = -EINVAL; 610c1d14583SBruce Richardson } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { 611c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " 612c1d14583SBruce Richardson "rxq->nb_rx_desc=%d, " 613c1d14583SBruce Richardson "rxq->rx_free_thresh=%d", 614c1d14583SBruce Richardson rxq->nb_rx_desc, rxq->rx_free_thresh); 615c1d14583SBruce Richardson ret = -EINVAL; 616c1d14583SBruce Richardson } 617c1d14583SBruce Richardson 618c1d14583SBruce Richardson return ret; 619c1d14583SBruce Richardson } 620c1d14583SBruce Richardson 621c1d14583SBruce Richardson /* reset fields in ice_rx_queue back to default */ 622c1d14583SBruce Richardson static void 623c1d14583SBruce Richardson ice_reset_rx_queue(struct ice_rx_queue *rxq) 624c1d14583SBruce Richardson { 625c1d14583SBruce Richardson unsigned int i; 626c1d14583SBruce Richardson uint16_t len; 627c1d14583SBruce Richardson 628c1d14583SBruce Richardson if (!rxq) { 629c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); 630c1d14583SBruce Richardson return; 631c1d14583SBruce Richardson } 632c1d14583SBruce Richardson 633c1d14583SBruce Richardson len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST); 634c1d14583SBruce Richardson 635c1d14583SBruce Richardson for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++) 636c1d14583SBruce Richardson ((volatile char *)rxq->rx_ring)[i] = 0; 637c1d14583SBruce Richardson 638c1d14583SBruce Richardson memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); 639c1d14583SBruce Richardson for (i = 0; i < ICE_RX_MAX_BURST; ++i) 640c1d14583SBruce Richardson rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; 641c1d14583SBruce Richardson 642c1d14583SBruce Richardson rxq->rx_nb_avail = 0; 643c1d14583SBruce Richardson rxq->rx_next_avail = 0; 644c1d14583SBruce Richardson rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 645c1d14583SBruce Richardson 646c1d14583SBruce Richardson rxq->rx_tail = 0; 647c1d14583SBruce Richardson rxq->nb_rx_hold = 0; 648c1d14583SBruce Richardson rxq->pkt_first_seg = NULL; 649c1d14583SBruce Richardson rxq->pkt_last_seg = NULL; 650c1d14583SBruce Richardson 651c1d14583SBruce Richardson rxq->rxrearm_start = 0; 652c1d14583SBruce Richardson rxq->rxrearm_nb = 0; 653c1d14583SBruce Richardson } 654c1d14583SBruce Richardson 655c1d14583SBruce Richardson int 656c1d14583SBruce Richardson ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 657c1d14583SBruce Richardson { 658c1d14583SBruce Richardson struct ice_rx_queue *rxq; 659c1d14583SBruce Richardson int err; 660c1d14583SBruce Richardson struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 661c1d14583SBruce Richardson 662c1d14583SBruce Richardson PMD_INIT_FUNC_TRACE(); 663c1d14583SBruce Richardson 664c1d14583SBruce Richardson if (rx_queue_id >= dev->data->nb_rx_queues) { 665c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "RX queue %u is out of range %u", 666c1d14583SBruce Richardson rx_queue_id, dev->data->nb_rx_queues); 667c1d14583SBruce Richardson return -EINVAL; 668c1d14583SBruce Richardson } 669c1d14583SBruce Richardson 670c1d14583SBruce Richardson rxq = dev->data->rx_queues[rx_queue_id]; 671c1d14583SBruce Richardson if (!rxq || !rxq->q_set) { 672c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "RX queue %u not available or setup", 673c1d14583SBruce Richardson rx_queue_id); 674c1d14583SBruce Richardson return -EINVAL; 675c1d14583SBruce Richardson } 676c1d14583SBruce Richardson 677c1d14583SBruce Richardson if (dev->data->rx_queue_state[rx_queue_id] == 678c1d14583SBruce Richardson RTE_ETH_QUEUE_STATE_STARTED) 679c1d14583SBruce Richardson return 0; 680c1d14583SBruce Richardson 681c1d14583SBruce Richardson if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) 682c1d14583SBruce Richardson rxq->ts_enable = true; 683c1d14583SBruce Richardson err = ice_program_hw_rx_queue(rxq); 684c1d14583SBruce Richardson if (err) { 685c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "fail to program RX queue %u", 686c1d14583SBruce Richardson rx_queue_id); 687c1d14583SBruce Richardson return -EIO; 688c1d14583SBruce Richardson } 689c1d14583SBruce Richardson 690c1d14583SBruce Richardson err = ice_alloc_rx_queue_mbufs(rxq); 691c1d14583SBruce Richardson if (err) { 692c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); 693c1d14583SBruce Richardson return -ENOMEM; 694c1d14583SBruce Richardson } 695c1d14583SBruce Richardson 696c1d14583SBruce Richardson /* Init the RX tail register. */ 697c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 698c1d14583SBruce Richardson 699c1d14583SBruce Richardson err = ice_switch_rx_queue(hw, rxq->reg_idx, true); 700c1d14583SBruce Richardson if (err) { 701c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", 702c1d14583SBruce Richardson rx_queue_id); 703c1d14583SBruce Richardson 704c1d14583SBruce Richardson rxq->rx_rel_mbufs(rxq); 705c1d14583SBruce Richardson ice_reset_rx_queue(rxq); 706c1d14583SBruce Richardson return -EINVAL; 707c1d14583SBruce Richardson } 708c1d14583SBruce Richardson 709c1d14583SBruce Richardson dev->data->rx_queue_state[rx_queue_id] = 710c1d14583SBruce Richardson RTE_ETH_QUEUE_STATE_STARTED; 711c1d14583SBruce Richardson 712c1d14583SBruce Richardson return 0; 713c1d14583SBruce Richardson } 714c1d14583SBruce Richardson 715c1d14583SBruce Richardson int 716c1d14583SBruce Richardson ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 717c1d14583SBruce Richardson { 718c1d14583SBruce Richardson struct ice_rx_queue *rxq; 719c1d14583SBruce Richardson int err; 720c1d14583SBruce Richardson struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 721c1d14583SBruce Richardson 722c1d14583SBruce Richardson if (rx_queue_id < dev->data->nb_rx_queues) { 723c1d14583SBruce Richardson rxq = dev->data->rx_queues[rx_queue_id]; 724c1d14583SBruce Richardson 725c1d14583SBruce Richardson if (dev->data->rx_queue_state[rx_queue_id] == 726c1d14583SBruce Richardson RTE_ETH_QUEUE_STATE_STOPPED) 727c1d14583SBruce Richardson return 0; 728c1d14583SBruce Richardson 729c1d14583SBruce Richardson err = ice_switch_rx_queue(hw, rxq->reg_idx, false); 730c1d14583SBruce Richardson if (err) { 731c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", 732c1d14583SBruce Richardson rx_queue_id); 733c1d14583SBruce Richardson return -EINVAL; 734c1d14583SBruce Richardson } 735c1d14583SBruce Richardson rxq->rx_rel_mbufs(rxq); 736c1d14583SBruce Richardson ice_reset_rx_queue(rxq); 737c1d14583SBruce Richardson dev->data->rx_queue_state[rx_queue_id] = 738c1d14583SBruce Richardson RTE_ETH_QUEUE_STATE_STOPPED; 739c1d14583SBruce Richardson } 740c1d14583SBruce Richardson 741c1d14583SBruce Richardson return 0; 742c1d14583SBruce Richardson } 743c1d14583SBruce Richardson 744c1d14583SBruce Richardson int 745c1d14583SBruce Richardson ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 746c1d14583SBruce Richardson { 747c038157aSBruce Richardson struct ci_tx_queue *txq; 748c1d14583SBruce Richardson int err; 749c1d14583SBruce Richardson struct ice_vsi *vsi; 750c1d14583SBruce Richardson struct ice_hw *hw; 751c1d14583SBruce Richardson struct ice_pf *pf; 752c1d14583SBruce Richardson struct ice_aqc_add_tx_qgrp *txq_elem; 753c1d14583SBruce Richardson struct ice_tlan_ctx tx_ctx; 754c1d14583SBruce Richardson int buf_len; 755552979dfSBruce Richardson struct ice_adapter *ad = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 756c1d14583SBruce Richardson 757c1d14583SBruce Richardson PMD_INIT_FUNC_TRACE(); 758c1d14583SBruce Richardson 759c1d14583SBruce Richardson if (tx_queue_id >= dev->data->nb_tx_queues) { 760c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", 761c1d14583SBruce Richardson tx_queue_id, dev->data->nb_tx_queues); 762c1d14583SBruce Richardson return -EINVAL; 763c1d14583SBruce Richardson } 764c1d14583SBruce Richardson 765c1d14583SBruce Richardson txq = dev->data->tx_queues[tx_queue_id]; 766c1d14583SBruce Richardson if (!txq || !txq->q_set) { 767c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "TX queue %u is not available or setup", 768c1d14583SBruce Richardson tx_queue_id); 769c1d14583SBruce Richardson return -EINVAL; 770c1d14583SBruce Richardson } 771c1d14583SBruce Richardson 772c1d14583SBruce Richardson if (dev->data->tx_queue_state[tx_queue_id] == 773c1d14583SBruce Richardson RTE_ETH_QUEUE_STATE_STARTED) 774c1d14583SBruce Richardson return 0; 775c1d14583SBruce Richardson 776c1d14583SBruce Richardson buf_len = ice_struct_size(txq_elem, txqs, 1); 777c1d14583SBruce Richardson txq_elem = ice_malloc(hw, buf_len); 778c1d14583SBruce Richardson if (!txq_elem) 779c1d14583SBruce Richardson return -ENOMEM; 780c1d14583SBruce Richardson 7814d0f54d9SBruce Richardson vsi = txq->ice_vsi; 782c1d14583SBruce Richardson hw = ICE_VSI_TO_HW(vsi); 783c1d14583SBruce Richardson pf = ICE_VSI_TO_PF(vsi); 784c1d14583SBruce Richardson 785c1d14583SBruce Richardson memset(&tx_ctx, 0, sizeof(tx_ctx)); 786c1d14583SBruce Richardson txq_elem->num_txqs = 1; 787c1d14583SBruce Richardson txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); 788c1d14583SBruce Richardson 789c1d14583SBruce Richardson tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 790c1d14583SBruce Richardson tx_ctx.qlen = txq->nb_tx_desc; 791c1d14583SBruce Richardson tx_ctx.pf_num = hw->pf_id; 792c1d14583SBruce Richardson tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 793c1d14583SBruce Richardson tx_ctx.src_vsi = vsi->vsi_id; 794c1d14583SBruce Richardson tx_ctx.port_num = hw->port_info->lport; 795c1d14583SBruce Richardson tx_ctx.tso_ena = 1; /* tso enable */ 796c1d14583SBruce Richardson tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ 797c1d14583SBruce Richardson tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ 798c1d14583SBruce Richardson tx_ctx.tsyn_ena = 1; 799c1d14583SBruce Richardson 800c1d14583SBruce Richardson ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx, 801c1d14583SBruce Richardson ice_tlan_ctx_info); 802c1d14583SBruce Richardson 803c1d14583SBruce Richardson txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); 804c1d14583SBruce Richardson 805c1d14583SBruce Richardson /* Init the Tx tail register*/ 806c1d14583SBruce Richardson ICE_PCI_REG_WRITE(txq->qtx_tail, 0); 807c1d14583SBruce Richardson 808c1d14583SBruce Richardson /* Fix me, we assume TC always 0 here */ 809c1d14583SBruce Richardson err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, 810c1d14583SBruce Richardson txq_elem, buf_len, NULL); 811c1d14583SBruce Richardson if (err) { 812c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to add lan txq"); 813c1d14583SBruce Richardson rte_free(txq_elem); 814c1d14583SBruce Richardson return -EIO; 815c1d14583SBruce Richardson } 816c1d14583SBruce Richardson /* store the schedule node id */ 817c1d14583SBruce Richardson txq->q_teid = txq_elem->txqs[0].q_teid; 818c1d14583SBruce Richardson 819c1d14583SBruce Richardson /* move the queue to correct position in hierarchy, if explicit hierarchy configured */ 820c1d14583SBruce Richardson if (pf->tm_conf.committed) 821c1d14583SBruce Richardson if (ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) { 822c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to set up txq traffic management node"); 823c1d14583SBruce Richardson rte_free(txq_elem); 824c1d14583SBruce Richardson return -EIO; 825c1d14583SBruce Richardson } 826c1d14583SBruce Richardson 827552979dfSBruce Richardson /* record what kind of descriptor cleanup we need on teardown */ 828552979dfSBruce Richardson txq->vector_tx = ad->tx_vec_allowed; 829552979dfSBruce Richardson txq->vector_sw_ring = ad->tx_use_avx512; 830552979dfSBruce Richardson 831c1d14583SBruce Richardson dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 832c1d14583SBruce Richardson 833c1d14583SBruce Richardson rte_free(txq_elem); 834c1d14583SBruce Richardson return 0; 835c1d14583SBruce Richardson } 836c1d14583SBruce Richardson 837c1d14583SBruce Richardson static int 838c1d14583SBruce Richardson ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) 839c1d14583SBruce Richardson { 840c1d14583SBruce Richardson struct ice_vsi *vsi = rxq->vsi; 841c1d14583SBruce Richardson struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 842c1d14583SBruce Richardson uint32_t rxdid = ICE_RXDID_LEGACY_1; 843c1d14583SBruce Richardson struct ice_rlan_ctx rx_ctx; 844c1d14583SBruce Richardson uint32_t regval; 845c1d14583SBruce Richardson int err; 846c1d14583SBruce Richardson 847c1d14583SBruce Richardson rxq->rx_hdr_len = 0; 848c1d14583SBruce Richardson rxq->rx_buf_len = 1024; 849c1d14583SBruce Richardson 850c1d14583SBruce Richardson memset(&rx_ctx, 0, sizeof(rx_ctx)); 851c1d14583SBruce Richardson 852c1d14583SBruce Richardson rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 853c1d14583SBruce Richardson rx_ctx.qlen = rxq->nb_rx_desc; 854c1d14583SBruce Richardson rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 855c1d14583SBruce Richardson rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; 856c1d14583SBruce Richardson rx_ctx.dtype = 0; /* No Buffer Split mode */ 857c1d14583SBruce Richardson rx_ctx.dsize = 1; /* 32B descriptors */ 858c1d14583SBruce Richardson rx_ctx.rxmax = ICE_ETH_MAX_LEN; 859c1d14583SBruce Richardson /* TPH: Transaction Layer Packet (TLP) processing hints */ 860c1d14583SBruce Richardson rx_ctx.tphrdesc_ena = 1; 861c1d14583SBruce Richardson rx_ctx.tphwdesc_ena = 1; 862c1d14583SBruce Richardson rx_ctx.tphdata_ena = 1; 863c1d14583SBruce Richardson rx_ctx.tphhead_ena = 1; 864c1d14583SBruce Richardson /* Low Receive Queue Threshold defined in 64 descriptors units. 865c1d14583SBruce Richardson * When the number of free descriptors goes below the lrxqthresh, 866c1d14583SBruce Richardson * an immediate interrupt is triggered. 867c1d14583SBruce Richardson */ 868c1d14583SBruce Richardson rx_ctx.lrxqthresh = 2; 869c1d14583SBruce Richardson /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ 870c1d14583SBruce Richardson rx_ctx.l2tsel = 1; 871c1d14583SBruce Richardson rx_ctx.showiv = 0; 872c1d14583SBruce Richardson rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; 873c1d14583SBruce Richardson 874c1d14583SBruce Richardson /* Enable Flexible Descriptors in the queue context which 875c1d14583SBruce Richardson * allows this driver to select a specific receive descriptor format 876c1d14583SBruce Richardson */ 877c1d14583SBruce Richardson regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & 878c1d14583SBruce Richardson QRXFLXP_CNTXT_RXDID_IDX_M; 879c1d14583SBruce Richardson 880c1d14583SBruce Richardson /* increasing context priority to pick up profile ID; 881c1d14583SBruce Richardson * default is 0x01; setting to 0x03 to ensure profile 882c1d14583SBruce Richardson * is programming if prev context is of same priority 883c1d14583SBruce Richardson */ 884c1d14583SBruce Richardson regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & 885c1d14583SBruce Richardson QRXFLXP_CNTXT_RXDID_PRIO_M; 886c1d14583SBruce Richardson 887c1d14583SBruce Richardson ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); 888c1d14583SBruce Richardson 889c1d14583SBruce Richardson err = ice_clear_rxq_ctx(hw, rxq->reg_idx); 890c1d14583SBruce Richardson if (err) { 891c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", 892c1d14583SBruce Richardson rxq->queue_id); 893c1d14583SBruce Richardson return -EINVAL; 894c1d14583SBruce Richardson } 895c1d14583SBruce Richardson err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); 896c1d14583SBruce Richardson if (err) { 897c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", 898c1d14583SBruce Richardson rxq->queue_id); 899c1d14583SBruce Richardson return -EINVAL; 900c1d14583SBruce Richardson } 901c1d14583SBruce Richardson 902c1d14583SBruce Richardson rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); 903c1d14583SBruce Richardson 904c1d14583SBruce Richardson /* Init the Rx tail register*/ 905c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 906c1d14583SBruce Richardson 907c1d14583SBruce Richardson return 0; 908c1d14583SBruce Richardson } 909c1d14583SBruce Richardson 910c1d14583SBruce Richardson int 911c1d14583SBruce Richardson ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) 912c1d14583SBruce Richardson { 913c1d14583SBruce Richardson struct ice_rx_queue *rxq; 914c1d14583SBruce Richardson int err; 915c1d14583SBruce Richardson struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 916c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 917c1d14583SBruce Richardson 918c1d14583SBruce Richardson PMD_INIT_FUNC_TRACE(); 919c1d14583SBruce Richardson 920c1d14583SBruce Richardson rxq = pf->fdir.rxq; 921c1d14583SBruce Richardson if (!rxq || !rxq->q_set) { 922c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup", 923c1d14583SBruce Richardson rx_queue_id); 924c1d14583SBruce Richardson return -EINVAL; 925c1d14583SBruce Richardson } 926c1d14583SBruce Richardson 927c1d14583SBruce Richardson err = ice_fdir_program_hw_rx_queue(rxq); 928c1d14583SBruce Richardson if (err) { 929c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u", 930c1d14583SBruce Richardson rx_queue_id); 931c1d14583SBruce Richardson return -EIO; 932c1d14583SBruce Richardson } 933c1d14583SBruce Richardson 934c1d14583SBruce Richardson /* Init the RX tail register. */ 935c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 936c1d14583SBruce Richardson 937c1d14583SBruce Richardson err = ice_switch_rx_queue(hw, rxq->reg_idx, true); 938c1d14583SBruce Richardson if (err) { 939c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on", 940c1d14583SBruce Richardson rx_queue_id); 941c1d14583SBruce Richardson 942c1d14583SBruce Richardson ice_reset_rx_queue(rxq); 943c1d14583SBruce Richardson return -EINVAL; 944c1d14583SBruce Richardson } 945c1d14583SBruce Richardson 946c1d14583SBruce Richardson return 0; 947c1d14583SBruce Richardson } 948c1d14583SBruce Richardson 949c1d14583SBruce Richardson int 950c1d14583SBruce Richardson ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) 951c1d14583SBruce Richardson { 952c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 953c038157aSBruce Richardson struct ci_tx_queue *txq; 954c1d14583SBruce Richardson int err; 955c1d14583SBruce Richardson struct ice_vsi *vsi; 956c1d14583SBruce Richardson struct ice_hw *hw; 957c1d14583SBruce Richardson struct ice_aqc_add_tx_qgrp *txq_elem; 958c1d14583SBruce Richardson struct ice_tlan_ctx tx_ctx; 959c1d14583SBruce Richardson int buf_len; 960c1d14583SBruce Richardson 961c1d14583SBruce Richardson PMD_INIT_FUNC_TRACE(); 962c1d14583SBruce Richardson 963c1d14583SBruce Richardson txq = pf->fdir.txq; 964c1d14583SBruce Richardson if (!txq || !txq->q_set) { 965c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup", 966c1d14583SBruce Richardson tx_queue_id); 967c1d14583SBruce Richardson return -EINVAL; 968c1d14583SBruce Richardson } 969c1d14583SBruce Richardson 970c1d14583SBruce Richardson buf_len = ice_struct_size(txq_elem, txqs, 1); 971c1d14583SBruce Richardson txq_elem = ice_malloc(hw, buf_len); 972c1d14583SBruce Richardson if (!txq_elem) 973c1d14583SBruce Richardson return -ENOMEM; 974c1d14583SBruce Richardson 9754d0f54d9SBruce Richardson vsi = txq->ice_vsi; 976c1d14583SBruce Richardson hw = ICE_VSI_TO_HW(vsi); 977c1d14583SBruce Richardson 978c1d14583SBruce Richardson memset(&tx_ctx, 0, sizeof(tx_ctx)); 979c1d14583SBruce Richardson txq_elem->num_txqs = 1; 980c1d14583SBruce Richardson txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); 981c1d14583SBruce Richardson 982c1d14583SBruce Richardson tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; 983c1d14583SBruce Richardson tx_ctx.qlen = txq->nb_tx_desc; 984c1d14583SBruce Richardson tx_ctx.pf_num = hw->pf_id; 985c1d14583SBruce Richardson tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; 986c1d14583SBruce Richardson tx_ctx.src_vsi = vsi->vsi_id; 987c1d14583SBruce Richardson tx_ctx.port_num = hw->port_info->lport; 988c1d14583SBruce Richardson tx_ctx.tso_ena = 1; /* tso enable */ 989c1d14583SBruce Richardson tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ 990c1d14583SBruce Richardson tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ 991c1d14583SBruce Richardson 992c1d14583SBruce Richardson ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx, 993c1d14583SBruce Richardson ice_tlan_ctx_info); 994c1d14583SBruce Richardson 995c1d14583SBruce Richardson txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); 996c1d14583SBruce Richardson 997c1d14583SBruce Richardson /* Init the Tx tail register*/ 998c1d14583SBruce Richardson ICE_PCI_REG_WRITE(txq->qtx_tail, 0); 999c1d14583SBruce Richardson 1000c1d14583SBruce Richardson /* Fix me, we assume TC always 0 here */ 1001c1d14583SBruce Richardson err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, 1002c1d14583SBruce Richardson txq_elem, buf_len, NULL); 1003c1d14583SBruce Richardson if (err) { 1004c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to add FDIR txq"); 1005c1d14583SBruce Richardson rte_free(txq_elem); 1006c1d14583SBruce Richardson return -EIO; 1007c1d14583SBruce Richardson } 1008c1d14583SBruce Richardson /* store the schedule node id */ 1009c1d14583SBruce Richardson txq->q_teid = txq_elem->txqs[0].q_teid; 1010c1d14583SBruce Richardson 1011c1d14583SBruce Richardson rte_free(txq_elem); 1012c1d14583SBruce Richardson return 0; 1013c1d14583SBruce Richardson } 1014c1d14583SBruce Richardson 1015c1d14583SBruce Richardson static void 1016c038157aSBruce Richardson ice_reset_tx_queue(struct ci_tx_queue *txq) 1017c1d14583SBruce Richardson { 10185cc9919fSBruce Richardson struct ci_tx_entry *txe; 1019c1d14583SBruce Richardson uint16_t i, prev, size; 1020c1d14583SBruce Richardson 1021c1d14583SBruce Richardson if (!txq) { 1022c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); 1023c1d14583SBruce Richardson return; 1024c1d14583SBruce Richardson } 1025c1d14583SBruce Richardson 1026c1d14583SBruce Richardson txe = txq->sw_ring; 1027c1d14583SBruce Richardson size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc; 1028c1d14583SBruce Richardson for (i = 0; i < size; i++) 10294d0f54d9SBruce Richardson ((volatile char *)txq->ice_tx_ring)[i] = 0; 1030c1d14583SBruce Richardson 1031c1d14583SBruce Richardson prev = (uint16_t)(txq->nb_tx_desc - 1); 1032c1d14583SBruce Richardson for (i = 0; i < txq->nb_tx_desc; i++) { 10334d0f54d9SBruce Richardson volatile struct ice_tx_desc *txd = &txq->ice_tx_ring[i]; 1034c1d14583SBruce Richardson 1035c1d14583SBruce Richardson txd->cmd_type_offset_bsz = 1036c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE); 1037c1d14583SBruce Richardson txe[i].mbuf = NULL; 1038c1d14583SBruce Richardson txe[i].last_id = i; 1039c1d14583SBruce Richardson txe[prev].next_id = i; 1040c1d14583SBruce Richardson prev = i; 1041c1d14583SBruce Richardson } 1042c1d14583SBruce Richardson 1043c1d14583SBruce Richardson txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); 1044c1d14583SBruce Richardson txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 1045c1d14583SBruce Richardson 1046c1d14583SBruce Richardson txq->tx_tail = 0; 1047c1d14583SBruce Richardson txq->nb_tx_used = 0; 1048c1d14583SBruce Richardson 1049c1d14583SBruce Richardson txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); 1050c1d14583SBruce Richardson txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); 1051c1d14583SBruce Richardson } 1052c1d14583SBruce Richardson 1053c1d14583SBruce Richardson int 1054c1d14583SBruce Richardson ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1055c1d14583SBruce Richardson { 1056c038157aSBruce Richardson struct ci_tx_queue *txq; 1057c1d14583SBruce Richardson struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1058c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1059c1d14583SBruce Richardson struct ice_vsi *vsi = pf->main_vsi; 1060c1d14583SBruce Richardson uint16_t q_ids[1]; 1061c1d14583SBruce Richardson uint32_t q_teids[1]; 1062c1d14583SBruce Richardson uint16_t q_handle = tx_queue_id; 1063c1d14583SBruce Richardson int status; 1064c1d14583SBruce Richardson 1065c1d14583SBruce Richardson if (tx_queue_id >= dev->data->nb_tx_queues) { 1066c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "TX queue %u is out of range %u", 1067c1d14583SBruce Richardson tx_queue_id, dev->data->nb_tx_queues); 1068c1d14583SBruce Richardson return -EINVAL; 1069c1d14583SBruce Richardson } 1070c1d14583SBruce Richardson 1071c1d14583SBruce Richardson txq = dev->data->tx_queues[tx_queue_id]; 1072c1d14583SBruce Richardson if (!txq) { 1073c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "TX queue %u is not available", 1074c1d14583SBruce Richardson tx_queue_id); 1075c1d14583SBruce Richardson return -EINVAL; 1076c1d14583SBruce Richardson } 1077c1d14583SBruce Richardson 1078c1d14583SBruce Richardson if (dev->data->tx_queue_state[tx_queue_id] == 1079c1d14583SBruce Richardson RTE_ETH_QUEUE_STATE_STOPPED) 1080c1d14583SBruce Richardson return 0; 1081c1d14583SBruce Richardson 1082c1d14583SBruce Richardson q_ids[0] = txq->reg_idx; 1083c1d14583SBruce Richardson q_teids[0] = txq->q_teid; 1084c1d14583SBruce Richardson 1085c1d14583SBruce Richardson /* Fix me, we assume TC always 0 here */ 1086c1d14583SBruce Richardson status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, 1087c1d14583SBruce Richardson q_ids, q_teids, ICE_NO_RESET, 0, NULL); 1088c1d14583SBruce Richardson if (status != ICE_SUCCESS) { 1089c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); 1090c1d14583SBruce Richardson return -EINVAL; 1091c1d14583SBruce Richardson } 1092c1d14583SBruce Richardson 1093*cef05386SBruce Richardson ci_txq_release_all_mbufs(txq, false); 1094c1d14583SBruce Richardson ice_reset_tx_queue(txq); 1095c1d14583SBruce Richardson dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 1096c1d14583SBruce Richardson 1097c1d14583SBruce Richardson return 0; 1098c1d14583SBruce Richardson } 1099c1d14583SBruce Richardson 1100c1d14583SBruce Richardson int 1101c1d14583SBruce Richardson ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1102c1d14583SBruce Richardson { 1103c1d14583SBruce Richardson struct ice_rx_queue *rxq; 1104c1d14583SBruce Richardson int err; 1105c1d14583SBruce Richardson struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1106c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1107c1d14583SBruce Richardson 1108c1d14583SBruce Richardson rxq = pf->fdir.rxq; 1109c1d14583SBruce Richardson 1110c1d14583SBruce Richardson err = ice_switch_rx_queue(hw, rxq->reg_idx, false); 1111c1d14583SBruce Richardson if (err) { 1112c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off", 1113c1d14583SBruce Richardson rx_queue_id); 1114c1d14583SBruce Richardson return -EINVAL; 1115c1d14583SBruce Richardson } 1116c1d14583SBruce Richardson rxq->rx_rel_mbufs(rxq); 1117c1d14583SBruce Richardson 1118c1d14583SBruce Richardson return 0; 1119c1d14583SBruce Richardson } 1120c1d14583SBruce Richardson 1121c1d14583SBruce Richardson int 1122c1d14583SBruce Richardson ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) 1123c1d14583SBruce Richardson { 1124c038157aSBruce Richardson struct ci_tx_queue *txq; 1125c1d14583SBruce Richardson struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 1126c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1127c1d14583SBruce Richardson struct ice_vsi *vsi = pf->main_vsi; 1128c1d14583SBruce Richardson uint16_t q_ids[1]; 1129c1d14583SBruce Richardson uint32_t q_teids[1]; 1130c1d14583SBruce Richardson uint16_t q_handle = tx_queue_id; 1131c1d14583SBruce Richardson int status; 1132c1d14583SBruce Richardson 1133c1d14583SBruce Richardson txq = pf->fdir.txq; 1134c1d14583SBruce Richardson if (!txq) { 1135c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "TX queue %u is not available", 1136c1d14583SBruce Richardson tx_queue_id); 1137c1d14583SBruce Richardson return -EINVAL; 1138c1d14583SBruce Richardson } 1139c1d14583SBruce Richardson if (txq->qtx_tail == NULL) { 1140c1d14583SBruce Richardson PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id); 1141c1d14583SBruce Richardson return 0; 1142c1d14583SBruce Richardson } 11434d0f54d9SBruce Richardson vsi = txq->ice_vsi; 1144c1d14583SBruce Richardson 1145c1d14583SBruce Richardson q_ids[0] = txq->reg_idx; 1146c1d14583SBruce Richardson q_teids[0] = txq->q_teid; 1147c1d14583SBruce Richardson 1148c1d14583SBruce Richardson /* Fix me, we assume TC always 0 here */ 1149c1d14583SBruce Richardson status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, 1150c1d14583SBruce Richardson q_ids, q_teids, ICE_NO_RESET, 0, NULL); 1151c1d14583SBruce Richardson if (status != ICE_SUCCESS) { 1152c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); 1153c1d14583SBruce Richardson return -EINVAL; 1154c1d14583SBruce Richardson } 1155c1d14583SBruce Richardson 1156*cef05386SBruce Richardson ci_txq_release_all_mbufs(txq, false); 1157c1d14583SBruce Richardson txq->qtx_tail = NULL; 1158c1d14583SBruce Richardson 1159c1d14583SBruce Richardson return 0; 1160c1d14583SBruce Richardson } 1161c1d14583SBruce Richardson 1162c1d14583SBruce Richardson int 1163c1d14583SBruce Richardson ice_rx_queue_setup(struct rte_eth_dev *dev, 1164c1d14583SBruce Richardson uint16_t queue_idx, 1165c1d14583SBruce Richardson uint16_t nb_desc, 1166c1d14583SBruce Richardson unsigned int socket_id, 1167c1d14583SBruce Richardson const struct rte_eth_rxconf *rx_conf, 1168c1d14583SBruce Richardson struct rte_mempool *mp) 1169c1d14583SBruce Richardson { 1170c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1171c1d14583SBruce Richardson struct ice_adapter *ad = 1172c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 1173c1d14583SBruce Richardson struct ice_vsi *vsi = pf->main_vsi; 1174c1d14583SBruce Richardson struct ice_rx_queue *rxq; 1175c1d14583SBruce Richardson const struct rte_memzone *rz; 1176c1d14583SBruce Richardson uint32_t ring_size; 1177c1d14583SBruce Richardson uint16_t len; 1178c1d14583SBruce Richardson int use_def_burst_func = 1; 1179c1d14583SBruce Richardson uint64_t offloads; 1180c1d14583SBruce Richardson uint16_t n_seg = rx_conf->rx_nseg; 1181c1d14583SBruce Richardson uint16_t i; 1182c1d14583SBruce Richardson 1183c1d14583SBruce Richardson if (nb_desc % ICE_ALIGN_RING_DESC != 0 || 1184c1d14583SBruce Richardson nb_desc > ICE_MAX_RING_DESC || 1185c1d14583SBruce Richardson nb_desc < ICE_MIN_RING_DESC) { 1186c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is " 1187c1d14583SBruce Richardson "invalid", nb_desc); 1188c1d14583SBruce Richardson return -EINVAL; 1189c1d14583SBruce Richardson } 1190c1d14583SBruce Richardson 1191c1d14583SBruce Richardson offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; 1192c1d14583SBruce Richardson 1193c1d14583SBruce Richardson if (mp) 1194c1d14583SBruce Richardson n_seg = 1; 1195c1d14583SBruce Richardson 1196c1d14583SBruce Richardson if (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1197c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured", 1198c1d14583SBruce Richardson dev->data->port_id, queue_idx); 1199c1d14583SBruce Richardson return -EINVAL; 1200c1d14583SBruce Richardson } 1201c1d14583SBruce Richardson 1202c1d14583SBruce Richardson /* Free memory if needed */ 1203c1d14583SBruce Richardson if (dev->data->rx_queues[queue_idx]) { 1204c1d14583SBruce Richardson ice_rx_queue_release(dev->data->rx_queues[queue_idx]); 1205c1d14583SBruce Richardson dev->data->rx_queues[queue_idx] = NULL; 1206c1d14583SBruce Richardson } 1207c1d14583SBruce Richardson 1208c1d14583SBruce Richardson /* Allocate the rx queue data structure */ 1209c1d14583SBruce Richardson rxq = rte_zmalloc_socket(NULL, 1210c1d14583SBruce Richardson sizeof(struct ice_rx_queue), 1211c1d14583SBruce Richardson RTE_CACHE_LINE_SIZE, 1212c1d14583SBruce Richardson socket_id); 1213c1d14583SBruce Richardson 1214c1d14583SBruce Richardson if (!rxq) { 1215c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Failed to allocate memory for " 1216c1d14583SBruce Richardson "rx queue data structure"); 1217c1d14583SBruce Richardson return -ENOMEM; 1218c1d14583SBruce Richardson } 1219c1d14583SBruce Richardson 1220c1d14583SBruce Richardson rxq->rxseg_nb = n_seg; 1221c1d14583SBruce Richardson if (n_seg > 1) { 1222c1d14583SBruce Richardson for (i = 0; i < n_seg; i++) 1223c1d14583SBruce Richardson memcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split, 1224c1d14583SBruce Richardson sizeof(struct rte_eth_rxseg_split)); 1225c1d14583SBruce Richardson 1226c1d14583SBruce Richardson rxq->mp = rxq->rxseg[0].mp; 1227c1d14583SBruce Richardson } else { 1228c1d14583SBruce Richardson rxq->mp = mp; 1229c1d14583SBruce Richardson } 1230c1d14583SBruce Richardson 1231c1d14583SBruce Richardson rxq->nb_rx_desc = nb_desc; 1232c1d14583SBruce Richardson rxq->rx_free_thresh = rx_conf->rx_free_thresh; 1233c1d14583SBruce Richardson rxq->queue_id = queue_idx; 1234c1d14583SBruce Richardson rxq->offloads = offloads; 1235c1d14583SBruce Richardson 1236c1d14583SBruce Richardson rxq->reg_idx = vsi->base_queue + queue_idx; 1237c1d14583SBruce Richardson rxq->port_id = dev->data->port_id; 1238c1d14583SBruce Richardson if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) 1239c1d14583SBruce Richardson rxq->crc_len = RTE_ETHER_CRC_LEN; 1240c1d14583SBruce Richardson else 1241c1d14583SBruce Richardson rxq->crc_len = 0; 1242c1d14583SBruce Richardson 1243c1d14583SBruce Richardson rxq->drop_en = rx_conf->rx_drop_en; 1244c1d14583SBruce Richardson rxq->vsi = vsi; 1245c1d14583SBruce Richardson rxq->rx_deferred_start = rx_conf->rx_deferred_start; 1246c1d14583SBruce Richardson rxq->proto_xtr = pf->proto_xtr != NULL ? 1247c1d14583SBruce Richardson pf->proto_xtr[queue_idx] : PROTO_XTR_NONE; 1248c1d14583SBruce Richardson if (rxq->proto_xtr != PROTO_XTR_NONE && 1249c1d14583SBruce Richardson ad->devargs.xtr_flag_offs[rxq->proto_xtr] != 0xff) 1250c1d14583SBruce Richardson rxq->xtr_ol_flag = 1ULL << ad->devargs.xtr_flag_offs[rxq->proto_xtr]; 1251c1d14583SBruce Richardson rxq->xtr_field_offs = ad->devargs.xtr_field_offs; 1252c1d14583SBruce Richardson 1253c1d14583SBruce Richardson /* Allocate the maximum number of RX ring hardware descriptor. */ 1254c1d14583SBruce Richardson len = ICE_MAX_RING_DESC; 1255c1d14583SBruce Richardson 1256c1d14583SBruce Richardson /** 1257c1d14583SBruce Richardson * Allocating a little more memory because vectorized/bulk_alloc Rx 1258c1d14583SBruce Richardson * functions doesn't check boundaries each time. 1259c1d14583SBruce Richardson */ 1260c1d14583SBruce Richardson len += ICE_RX_MAX_BURST; 1261c1d14583SBruce Richardson 1262c1d14583SBruce Richardson /* Allocate the maximum number of RX ring hardware descriptor. */ 1263c1d14583SBruce Richardson ring_size = sizeof(union ice_rx_flex_desc) * len; 1264c1d14583SBruce Richardson ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 1265c1d14583SBruce Richardson rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, 1266c1d14583SBruce Richardson ring_size, ICE_RING_BASE_ALIGN, 1267c1d14583SBruce Richardson socket_id); 1268c1d14583SBruce Richardson if (!rz) { 1269c1d14583SBruce Richardson ice_rx_queue_release(rxq); 1270c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX"); 1271c1d14583SBruce Richardson return -ENOMEM; 1272c1d14583SBruce Richardson } 1273c1d14583SBruce Richardson 1274c1d14583SBruce Richardson rxq->mz = rz; 1275c1d14583SBruce Richardson /* Zero all the descriptors in the ring. */ 1276c1d14583SBruce Richardson memset(rz->addr, 0, ring_size); 1277c1d14583SBruce Richardson 1278c1d14583SBruce Richardson rxq->rx_ring_dma = rz->iova; 1279c1d14583SBruce Richardson rxq->rx_ring = rz->addr; 1280c1d14583SBruce Richardson 1281c1d14583SBruce Richardson /* always reserve more for bulk alloc */ 1282c1d14583SBruce Richardson len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST); 1283c1d14583SBruce Richardson 1284c1d14583SBruce Richardson /* Allocate the software ring. */ 1285c1d14583SBruce Richardson rxq->sw_ring = rte_zmalloc_socket(NULL, 1286c1d14583SBruce Richardson sizeof(struct ice_rx_entry) * len, 1287c1d14583SBruce Richardson RTE_CACHE_LINE_SIZE, 1288c1d14583SBruce Richardson socket_id); 1289c1d14583SBruce Richardson if (!rxq->sw_ring) { 1290c1d14583SBruce Richardson ice_rx_queue_release(rxq); 1291c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring"); 1292c1d14583SBruce Richardson return -ENOMEM; 1293c1d14583SBruce Richardson } 1294c1d14583SBruce Richardson 1295c1d14583SBruce Richardson ice_reset_rx_queue(rxq); 1296c1d14583SBruce Richardson rxq->q_set = true; 1297c1d14583SBruce Richardson dev->data->rx_queues[queue_idx] = rxq; 1298c1d14583SBruce Richardson rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; 1299c1d14583SBruce Richardson 1300c1d14583SBruce Richardson use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq); 1301c1d14583SBruce Richardson 1302c1d14583SBruce Richardson if (!use_def_burst_func) { 1303c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " 1304c1d14583SBruce Richardson "satisfied. Rx Burst Bulk Alloc function will be " 1305c1d14583SBruce Richardson "used on port=%d, queue=%d.", 1306c1d14583SBruce Richardson rxq->port_id, rxq->queue_id); 1307c1d14583SBruce Richardson } else { 1308c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " 1309c1d14583SBruce Richardson "not satisfied, Scattered Rx is requested. " 1310c1d14583SBruce Richardson "on port=%d, queue=%d.", 1311c1d14583SBruce Richardson rxq->port_id, rxq->queue_id); 1312c1d14583SBruce Richardson ad->rx_bulk_alloc_allowed = false; 1313c1d14583SBruce Richardson } 1314c1d14583SBruce Richardson 1315c1d14583SBruce Richardson return 0; 1316c1d14583SBruce Richardson } 1317c1d14583SBruce Richardson 1318c1d14583SBruce Richardson void 1319c1d14583SBruce Richardson ice_rx_queue_release(void *rxq) 1320c1d14583SBruce Richardson { 1321c1d14583SBruce Richardson struct ice_rx_queue *q = (struct ice_rx_queue *)rxq; 1322c1d14583SBruce Richardson 1323c1d14583SBruce Richardson if (!q) { 1324c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL"); 1325c1d14583SBruce Richardson return; 1326c1d14583SBruce Richardson } 1327c1d14583SBruce Richardson 1328c1d14583SBruce Richardson if (q->rx_rel_mbufs != NULL) 1329c1d14583SBruce Richardson q->rx_rel_mbufs(q); 1330c1d14583SBruce Richardson rte_free(q->sw_ring); 1331c1d14583SBruce Richardson rte_memzone_free(q->mz); 1332c1d14583SBruce Richardson rte_free(q); 1333c1d14583SBruce Richardson } 1334c1d14583SBruce Richardson 1335c1d14583SBruce Richardson int 1336c1d14583SBruce Richardson ice_tx_queue_setup(struct rte_eth_dev *dev, 1337c1d14583SBruce Richardson uint16_t queue_idx, 1338c1d14583SBruce Richardson uint16_t nb_desc, 1339c1d14583SBruce Richardson unsigned int socket_id, 1340c1d14583SBruce Richardson const struct rte_eth_txconf *tx_conf) 1341c1d14583SBruce Richardson { 1342c1d14583SBruce Richardson struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); 1343c1d14583SBruce Richardson struct ice_vsi *vsi = pf->main_vsi; 1344c038157aSBruce Richardson struct ci_tx_queue *txq; 1345c1d14583SBruce Richardson const struct rte_memzone *tz; 1346c1d14583SBruce Richardson uint32_t ring_size; 1347c1d14583SBruce Richardson uint16_t tx_rs_thresh, tx_free_thresh; 1348c1d14583SBruce Richardson uint64_t offloads; 1349c1d14583SBruce Richardson 1350c1d14583SBruce Richardson offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; 1351c1d14583SBruce Richardson 1352c1d14583SBruce Richardson if (nb_desc % ICE_ALIGN_RING_DESC != 0 || 1353c1d14583SBruce Richardson nb_desc > ICE_MAX_RING_DESC || 1354c1d14583SBruce Richardson nb_desc < ICE_MIN_RING_DESC) { 1355c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is " 1356c1d14583SBruce Richardson "invalid", nb_desc); 1357c1d14583SBruce Richardson return -EINVAL; 1358c1d14583SBruce Richardson } 1359c1d14583SBruce Richardson 1360c1d14583SBruce Richardson /** 1361c1d14583SBruce Richardson * The following two parameters control the setting of the RS bit on 1362c1d14583SBruce Richardson * transmit descriptors. TX descriptors will have their RS bit set 1363c1d14583SBruce Richardson * after txq->tx_rs_thresh descriptors have been used. The TX 1364c1d14583SBruce Richardson * descriptor ring will be cleaned after txq->tx_free_thresh 1365c1d14583SBruce Richardson * descriptors are used or if the number of descriptors required to 1366c1d14583SBruce Richardson * transmit a packet is greater than the number of free TX descriptors. 1367c1d14583SBruce Richardson * 1368c1d14583SBruce Richardson * The following constraints must be satisfied: 1369c1d14583SBruce Richardson * - tx_rs_thresh must be greater than 0. 1370c1d14583SBruce Richardson * - tx_rs_thresh must be less than the size of the ring minus 2. 1371c1d14583SBruce Richardson * - tx_rs_thresh must be less than or equal to tx_free_thresh. 1372c1d14583SBruce Richardson * - tx_rs_thresh must be a divisor of the ring size. 1373c1d14583SBruce Richardson * - tx_free_thresh must be greater than 0. 1374c1d14583SBruce Richardson * - tx_free_thresh must be less than the size of the ring minus 3. 1375c1d14583SBruce Richardson * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc. 1376c1d14583SBruce Richardson * 1377c1d14583SBruce Richardson * One descriptor in the TX ring is used as a sentinel to avoid a H/W 1378c1d14583SBruce Richardson * race condition, hence the maximum threshold constraints. When set 1379c1d14583SBruce Richardson * to zero use default values. 1380c1d14583SBruce Richardson */ 1381c1d14583SBruce Richardson tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ? 1382c1d14583SBruce Richardson tx_conf->tx_free_thresh : 1383c1d14583SBruce Richardson ICE_DEFAULT_TX_FREE_THRESH); 1384c1d14583SBruce Richardson /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */ 1385c1d14583SBruce Richardson tx_rs_thresh = 1386c1d14583SBruce Richardson (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ? 1387c1d14583SBruce Richardson nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH; 1388c1d14583SBruce Richardson if (tx_conf->tx_rs_thresh) 1389c1d14583SBruce Richardson tx_rs_thresh = tx_conf->tx_rs_thresh; 1390c1d14583SBruce Richardson if (tx_rs_thresh + tx_free_thresh > nb_desc) { 1391c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " 1392c1d14583SBruce Richardson "exceed nb_desc. (tx_rs_thresh=%u " 1393c1d14583SBruce Richardson "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)", 1394c1d14583SBruce Richardson (unsigned int)tx_rs_thresh, 1395c1d14583SBruce Richardson (unsigned int)tx_free_thresh, 1396c1d14583SBruce Richardson (unsigned int)nb_desc, 1397c1d14583SBruce Richardson (int)dev->data->port_id, 1398c1d14583SBruce Richardson (int)queue_idx); 1399c1d14583SBruce Richardson return -EINVAL; 1400c1d14583SBruce Richardson } 1401c1d14583SBruce Richardson if (tx_rs_thresh >= (nb_desc - 2)) { 1402c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " 1403c1d14583SBruce Richardson "number of TX descriptors minus 2. " 1404c1d14583SBruce Richardson "(tx_rs_thresh=%u port=%d queue=%d)", 1405c1d14583SBruce Richardson (unsigned int)tx_rs_thresh, 1406c1d14583SBruce Richardson (int)dev->data->port_id, 1407c1d14583SBruce Richardson (int)queue_idx); 1408c1d14583SBruce Richardson return -EINVAL; 1409c1d14583SBruce Richardson } 1410c1d14583SBruce Richardson if (tx_free_thresh >= (nb_desc - 3)) { 1411c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " 1412c1d14583SBruce Richardson "tx_free_thresh must be less than the " 1413c1d14583SBruce Richardson "number of TX descriptors minus 3. " 1414c1d14583SBruce Richardson "(tx_free_thresh=%u port=%d queue=%d)", 1415c1d14583SBruce Richardson (unsigned int)tx_free_thresh, 1416c1d14583SBruce Richardson (int)dev->data->port_id, 1417c1d14583SBruce Richardson (int)queue_idx); 1418c1d14583SBruce Richardson return -EINVAL; 1419c1d14583SBruce Richardson } 1420c1d14583SBruce Richardson if (tx_rs_thresh > tx_free_thresh) { 1421c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or " 1422c1d14583SBruce Richardson "equal to tx_free_thresh. (tx_free_thresh=%u" 1423c1d14583SBruce Richardson " tx_rs_thresh=%u port=%d queue=%d)", 1424c1d14583SBruce Richardson (unsigned int)tx_free_thresh, 1425c1d14583SBruce Richardson (unsigned int)tx_rs_thresh, 1426c1d14583SBruce Richardson (int)dev->data->port_id, 1427c1d14583SBruce Richardson (int)queue_idx); 1428c1d14583SBruce Richardson return -EINVAL; 1429c1d14583SBruce Richardson } 1430c1d14583SBruce Richardson if ((nb_desc % tx_rs_thresh) != 0) { 1431c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the " 1432c1d14583SBruce Richardson "number of TX descriptors. (tx_rs_thresh=%u" 1433c1d14583SBruce Richardson " port=%d queue=%d)", 1434c1d14583SBruce Richardson (unsigned int)tx_rs_thresh, 1435c1d14583SBruce Richardson (int)dev->data->port_id, 1436c1d14583SBruce Richardson (int)queue_idx); 1437c1d14583SBruce Richardson return -EINVAL; 1438c1d14583SBruce Richardson } 1439c1d14583SBruce Richardson if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) { 1440c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if " 1441c1d14583SBruce Richardson "tx_rs_thresh is greater than 1. " 1442c1d14583SBruce Richardson "(tx_rs_thresh=%u port=%d queue=%d)", 1443c1d14583SBruce Richardson (unsigned int)tx_rs_thresh, 1444c1d14583SBruce Richardson (int)dev->data->port_id, 1445c1d14583SBruce Richardson (int)queue_idx); 1446c1d14583SBruce Richardson return -EINVAL; 1447c1d14583SBruce Richardson } 1448c1d14583SBruce Richardson 1449c1d14583SBruce Richardson /* Free memory if needed. */ 1450c1d14583SBruce Richardson if (dev->data->tx_queues[queue_idx]) { 1451c1d14583SBruce Richardson ice_tx_queue_release(dev->data->tx_queues[queue_idx]); 1452c1d14583SBruce Richardson dev->data->tx_queues[queue_idx] = NULL; 1453c1d14583SBruce Richardson } 1454c1d14583SBruce Richardson 1455c1d14583SBruce Richardson /* Allocate the TX queue data structure. */ 1456c1d14583SBruce Richardson txq = rte_zmalloc_socket(NULL, 1457c038157aSBruce Richardson sizeof(struct ci_tx_queue), 1458c1d14583SBruce Richardson RTE_CACHE_LINE_SIZE, 1459c1d14583SBruce Richardson socket_id); 1460c1d14583SBruce Richardson if (!txq) { 1461c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Failed to allocate memory for " 1462c1d14583SBruce Richardson "tx queue structure"); 1463c1d14583SBruce Richardson return -ENOMEM; 1464c1d14583SBruce Richardson } 1465c1d14583SBruce Richardson 1466c1d14583SBruce Richardson /* Allocate TX hardware ring descriptors. */ 1467c1d14583SBruce Richardson ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC; 1468c1d14583SBruce Richardson ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 14694d0f54d9SBruce Richardson tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx, 1470c1d14583SBruce Richardson ring_size, ICE_RING_BASE_ALIGN, 1471c1d14583SBruce Richardson socket_id); 1472c1d14583SBruce Richardson if (!tz) { 1473c1d14583SBruce Richardson ice_tx_queue_release(txq); 1474c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX"); 1475c1d14583SBruce Richardson return -ENOMEM; 1476c1d14583SBruce Richardson } 1477c1d14583SBruce Richardson 1478c1d14583SBruce Richardson txq->mz = tz; 1479c1d14583SBruce Richardson txq->nb_tx_desc = nb_desc; 1480c1d14583SBruce Richardson txq->tx_rs_thresh = tx_rs_thresh; 1481c1d14583SBruce Richardson txq->tx_free_thresh = tx_free_thresh; 1482c1d14583SBruce Richardson txq->queue_id = queue_idx; 1483c1d14583SBruce Richardson 1484c1d14583SBruce Richardson txq->reg_idx = vsi->base_queue + queue_idx; 1485c1d14583SBruce Richardson txq->port_id = dev->data->port_id; 1486c1d14583SBruce Richardson txq->offloads = offloads; 14874d0f54d9SBruce Richardson txq->ice_vsi = vsi; 1488c1d14583SBruce Richardson txq->tx_deferred_start = tx_conf->tx_deferred_start; 1489c1d14583SBruce Richardson 1490c1d14583SBruce Richardson txq->tx_ring_dma = tz->iova; 14914d0f54d9SBruce Richardson txq->ice_tx_ring = tz->addr; 1492c1d14583SBruce Richardson 1493c1d14583SBruce Richardson /* Allocate software ring */ 1494c1d14583SBruce Richardson txq->sw_ring = 1495c1d14583SBruce Richardson rte_zmalloc_socket(NULL, 14965cc9919fSBruce Richardson sizeof(struct ci_tx_entry) * nb_desc, 1497c1d14583SBruce Richardson RTE_CACHE_LINE_SIZE, 1498c1d14583SBruce Richardson socket_id); 1499c1d14583SBruce Richardson if (!txq->sw_ring) { 1500c1d14583SBruce Richardson ice_tx_queue_release(txq); 1501c1d14583SBruce Richardson PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring"); 1502c1d14583SBruce Richardson return -ENOMEM; 1503c1d14583SBruce Richardson } 1504c1d14583SBruce Richardson 1505c1d14583SBruce Richardson ice_reset_tx_queue(txq); 1506c1d14583SBruce Richardson txq->q_set = true; 1507c1d14583SBruce Richardson dev->data->tx_queues[queue_idx] = txq; 1508c1d14583SBruce Richardson ice_set_tx_function_flag(dev, txq); 1509c1d14583SBruce Richardson 1510c1d14583SBruce Richardson return 0; 1511c1d14583SBruce Richardson } 1512c1d14583SBruce Richardson 1513c1d14583SBruce Richardson void 1514c1d14583SBruce Richardson ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1515c1d14583SBruce Richardson { 1516c1d14583SBruce Richardson ice_rx_queue_release(dev->data->rx_queues[qid]); 1517c1d14583SBruce Richardson } 1518c1d14583SBruce Richardson 1519c1d14583SBruce Richardson void 1520c1d14583SBruce Richardson ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1521c1d14583SBruce Richardson { 1522c1d14583SBruce Richardson ice_tx_queue_release(dev->data->tx_queues[qid]); 1523c1d14583SBruce Richardson } 1524c1d14583SBruce Richardson 1525c1d14583SBruce Richardson void 1526c1d14583SBruce Richardson ice_tx_queue_release(void *txq) 1527c1d14583SBruce Richardson { 1528c038157aSBruce Richardson struct ci_tx_queue *q = (struct ci_tx_queue *)txq; 1529c1d14583SBruce Richardson 1530c1d14583SBruce Richardson if (!q) { 1531c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL"); 1532c1d14583SBruce Richardson return; 1533c1d14583SBruce Richardson } 1534c1d14583SBruce Richardson 1535*cef05386SBruce Richardson ci_txq_release_all_mbufs(q, false); 1536c1d14583SBruce Richardson rte_free(q->sw_ring); 1537c1d14583SBruce Richardson rte_memzone_free(q->mz); 1538c1d14583SBruce Richardson rte_free(q); 1539c1d14583SBruce Richardson } 1540c1d14583SBruce Richardson 1541c1d14583SBruce Richardson void 1542c1d14583SBruce Richardson ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1543c1d14583SBruce Richardson struct rte_eth_rxq_info *qinfo) 1544c1d14583SBruce Richardson { 1545c1d14583SBruce Richardson struct ice_rx_queue *rxq; 1546c1d14583SBruce Richardson 1547c1d14583SBruce Richardson rxq = dev->data->rx_queues[queue_id]; 1548c1d14583SBruce Richardson 1549c1d14583SBruce Richardson qinfo->mp = rxq->mp; 1550c1d14583SBruce Richardson qinfo->scattered_rx = dev->data->scattered_rx; 1551c1d14583SBruce Richardson qinfo->nb_desc = rxq->nb_rx_desc; 1552c1d14583SBruce Richardson 1553c1d14583SBruce Richardson qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; 1554c1d14583SBruce Richardson qinfo->conf.rx_drop_en = rxq->drop_en; 1555c1d14583SBruce Richardson qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; 1556c1d14583SBruce Richardson } 1557c1d14583SBruce Richardson 1558c1d14583SBruce Richardson void 1559c1d14583SBruce Richardson ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 1560c1d14583SBruce Richardson struct rte_eth_txq_info *qinfo) 1561c1d14583SBruce Richardson { 1562c038157aSBruce Richardson struct ci_tx_queue *txq; 1563c1d14583SBruce Richardson 1564c1d14583SBruce Richardson txq = dev->data->tx_queues[queue_id]; 1565c1d14583SBruce Richardson 1566c1d14583SBruce Richardson qinfo->nb_desc = txq->nb_tx_desc; 1567c1d14583SBruce Richardson 1568e3b5f52dSBruce Richardson qinfo->conf.tx_thresh.pthresh = ICE_DEFAULT_TX_PTHRESH; 1569e3b5f52dSBruce Richardson qinfo->conf.tx_thresh.hthresh = ICE_DEFAULT_TX_HTHRESH; 1570e3b5f52dSBruce Richardson qinfo->conf.tx_thresh.wthresh = ICE_DEFAULT_TX_WTHRESH; 1571c1d14583SBruce Richardson 1572c1d14583SBruce Richardson qinfo->conf.tx_free_thresh = txq->tx_free_thresh; 1573c1d14583SBruce Richardson qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; 1574c1d14583SBruce Richardson qinfo->conf.offloads = txq->offloads; 1575c1d14583SBruce Richardson qinfo->conf.tx_deferred_start = txq->tx_deferred_start; 1576c1d14583SBruce Richardson } 1577c1d14583SBruce Richardson 1578c1d14583SBruce Richardson uint32_t 1579c1d14583SBruce Richardson ice_rx_queue_count(void *rx_queue) 1580c1d14583SBruce Richardson { 1581c1d14583SBruce Richardson #define ICE_RXQ_SCAN_INTERVAL 4 1582c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 1583c1d14583SBruce Richardson struct ice_rx_queue *rxq; 1584c1d14583SBruce Richardson uint16_t desc = 0; 1585c1d14583SBruce Richardson 1586c1d14583SBruce Richardson rxq = rx_queue; 1587c1d14583SBruce Richardson rxdp = &rxq->rx_ring[rxq->rx_tail]; 1588c1d14583SBruce Richardson while ((desc < rxq->nb_rx_desc) && 1589c1d14583SBruce Richardson rte_le_to_cpu_16(rxdp->wb.status_error0) & 1590c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) { 1591c1d14583SBruce Richardson /** 1592c1d14583SBruce Richardson * Check the DD bit of a rx descriptor of each 4 in a group, 1593c1d14583SBruce Richardson * to avoid checking too frequently and downgrading performance 1594c1d14583SBruce Richardson * too much. 1595c1d14583SBruce Richardson */ 1596c1d14583SBruce Richardson desc += ICE_RXQ_SCAN_INTERVAL; 1597c1d14583SBruce Richardson rxdp += ICE_RXQ_SCAN_INTERVAL; 1598c1d14583SBruce Richardson if (rxq->rx_tail + desc >= rxq->nb_rx_desc) 1599c1d14583SBruce Richardson rxdp = &(rxq->rx_ring[rxq->rx_tail + 1600c1d14583SBruce Richardson desc - rxq->nb_rx_desc]); 1601c1d14583SBruce Richardson } 1602c1d14583SBruce Richardson 1603c1d14583SBruce Richardson return desc; 1604c1d14583SBruce Richardson } 1605c1d14583SBruce Richardson 1606c1d14583SBruce Richardson #define ICE_RX_FLEX_ERR0_BITS \ 1607c1d14583SBruce Richardson ((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) | \ 1608c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | \ 1609c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | \ 1610c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \ 1611c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) | \ 1612c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S)) 1613c1d14583SBruce Richardson 1614c1d14583SBruce Richardson /* Rx L3/L4 checksum */ 1615c1d14583SBruce Richardson static inline uint64_t 1616c1d14583SBruce Richardson ice_rxd_error_to_pkt_flags(uint16_t stat_err0) 1617c1d14583SBruce Richardson { 1618c1d14583SBruce Richardson uint64_t flags = 0; 1619c1d14583SBruce Richardson 1620c1d14583SBruce Richardson /* check if HW has decoded the packet and checksum */ 1621c1d14583SBruce Richardson if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))) 1622c1d14583SBruce Richardson return 0; 1623c1d14583SBruce Richardson 1624c1d14583SBruce Richardson if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) { 1625c1d14583SBruce Richardson flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD | 1626c1d14583SBruce Richardson RTE_MBUF_F_RX_L4_CKSUM_GOOD | 1627c1d14583SBruce Richardson RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD); 1628c1d14583SBruce Richardson return flags; 1629c1d14583SBruce Richardson } 1630c1d14583SBruce Richardson 1631c1d14583SBruce Richardson if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))) 1632c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 1633c1d14583SBruce Richardson else 1634c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 1635c1d14583SBruce Richardson 1636c1d14583SBruce Richardson if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))) 1637c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 1638c1d14583SBruce Richardson else 1639c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 1640c1d14583SBruce Richardson 1641c1d14583SBruce Richardson if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) 1642c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; 1643c1d14583SBruce Richardson 1644c1d14583SBruce Richardson if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) 1645c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD; 1646c1d14583SBruce Richardson else 1647c1d14583SBruce Richardson flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; 1648c1d14583SBruce Richardson 1649c1d14583SBruce Richardson return flags; 1650c1d14583SBruce Richardson } 1651c1d14583SBruce Richardson 1652c1d14583SBruce Richardson static inline void 1653c1d14583SBruce Richardson ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp) 1654c1d14583SBruce Richardson { 1655c1d14583SBruce Richardson if (rte_le_to_cpu_16(rxdp->wb.status_error0) & 1656c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { 1657c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 1658c1d14583SBruce Richardson mb->vlan_tci = 1659c1d14583SBruce Richardson rte_le_to_cpu_16(rxdp->wb.l2tag1); 1660c1d14583SBruce Richardson PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", 1661c1d14583SBruce Richardson rte_le_to_cpu_16(rxdp->wb.l2tag1)); 1662c1d14583SBruce Richardson } else { 1663c1d14583SBruce Richardson mb->vlan_tci = 0; 1664c1d14583SBruce Richardson } 1665c1d14583SBruce Richardson 1666c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1667c1d14583SBruce Richardson if (rte_le_to_cpu_16(rxdp->wb.status_error1) & 1668c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { 1669c1d14583SBruce Richardson mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ | 1670c1d14583SBruce Richardson RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; 1671c1d14583SBruce Richardson mb->vlan_tci_outer = mb->vlan_tci; 1672c1d14583SBruce Richardson mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); 1673c1d14583SBruce Richardson PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", 1674c1d14583SBruce Richardson rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), 1675c1d14583SBruce Richardson rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); 1676c1d14583SBruce Richardson } else { 1677c1d14583SBruce Richardson mb->vlan_tci_outer = 0; 1678c1d14583SBruce Richardson } 1679c1d14583SBruce Richardson #endif 1680c1d14583SBruce Richardson PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u", 1681c1d14583SBruce Richardson mb->vlan_tci, mb->vlan_tci_outer); 1682c1d14583SBruce Richardson } 1683c1d14583SBruce Richardson 1684c1d14583SBruce Richardson #define ICE_LOOK_AHEAD 8 1685c1d14583SBruce Richardson #if (ICE_LOOK_AHEAD != 8) 1686c1d14583SBruce Richardson #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n" 1687c1d14583SBruce Richardson #endif 1688c1d14583SBruce Richardson 1689c1d14583SBruce Richardson #define ICE_PTP_TS_VALID 0x1 1690c1d14583SBruce Richardson 1691c1d14583SBruce Richardson static inline int 1692c1d14583SBruce Richardson ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) 1693c1d14583SBruce Richardson { 1694c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 1695c1d14583SBruce Richardson struct ice_rx_entry *rxep; 1696c1d14583SBruce Richardson struct rte_mbuf *mb; 1697c1d14583SBruce Richardson uint16_t stat_err0; 1698c1d14583SBruce Richardson uint16_t pkt_len, hdr_len; 1699c1d14583SBruce Richardson int32_t s[ICE_LOOK_AHEAD], nb_dd; 1700c1d14583SBruce Richardson int32_t i, j, nb_rx = 0; 1701c1d14583SBruce Richardson uint64_t pkt_flags = 0; 1702c1d14583SBruce Richardson uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 1703c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1704c1d14583SBruce Richardson bool is_tsinit = false; 1705c1d14583SBruce Richardson uint64_t ts_ns; 1706c1d14583SBruce Richardson struct ice_vsi *vsi = rxq->vsi; 1707c1d14583SBruce Richardson struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 1708c1d14583SBruce Richardson struct ice_adapter *ad = rxq->vsi->adapter; 1709c1d14583SBruce Richardson #endif 1710c1d14583SBruce Richardson rxdp = &rxq->rx_ring[rxq->rx_tail]; 1711c1d14583SBruce Richardson rxep = &rxq->sw_ring[rxq->rx_tail]; 1712c1d14583SBruce Richardson 1713c1d14583SBruce Richardson stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 1714c1d14583SBruce Richardson 1715c1d14583SBruce Richardson /* Make sure there is at least 1 packet to receive */ 1716c1d14583SBruce Richardson if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 1717c1d14583SBruce Richardson return 0; 1718c1d14583SBruce Richardson 1719c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1720c1d14583SBruce Richardson if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 1721c1d14583SBruce Richardson uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 1722c1d14583SBruce Richardson 1723c1d14583SBruce Richardson if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 1724c1d14583SBruce Richardson is_tsinit = 1; 1725c1d14583SBruce Richardson } 1726c1d14583SBruce Richardson #endif 1727c1d14583SBruce Richardson 1728c1d14583SBruce Richardson /** 1729c1d14583SBruce Richardson * Scan LOOK_AHEAD descriptors at a time to determine which 1730c1d14583SBruce Richardson * descriptors reference packets that are ready to be received. 1731c1d14583SBruce Richardson */ 1732c1d14583SBruce Richardson for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD, 1733c1d14583SBruce Richardson rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) { 1734c1d14583SBruce Richardson /* Read desc statuses backwards to avoid race condition */ 1735c1d14583SBruce Richardson for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--) 1736c1d14583SBruce Richardson s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0); 1737c1d14583SBruce Richardson 1738c1d14583SBruce Richardson rte_smp_rmb(); 1739c1d14583SBruce Richardson 1740c1d14583SBruce Richardson /* Compute how many status bits were set */ 1741c1d14583SBruce Richardson for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++) 1742c1d14583SBruce Richardson nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S); 1743c1d14583SBruce Richardson 1744c1d14583SBruce Richardson nb_rx += nb_dd; 1745c1d14583SBruce Richardson 1746c1d14583SBruce Richardson /* Translate descriptor info to mbuf parameters */ 1747c1d14583SBruce Richardson for (j = 0; j < nb_dd; j++) { 1748c1d14583SBruce Richardson mb = rxep[j].mbuf; 1749c1d14583SBruce Richardson pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1750c1d14583SBruce Richardson ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1751c1d14583SBruce Richardson mb->data_len = pkt_len; 1752c1d14583SBruce Richardson mb->pkt_len = pkt_len; 1753c1d14583SBruce Richardson 1754c1d14583SBruce Richardson if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1755c1d14583SBruce Richardson pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1756c1d14583SBruce Richardson ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1757c1d14583SBruce Richardson mb->data_len = pkt_len; 1758c1d14583SBruce Richardson mb->pkt_len = pkt_len; 1759c1d14583SBruce Richardson } else { 1760c1d14583SBruce Richardson mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs); 1761c1d14583SBruce Richardson mb->next->next = NULL; 1762c1d14583SBruce Richardson hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) & 1763c1d14583SBruce Richardson ICE_RX_FLEX_DESC_HEADER_LEN_M; 1764c1d14583SBruce Richardson pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) & 1765c1d14583SBruce Richardson ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 1766c1d14583SBruce Richardson mb->data_len = hdr_len; 1767c1d14583SBruce Richardson mb->pkt_len = hdr_len + pkt_len; 1768c1d14583SBruce Richardson mb->next->data_len = pkt_len; 1769c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_RX 1770c1d14583SBruce Richardson rte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb)); 1771c1d14583SBruce Richardson #endif 1772c1d14583SBruce Richardson } 1773c1d14583SBruce Richardson 1774c1d14583SBruce Richardson mb->ol_flags = 0; 1775c1d14583SBruce Richardson stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); 1776c1d14583SBruce Richardson pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0); 1777c1d14583SBruce Richardson mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 1778c1d14583SBruce Richardson rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; 1779c1d14583SBruce Richardson ice_rxd_to_vlan_tci(mb, &rxdp[j]); 1780c1d14583SBruce Richardson rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); 1781c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 1782c1d14583SBruce Richardson if (ice_timestamp_dynflag > 0 && 1783c1d14583SBruce Richardson (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 1784c1d14583SBruce Richardson rxq->time_high = 1785c1d14583SBruce Richardson rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); 1786c1d14583SBruce Richardson if (unlikely(is_tsinit)) { 1787c1d14583SBruce Richardson ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, 1788c1d14583SBruce Richardson rxq->time_high); 1789c1d14583SBruce Richardson rxq->hw_time_low = (uint32_t)ts_ns; 1790c1d14583SBruce Richardson rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 1791c1d14583SBruce Richardson is_tsinit = false; 1792c1d14583SBruce Richardson } else { 1793c1d14583SBruce Richardson if (rxq->time_high < rxq->hw_time_low) 1794c1d14583SBruce Richardson rxq->hw_time_high += 1; 1795c1d14583SBruce Richardson ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 1796c1d14583SBruce Richardson rxq->hw_time_low = rxq->time_high; 1797c1d14583SBruce Richardson } 1798c1d14583SBruce Richardson rxq->hw_time_update = rte_get_timer_cycles() / 1799c1d14583SBruce Richardson (rte_get_timer_hz() / 1000); 1800c1d14583SBruce Richardson *RTE_MBUF_DYNFIELD(mb, 1801c1d14583SBruce Richardson ice_timestamp_dynfield_offset, 1802c1d14583SBruce Richardson rte_mbuf_timestamp_t *) = ts_ns; 1803c1d14583SBruce Richardson pkt_flags |= ice_timestamp_dynflag; 1804c1d14583SBruce Richardson } 1805c1d14583SBruce Richardson 1806c1d14583SBruce Richardson if (ad->ptp_ena && ((mb->packet_type & 1807c1d14583SBruce Richardson RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) { 1808c1d14583SBruce Richardson rxq->time_high = 1809c1d14583SBruce Richardson rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); 1810c1d14583SBruce Richardson mb->timesync = rxq->queue_id; 1811c1d14583SBruce Richardson pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 1812c1d14583SBruce Richardson if (rxdp[j].wb.time_stamp_low & 1813c1d14583SBruce Richardson ICE_PTP_TS_VALID) 1814c1d14583SBruce Richardson pkt_flags |= 1815c1d14583SBruce Richardson RTE_MBUF_F_RX_IEEE1588_TMST; 1816c1d14583SBruce Richardson } 1817c1d14583SBruce Richardson #endif 1818c1d14583SBruce Richardson mb->ol_flags |= pkt_flags; 1819c1d14583SBruce Richardson } 1820c1d14583SBruce Richardson 1821c1d14583SBruce Richardson for (j = 0; j < ICE_LOOK_AHEAD; j++) 1822c1d14583SBruce Richardson rxq->rx_stage[i + j] = rxep[j].mbuf; 1823c1d14583SBruce Richardson 1824c1d14583SBruce Richardson if (nb_dd != ICE_LOOK_AHEAD) 1825c1d14583SBruce Richardson break; 1826c1d14583SBruce Richardson } 1827c1d14583SBruce Richardson 1828c1d14583SBruce Richardson /* Clear software ring entries */ 1829c1d14583SBruce Richardson for (i = 0; i < nb_rx; i++) 1830c1d14583SBruce Richardson rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL; 1831c1d14583SBruce Richardson 1832c1d14583SBruce Richardson PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: " 1833c1d14583SBruce Richardson "port_id=%u, queue_id=%u, nb_rx=%d", 1834c1d14583SBruce Richardson rxq->port_id, rxq->queue_id, nb_rx); 1835c1d14583SBruce Richardson 1836c1d14583SBruce Richardson return nb_rx; 1837c1d14583SBruce Richardson } 1838c1d14583SBruce Richardson 1839c1d14583SBruce Richardson static inline uint16_t 1840c1d14583SBruce Richardson ice_rx_fill_from_stage(struct ice_rx_queue *rxq, 1841c1d14583SBruce Richardson struct rte_mbuf **rx_pkts, 1842c1d14583SBruce Richardson uint16_t nb_pkts) 1843c1d14583SBruce Richardson { 1844c1d14583SBruce Richardson uint16_t i; 1845c1d14583SBruce Richardson struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; 1846c1d14583SBruce Richardson 1847c1d14583SBruce Richardson nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); 1848c1d14583SBruce Richardson 1849c1d14583SBruce Richardson for (i = 0; i < nb_pkts; i++) 1850c1d14583SBruce Richardson rx_pkts[i] = stage[i]; 1851c1d14583SBruce Richardson 1852c1d14583SBruce Richardson rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); 1853c1d14583SBruce Richardson rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); 1854c1d14583SBruce Richardson 1855c1d14583SBruce Richardson return nb_pkts; 1856c1d14583SBruce Richardson } 1857c1d14583SBruce Richardson 1858c1d14583SBruce Richardson static inline int 1859c1d14583SBruce Richardson ice_rx_alloc_bufs(struct ice_rx_queue *rxq) 1860c1d14583SBruce Richardson { 1861c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 1862c1d14583SBruce Richardson struct ice_rx_entry *rxep; 1863c1d14583SBruce Richardson struct rte_mbuf *mb; 1864c1d14583SBruce Richardson uint16_t alloc_idx, i; 1865c1d14583SBruce Richardson uint64_t dma_addr; 1866c1d14583SBruce Richardson int diag, diag_pay; 1867c1d14583SBruce Richardson uint64_t pay_addr; 1868c1d14583SBruce Richardson struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh]; 1869c1d14583SBruce Richardson 1870c1d14583SBruce Richardson /* Allocate buffers in bulk */ 1871c1d14583SBruce Richardson alloc_idx = (uint16_t)(rxq->rx_free_trigger - 1872c1d14583SBruce Richardson (rxq->rx_free_thresh - 1)); 1873c1d14583SBruce Richardson rxep = &rxq->sw_ring[alloc_idx]; 1874c1d14583SBruce Richardson diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, 1875c1d14583SBruce Richardson rxq->rx_free_thresh); 1876c1d14583SBruce Richardson if (unlikely(diag != 0)) { 1877c1d14583SBruce Richardson PMD_RX_LOG(ERR, "Failed to get mbufs in bulk"); 1878c1d14583SBruce Richardson return -ENOMEM; 1879c1d14583SBruce Richardson } 1880c1d14583SBruce Richardson 1881c1d14583SBruce Richardson if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { 1882c1d14583SBruce Richardson diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp, 1883c1d14583SBruce Richardson (void *)mbufs_pay, rxq->rx_free_thresh); 1884c1d14583SBruce Richardson if (unlikely(diag_pay != 0)) { 1885c1d14583SBruce Richardson rte_mempool_put_bulk(rxq->mp, (void *)rxep, 1886c1d14583SBruce Richardson rxq->rx_free_thresh); 1887c1d14583SBruce Richardson PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk"); 1888c1d14583SBruce Richardson return -ENOMEM; 1889c1d14583SBruce Richardson } 1890c1d14583SBruce Richardson } 1891c1d14583SBruce Richardson 1892c1d14583SBruce Richardson rxdp = &rxq->rx_ring[alloc_idx]; 1893c1d14583SBruce Richardson for (i = 0; i < rxq->rx_free_thresh; i++) { 1894c1d14583SBruce Richardson if (likely(i < (rxq->rx_free_thresh - 1))) 1895c1d14583SBruce Richardson /* Prefetch next mbuf */ 1896c1d14583SBruce Richardson rte_prefetch0(rxep[i + 1].mbuf); 1897c1d14583SBruce Richardson 1898c1d14583SBruce Richardson mb = rxep[i].mbuf; 1899c1d14583SBruce Richardson rte_mbuf_refcnt_set(mb, 1); 1900c1d14583SBruce Richardson mb->data_off = RTE_PKTMBUF_HEADROOM; 1901c1d14583SBruce Richardson mb->nb_segs = 1; 1902c1d14583SBruce Richardson mb->port = rxq->port_id; 1903c1d14583SBruce Richardson dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); 1904c1d14583SBruce Richardson 1905c1d14583SBruce Richardson if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 1906c1d14583SBruce Richardson mb->next = NULL; 1907c1d14583SBruce Richardson rxdp[i].read.hdr_addr = 0; 1908c1d14583SBruce Richardson rxdp[i].read.pkt_addr = dma_addr; 1909c1d14583SBruce Richardson } else { 1910c1d14583SBruce Richardson mb->next = mbufs_pay[i]; 1911c1d14583SBruce Richardson pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i])); 1912c1d14583SBruce Richardson rxdp[i].read.hdr_addr = dma_addr; 1913c1d14583SBruce Richardson rxdp[i].read.pkt_addr = pay_addr; 1914c1d14583SBruce Richardson } 1915c1d14583SBruce Richardson } 1916c1d14583SBruce Richardson 1917c1d14583SBruce Richardson /* Update Rx tail register */ 1918c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger); 1919c1d14583SBruce Richardson 1920c1d14583SBruce Richardson rxq->rx_free_trigger = 1921c1d14583SBruce Richardson (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); 1922c1d14583SBruce Richardson if (rxq->rx_free_trigger >= rxq->nb_rx_desc) 1923c1d14583SBruce Richardson rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); 1924c1d14583SBruce Richardson 1925c1d14583SBruce Richardson return 0; 1926c1d14583SBruce Richardson } 1927c1d14583SBruce Richardson 1928c1d14583SBruce Richardson static inline uint16_t 1929c1d14583SBruce Richardson rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 1930c1d14583SBruce Richardson { 1931c1d14583SBruce Richardson struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue; 1932c1d14583SBruce Richardson uint16_t nb_rx = 0; 1933c1d14583SBruce Richardson 1934c1d14583SBruce Richardson if (!nb_pkts) 1935c1d14583SBruce Richardson return 0; 1936c1d14583SBruce Richardson 1937c1d14583SBruce Richardson if (rxq->rx_nb_avail) 1938c1d14583SBruce Richardson return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); 1939c1d14583SBruce Richardson 1940c1d14583SBruce Richardson nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq); 1941c1d14583SBruce Richardson rxq->rx_next_avail = 0; 1942c1d14583SBruce Richardson rxq->rx_nb_avail = nb_rx; 1943c1d14583SBruce Richardson rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); 1944c1d14583SBruce Richardson 1945c1d14583SBruce Richardson if (rxq->rx_tail > rxq->rx_free_trigger) { 1946c1d14583SBruce Richardson if (ice_rx_alloc_bufs(rxq) != 0) { 1947c1d14583SBruce Richardson uint16_t i, j; 1948c1d14583SBruce Richardson 1949c1d14583SBruce Richardson rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed += 1950c1d14583SBruce Richardson rxq->rx_free_thresh; 1951c1d14583SBruce Richardson PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for " 1952c1d14583SBruce Richardson "port_id=%u, queue_id=%u", 1953c1d14583SBruce Richardson rxq->port_id, rxq->queue_id); 1954c1d14583SBruce Richardson rxq->rx_nb_avail = 0; 1955c1d14583SBruce Richardson rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); 1956c1d14583SBruce Richardson for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) 1957c1d14583SBruce Richardson rxq->sw_ring[j].mbuf = rxq->rx_stage[i]; 1958c1d14583SBruce Richardson 1959c1d14583SBruce Richardson return 0; 1960c1d14583SBruce Richardson } 1961c1d14583SBruce Richardson } 1962c1d14583SBruce Richardson 1963c1d14583SBruce Richardson if (rxq->rx_tail >= rxq->nb_rx_desc) 1964c1d14583SBruce Richardson rxq->rx_tail = 0; 1965c1d14583SBruce Richardson 1966c1d14583SBruce Richardson if (rxq->rx_nb_avail) 1967c1d14583SBruce Richardson return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); 1968c1d14583SBruce Richardson 1969c1d14583SBruce Richardson return 0; 1970c1d14583SBruce Richardson } 1971c1d14583SBruce Richardson 1972c1d14583SBruce Richardson static uint16_t 1973c1d14583SBruce Richardson ice_recv_pkts_bulk_alloc(void *rx_queue, 1974c1d14583SBruce Richardson struct rte_mbuf **rx_pkts, 1975c1d14583SBruce Richardson uint16_t nb_pkts) 1976c1d14583SBruce Richardson { 1977c1d14583SBruce Richardson uint16_t nb_rx = 0; 1978c1d14583SBruce Richardson uint16_t n; 1979c1d14583SBruce Richardson uint16_t count; 1980c1d14583SBruce Richardson 1981c1d14583SBruce Richardson if (unlikely(nb_pkts == 0)) 1982c1d14583SBruce Richardson return nb_rx; 1983c1d14583SBruce Richardson 1984c1d14583SBruce Richardson if (likely(nb_pkts <= ICE_RX_MAX_BURST)) 1985c1d14583SBruce Richardson return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); 1986c1d14583SBruce Richardson 1987c1d14583SBruce Richardson while (nb_pkts) { 1988c1d14583SBruce Richardson n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST); 1989c1d14583SBruce Richardson count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); 1990c1d14583SBruce Richardson nb_rx = (uint16_t)(nb_rx + count); 1991c1d14583SBruce Richardson nb_pkts = (uint16_t)(nb_pkts - count); 1992c1d14583SBruce Richardson if (count < n) 1993c1d14583SBruce Richardson break; 1994c1d14583SBruce Richardson } 1995c1d14583SBruce Richardson 1996c1d14583SBruce Richardson return nb_rx; 1997c1d14583SBruce Richardson } 1998c1d14583SBruce Richardson 1999c1d14583SBruce Richardson static uint16_t 2000c1d14583SBruce Richardson ice_recv_scattered_pkts(void *rx_queue, 2001c1d14583SBruce Richardson struct rte_mbuf **rx_pkts, 2002c1d14583SBruce Richardson uint16_t nb_pkts) 2003c1d14583SBruce Richardson { 2004c1d14583SBruce Richardson struct ice_rx_queue *rxq = rx_queue; 2005c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; 2006c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 2007c1d14583SBruce Richardson union ice_rx_flex_desc rxd; 2008c1d14583SBruce Richardson struct ice_rx_entry *sw_ring = rxq->sw_ring; 2009c1d14583SBruce Richardson struct ice_rx_entry *rxe; 2010c1d14583SBruce Richardson struct rte_mbuf *first_seg = rxq->pkt_first_seg; 2011c1d14583SBruce Richardson struct rte_mbuf *last_seg = rxq->pkt_last_seg; 2012c1d14583SBruce Richardson struct rte_mbuf *nmb; /* new allocated mbuf */ 2013c1d14583SBruce Richardson struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ 2014c1d14583SBruce Richardson uint16_t rx_id = rxq->rx_tail; 2015c1d14583SBruce Richardson uint16_t nb_rx = 0; 2016c1d14583SBruce Richardson uint16_t nb_hold = 0; 2017c1d14583SBruce Richardson uint16_t rx_packet_len; 2018c1d14583SBruce Richardson uint16_t rx_stat_err0; 2019c1d14583SBruce Richardson uint64_t dma_addr; 2020c1d14583SBruce Richardson uint64_t pkt_flags; 2021c1d14583SBruce Richardson uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 2022c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2023c1d14583SBruce Richardson bool is_tsinit = false; 2024c1d14583SBruce Richardson uint64_t ts_ns; 2025c1d14583SBruce Richardson struct ice_vsi *vsi = rxq->vsi; 2026c1d14583SBruce Richardson struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2027c1d14583SBruce Richardson struct ice_adapter *ad = rxq->vsi->adapter; 2028c1d14583SBruce Richardson 2029c1d14583SBruce Richardson if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 2030c1d14583SBruce Richardson uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 2031c1d14583SBruce Richardson 2032c1d14583SBruce Richardson if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 2033c1d14583SBruce Richardson is_tsinit = true; 2034c1d14583SBruce Richardson } 2035c1d14583SBruce Richardson #endif 2036c1d14583SBruce Richardson 2037c1d14583SBruce Richardson while (nb_rx < nb_pkts) { 2038c1d14583SBruce Richardson rxdp = &rx_ring[rx_id]; 2039c1d14583SBruce Richardson rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 2040c1d14583SBruce Richardson 2041c1d14583SBruce Richardson /* Check the DD bit first */ 2042c1d14583SBruce Richardson if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 2043c1d14583SBruce Richardson break; 2044c1d14583SBruce Richardson 2045c1d14583SBruce Richardson /* allocate mbuf */ 2046c1d14583SBruce Richardson nmb = rte_mbuf_raw_alloc(rxq->mp); 2047c1d14583SBruce Richardson if (unlikely(!nmb)) { 2048c1d14583SBruce Richardson rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2049c1d14583SBruce Richardson break; 2050c1d14583SBruce Richardson } 2051c1d14583SBruce Richardson rxd = *rxdp; /* copy descriptor in ring to temp variable*/ 2052c1d14583SBruce Richardson 2053c1d14583SBruce Richardson nb_hold++; 2054c1d14583SBruce Richardson rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ 2055c1d14583SBruce Richardson rx_id++; 2056c1d14583SBruce Richardson if (unlikely(rx_id == rxq->nb_rx_desc)) 2057c1d14583SBruce Richardson rx_id = 0; 2058c1d14583SBruce Richardson 2059c1d14583SBruce Richardson /* Prefetch next mbuf */ 2060c1d14583SBruce Richardson rte_prefetch0(sw_ring[rx_id].mbuf); 2061c1d14583SBruce Richardson 2062c1d14583SBruce Richardson /** 2063c1d14583SBruce Richardson * When next RX descriptor is on a cache line boundary, 2064c1d14583SBruce Richardson * prefetch the next 4 RX descriptors and next 8 pointers 2065c1d14583SBruce Richardson * to mbufs. 2066c1d14583SBruce Richardson */ 2067c1d14583SBruce Richardson if ((rx_id & 0x3) == 0) { 2068c1d14583SBruce Richardson rte_prefetch0(&rx_ring[rx_id]); 2069c1d14583SBruce Richardson rte_prefetch0(&sw_ring[rx_id]); 2070c1d14583SBruce Richardson } 2071c1d14583SBruce Richardson 2072c1d14583SBruce Richardson rxm = rxe->mbuf; 2073c1d14583SBruce Richardson rxe->mbuf = nmb; 2074c1d14583SBruce Richardson dma_addr = 2075c1d14583SBruce Richardson rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2076c1d14583SBruce Richardson 2077c1d14583SBruce Richardson /* Set data buffer address and data length of the mbuf */ 2078c1d14583SBruce Richardson rxdp->read.hdr_addr = 0; 2079c1d14583SBruce Richardson rxdp->read.pkt_addr = dma_addr; 2080c1d14583SBruce Richardson rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) & 2081c1d14583SBruce Richardson ICE_RX_FLX_DESC_PKT_LEN_M; 2082c1d14583SBruce Richardson rxm->data_len = rx_packet_len; 2083c1d14583SBruce Richardson rxm->data_off = RTE_PKTMBUF_HEADROOM; 2084c1d14583SBruce Richardson 2085c1d14583SBruce Richardson /** 2086c1d14583SBruce Richardson * If this is the first buffer of the received packet, set the 2087c1d14583SBruce Richardson * pointer to the first mbuf of the packet and initialize its 2088c1d14583SBruce Richardson * context. Otherwise, update the total length and the number 2089c1d14583SBruce Richardson * of segments of the current scattered packet, and update the 2090c1d14583SBruce Richardson * pointer to the last mbuf of the current packet. 2091c1d14583SBruce Richardson */ 2092c1d14583SBruce Richardson if (!first_seg) { 2093c1d14583SBruce Richardson first_seg = rxm; 2094c1d14583SBruce Richardson first_seg->nb_segs = 1; 2095c1d14583SBruce Richardson first_seg->pkt_len = rx_packet_len; 2096c1d14583SBruce Richardson } else { 2097c1d14583SBruce Richardson first_seg->pkt_len = 2098c1d14583SBruce Richardson (uint16_t)(first_seg->pkt_len + 2099c1d14583SBruce Richardson rx_packet_len); 2100c1d14583SBruce Richardson first_seg->nb_segs++; 2101c1d14583SBruce Richardson last_seg->next = rxm; 2102c1d14583SBruce Richardson } 2103c1d14583SBruce Richardson 2104c1d14583SBruce Richardson /** 2105c1d14583SBruce Richardson * If this is not the last buffer of the received packet, 2106c1d14583SBruce Richardson * update the pointer to the last mbuf of the current scattered 2107c1d14583SBruce Richardson * packet and continue to parse the RX ring. 2108c1d14583SBruce Richardson */ 2109c1d14583SBruce Richardson if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) { 2110c1d14583SBruce Richardson last_seg = rxm; 2111c1d14583SBruce Richardson continue; 2112c1d14583SBruce Richardson } 2113c1d14583SBruce Richardson 2114c1d14583SBruce Richardson /** 2115c1d14583SBruce Richardson * This is the last buffer of the received packet. If the CRC 2116c1d14583SBruce Richardson * is not stripped by the hardware: 2117c1d14583SBruce Richardson * - Subtract the CRC length from the total packet length. 2118c1d14583SBruce Richardson * - If the last buffer only contains the whole CRC or a part 2119c1d14583SBruce Richardson * of it, free the mbuf associated to the last buffer. If part 2120c1d14583SBruce Richardson * of the CRC is also contained in the previous mbuf, subtract 2121c1d14583SBruce Richardson * the length of that CRC part from the data length of the 2122c1d14583SBruce Richardson * previous mbuf. 2123c1d14583SBruce Richardson */ 2124c1d14583SBruce Richardson rxm->next = NULL; 2125c1d14583SBruce Richardson if (unlikely(rxq->crc_len > 0)) { 2126c1d14583SBruce Richardson first_seg->pkt_len -= RTE_ETHER_CRC_LEN; 2127c1d14583SBruce Richardson if (rx_packet_len <= RTE_ETHER_CRC_LEN) { 2128c1d14583SBruce Richardson rte_pktmbuf_free_seg(rxm); 2129c1d14583SBruce Richardson first_seg->nb_segs--; 2130c1d14583SBruce Richardson last_seg->data_len = 2131c1d14583SBruce Richardson (uint16_t)(last_seg->data_len - 2132c1d14583SBruce Richardson (RTE_ETHER_CRC_LEN - rx_packet_len)); 2133c1d14583SBruce Richardson last_seg->next = NULL; 2134c1d14583SBruce Richardson } else 2135c1d14583SBruce Richardson rxm->data_len = (uint16_t)(rx_packet_len - 2136c1d14583SBruce Richardson RTE_ETHER_CRC_LEN); 2137c1d14583SBruce Richardson } else if (rx_packet_len == 0) { 2138c1d14583SBruce Richardson rte_pktmbuf_free_seg(rxm); 2139c1d14583SBruce Richardson first_seg->nb_segs--; 2140c1d14583SBruce Richardson last_seg->next = NULL; 2141c1d14583SBruce Richardson } 2142c1d14583SBruce Richardson 2143c1d14583SBruce Richardson first_seg->port = rxq->port_id; 2144c1d14583SBruce Richardson first_seg->ol_flags = 0; 2145c1d14583SBruce Richardson first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 2146c1d14583SBruce Richardson rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; 2147c1d14583SBruce Richardson ice_rxd_to_vlan_tci(first_seg, &rxd); 2148c1d14583SBruce Richardson rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); 2149c1d14583SBruce Richardson pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); 2150c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2151c1d14583SBruce Richardson if (ice_timestamp_dynflag > 0 && 2152c1d14583SBruce Richardson (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 2153c1d14583SBruce Richardson rxq->time_high = 2154c1d14583SBruce Richardson rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2155c1d14583SBruce Richardson if (unlikely(is_tsinit)) { 2156c1d14583SBruce Richardson ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); 2157c1d14583SBruce Richardson rxq->hw_time_low = (uint32_t)ts_ns; 2158c1d14583SBruce Richardson rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 2159c1d14583SBruce Richardson is_tsinit = false; 2160c1d14583SBruce Richardson } else { 2161c1d14583SBruce Richardson if (rxq->time_high < rxq->hw_time_low) 2162c1d14583SBruce Richardson rxq->hw_time_high += 1; 2163c1d14583SBruce Richardson ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 2164c1d14583SBruce Richardson rxq->hw_time_low = rxq->time_high; 2165c1d14583SBruce Richardson } 2166c1d14583SBruce Richardson rxq->hw_time_update = rte_get_timer_cycles() / 2167c1d14583SBruce Richardson (rte_get_timer_hz() / 1000); 2168c1d14583SBruce Richardson *RTE_MBUF_DYNFIELD(first_seg, 2169c1d14583SBruce Richardson (ice_timestamp_dynfield_offset), 2170c1d14583SBruce Richardson rte_mbuf_timestamp_t *) = ts_ns; 2171c1d14583SBruce Richardson pkt_flags |= ice_timestamp_dynflag; 2172c1d14583SBruce Richardson } 2173c1d14583SBruce Richardson 2174c1d14583SBruce Richardson if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK) 2175c1d14583SBruce Richardson == RTE_PTYPE_L2_ETHER_TIMESYNC)) { 2176c1d14583SBruce Richardson rxq->time_high = 2177c1d14583SBruce Richardson rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2178c1d14583SBruce Richardson first_seg->timesync = rxq->queue_id; 2179c1d14583SBruce Richardson pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2180c1d14583SBruce Richardson } 2181c1d14583SBruce Richardson #endif 2182c1d14583SBruce Richardson first_seg->ol_flags |= pkt_flags; 2183c1d14583SBruce Richardson /* Prefetch data of first segment, if configured to do so. */ 2184c1d14583SBruce Richardson rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr, 2185c1d14583SBruce Richardson first_seg->data_off)); 2186c1d14583SBruce Richardson rx_pkts[nb_rx++] = first_seg; 2187c1d14583SBruce Richardson first_seg = NULL; 2188c1d14583SBruce Richardson } 2189c1d14583SBruce Richardson 2190c1d14583SBruce Richardson /* Record index of the next RX descriptor to probe. */ 2191c1d14583SBruce Richardson rxq->rx_tail = rx_id; 2192c1d14583SBruce Richardson rxq->pkt_first_seg = first_seg; 2193c1d14583SBruce Richardson rxq->pkt_last_seg = last_seg; 2194c1d14583SBruce Richardson 2195c1d14583SBruce Richardson /** 2196c1d14583SBruce Richardson * If the number of free RX descriptors is greater than the RX free 2197c1d14583SBruce Richardson * threshold of the queue, advance the Receive Descriptor Tail (RDT) 2198c1d14583SBruce Richardson * register. Update the RDT with the value of the last processed RX 2199c1d14583SBruce Richardson * descriptor minus 1, to guarantee that the RDT register is never 2200c1d14583SBruce Richardson * equal to the RDH register, which creates a "full" ring situation 2201c1d14583SBruce Richardson * from the hardware point of view. 2202c1d14583SBruce Richardson */ 2203c1d14583SBruce Richardson nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); 2204c1d14583SBruce Richardson if (nb_hold > rxq->rx_free_thresh) { 2205c1d14583SBruce Richardson rx_id = (uint16_t)(rx_id == 0 ? 2206c1d14583SBruce Richardson (rxq->nb_rx_desc - 1) : (rx_id - 1)); 2207c1d14583SBruce Richardson /* write TAIL register */ 2208c1d14583SBruce Richardson ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); 2209c1d14583SBruce Richardson nb_hold = 0; 2210c1d14583SBruce Richardson } 2211c1d14583SBruce Richardson rxq->nb_rx_hold = nb_hold; 2212c1d14583SBruce Richardson 2213c1d14583SBruce Richardson /* return received packet in the burst */ 2214c1d14583SBruce Richardson return nb_rx; 2215c1d14583SBruce Richardson } 2216c1d14583SBruce Richardson 2217c1d14583SBruce Richardson const uint32_t * 2218c1d14583SBruce Richardson ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements) 2219c1d14583SBruce Richardson { 2220c1d14583SBruce Richardson struct ice_adapter *ad = 2221c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 2222c1d14583SBruce Richardson const uint32_t *ptypes; 2223c1d14583SBruce Richardson 2224c1d14583SBruce Richardson static const uint32_t ptypes_os[] = { 2225c1d14583SBruce Richardson /* refers to ice_get_default_pkt_type() */ 2226c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER, 2227c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_TIMESYNC, 2228c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_LLDP, 2229c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_ARP, 2230c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2231c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2232c1d14583SBruce Richardson RTE_PTYPE_L4_FRAG, 2233c1d14583SBruce Richardson RTE_PTYPE_L4_ICMP, 2234c1d14583SBruce Richardson RTE_PTYPE_L4_NONFRAG, 2235c1d14583SBruce Richardson RTE_PTYPE_L4_SCTP, 2236c1d14583SBruce Richardson RTE_PTYPE_L4_TCP, 2237c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 2238c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT, 2239c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP, 2240c1d14583SBruce Richardson RTE_PTYPE_INNER_L2_ETHER, 2241c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2242c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2243c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 2244c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 2245c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 2246c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 2247c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 2248c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 2249c1d14583SBruce Richardson }; 2250c1d14583SBruce Richardson 2251c1d14583SBruce Richardson static const uint32_t ptypes_comms[] = { 2252c1d14583SBruce Richardson /* refers to ice_get_default_pkt_type() */ 2253c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER, 2254c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_TIMESYNC, 2255c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_LLDP, 2256c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_ARP, 2257c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 2258c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 2259c1d14583SBruce Richardson RTE_PTYPE_L4_FRAG, 2260c1d14583SBruce Richardson RTE_PTYPE_L4_ICMP, 2261c1d14583SBruce Richardson RTE_PTYPE_L4_NONFRAG, 2262c1d14583SBruce Richardson RTE_PTYPE_L4_SCTP, 2263c1d14583SBruce Richardson RTE_PTYPE_L4_TCP, 2264c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 2265c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT, 2266c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP, 2267c1d14583SBruce Richardson RTE_PTYPE_INNER_L2_ETHER, 2268c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 2269c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 2270c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 2271c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 2272c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 2273c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 2274c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 2275c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 2276c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPC, 2277c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU, 2278c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_PPPOE, 2279c1d14583SBruce Richardson }; 2280c1d14583SBruce Richardson 2281c1d14583SBruce Richardson if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) { 2282c1d14583SBruce Richardson *no_of_elements = RTE_DIM(ptypes_comms); 2283c1d14583SBruce Richardson ptypes = ptypes_comms; 2284c1d14583SBruce Richardson } else { 2285c1d14583SBruce Richardson *no_of_elements = RTE_DIM(ptypes_os); 2286c1d14583SBruce Richardson ptypes = ptypes_os; 2287c1d14583SBruce Richardson } 2288c1d14583SBruce Richardson 2289c1d14583SBruce Richardson if (dev->rx_pkt_burst == ice_recv_pkts || 2290c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc || 2291c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_scattered_pkts) 2292c1d14583SBruce Richardson return ptypes; 2293c1d14583SBruce Richardson 2294c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 2295c1d14583SBruce Richardson if (dev->rx_pkt_burst == ice_recv_pkts_vec || 2296c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || 2297c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 2298c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || 2299c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload || 2300c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || 2301c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload || 2302c1d14583SBruce Richardson #endif 2303c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || 2304c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload || 2305c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 || 2306c1d14583SBruce Richardson dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload) 2307c1d14583SBruce Richardson return ptypes; 2308c1d14583SBruce Richardson #endif 2309c1d14583SBruce Richardson 2310c1d14583SBruce Richardson return NULL; 2311c1d14583SBruce Richardson } 2312c1d14583SBruce Richardson 2313c1d14583SBruce Richardson int 2314c1d14583SBruce Richardson ice_rx_descriptor_status(void *rx_queue, uint16_t offset) 2315c1d14583SBruce Richardson { 2316c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 2317c1d14583SBruce Richardson struct ice_rx_queue *rxq = rx_queue; 2318c1d14583SBruce Richardson uint32_t desc; 2319c1d14583SBruce Richardson 2320c1d14583SBruce Richardson if (unlikely(offset >= rxq->nb_rx_desc)) 2321c1d14583SBruce Richardson return -EINVAL; 2322c1d14583SBruce Richardson 2323c1d14583SBruce Richardson if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) 2324c1d14583SBruce Richardson return RTE_ETH_RX_DESC_UNAVAIL; 2325c1d14583SBruce Richardson 2326c1d14583SBruce Richardson desc = rxq->rx_tail + offset; 2327c1d14583SBruce Richardson if (desc >= rxq->nb_rx_desc) 2328c1d14583SBruce Richardson desc -= rxq->nb_rx_desc; 2329c1d14583SBruce Richardson 2330c1d14583SBruce Richardson rxdp = &rxq->rx_ring[desc]; 2331c1d14583SBruce Richardson if (rte_le_to_cpu_16(rxdp->wb.status_error0) & 2332c1d14583SBruce Richardson (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) 2333c1d14583SBruce Richardson return RTE_ETH_RX_DESC_DONE; 2334c1d14583SBruce Richardson 2335c1d14583SBruce Richardson return RTE_ETH_RX_DESC_AVAIL; 2336c1d14583SBruce Richardson } 2337c1d14583SBruce Richardson 2338c1d14583SBruce Richardson int 2339c1d14583SBruce Richardson ice_tx_descriptor_status(void *tx_queue, uint16_t offset) 2340c1d14583SBruce Richardson { 2341c038157aSBruce Richardson struct ci_tx_queue *txq = tx_queue; 2342c1d14583SBruce Richardson volatile uint64_t *status; 2343c1d14583SBruce Richardson uint64_t mask, expect; 2344c1d14583SBruce Richardson uint32_t desc; 2345c1d14583SBruce Richardson 2346c1d14583SBruce Richardson if (unlikely(offset >= txq->nb_tx_desc)) 2347c1d14583SBruce Richardson return -EINVAL; 2348c1d14583SBruce Richardson 2349c1d14583SBruce Richardson desc = txq->tx_tail + offset; 2350c1d14583SBruce Richardson /* go to next desc that has the RS bit */ 2351c1d14583SBruce Richardson desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) * 2352c1d14583SBruce Richardson txq->tx_rs_thresh; 2353c1d14583SBruce Richardson if (desc >= txq->nb_tx_desc) { 2354c1d14583SBruce Richardson desc -= txq->nb_tx_desc; 2355c1d14583SBruce Richardson if (desc >= txq->nb_tx_desc) 2356c1d14583SBruce Richardson desc -= txq->nb_tx_desc; 2357c1d14583SBruce Richardson } 2358c1d14583SBruce Richardson 23594d0f54d9SBruce Richardson status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz; 2360c1d14583SBruce Richardson mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M); 2361c1d14583SBruce Richardson expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE << 2362c1d14583SBruce Richardson ICE_TXD_QW1_DTYPE_S); 2363c1d14583SBruce Richardson if ((*status & mask) == expect) 2364c1d14583SBruce Richardson return RTE_ETH_TX_DESC_DONE; 2365c1d14583SBruce Richardson 2366c1d14583SBruce Richardson return RTE_ETH_TX_DESC_FULL; 2367c1d14583SBruce Richardson } 2368c1d14583SBruce Richardson 2369c1d14583SBruce Richardson void 2370c1d14583SBruce Richardson ice_free_queues(struct rte_eth_dev *dev) 2371c1d14583SBruce Richardson { 2372c1d14583SBruce Richardson uint16_t i; 2373c1d14583SBruce Richardson 2374c1d14583SBruce Richardson PMD_INIT_FUNC_TRACE(); 2375c1d14583SBruce Richardson 2376c1d14583SBruce Richardson for (i = 0; i < dev->data->nb_rx_queues; i++) { 2377c1d14583SBruce Richardson if (!dev->data->rx_queues[i]) 2378c1d14583SBruce Richardson continue; 2379c1d14583SBruce Richardson ice_rx_queue_release(dev->data->rx_queues[i]); 2380c1d14583SBruce Richardson dev->data->rx_queues[i] = NULL; 2381c1d14583SBruce Richardson } 2382c1d14583SBruce Richardson dev->data->nb_rx_queues = 0; 2383c1d14583SBruce Richardson 2384c1d14583SBruce Richardson for (i = 0; i < dev->data->nb_tx_queues; i++) { 2385c1d14583SBruce Richardson if (!dev->data->tx_queues[i]) 2386c1d14583SBruce Richardson continue; 2387c1d14583SBruce Richardson ice_tx_queue_release(dev->data->tx_queues[i]); 2388c1d14583SBruce Richardson dev->data->tx_queues[i] = NULL; 2389c1d14583SBruce Richardson } 2390c1d14583SBruce Richardson dev->data->nb_tx_queues = 0; 2391c1d14583SBruce Richardson } 2392c1d14583SBruce Richardson 2393c1d14583SBruce Richardson #define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC 2394c1d14583SBruce Richardson #define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC 2395c1d14583SBruce Richardson 2396c1d14583SBruce Richardson int 2397c1d14583SBruce Richardson ice_fdir_setup_tx_resources(struct ice_pf *pf) 2398c1d14583SBruce Richardson { 2399c038157aSBruce Richardson struct ci_tx_queue *txq; 2400c1d14583SBruce Richardson const struct rte_memzone *tz = NULL; 2401c1d14583SBruce Richardson uint32_t ring_size; 2402c1d14583SBruce Richardson struct rte_eth_dev *dev; 2403c1d14583SBruce Richardson 2404c1d14583SBruce Richardson if (!pf) { 2405c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "PF is not available"); 2406c1d14583SBruce Richardson return -EINVAL; 2407c1d14583SBruce Richardson } 2408c1d14583SBruce Richardson 2409c1d14583SBruce Richardson dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id]; 2410c1d14583SBruce Richardson 2411c1d14583SBruce Richardson /* Allocate the TX queue data structure. */ 2412c1d14583SBruce Richardson txq = rte_zmalloc_socket("ice fdir tx queue", 2413c038157aSBruce Richardson sizeof(struct ci_tx_queue), 2414c1d14583SBruce Richardson RTE_CACHE_LINE_SIZE, 2415c1d14583SBruce Richardson SOCKET_ID_ANY); 2416c1d14583SBruce Richardson if (!txq) { 2417c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to allocate memory for " 2418c1d14583SBruce Richardson "tx queue structure."); 2419c1d14583SBruce Richardson return -ENOMEM; 2420c1d14583SBruce Richardson } 2421c1d14583SBruce Richardson 2422c1d14583SBruce Richardson /* Allocate TX hardware ring descriptors. */ 2423c1d14583SBruce Richardson ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC; 2424c1d14583SBruce Richardson ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 2425c1d14583SBruce Richardson 2426c1d14583SBruce Richardson tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", 2427c1d14583SBruce Richardson ICE_FDIR_QUEUE_ID, ring_size, 2428c1d14583SBruce Richardson ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); 2429c1d14583SBruce Richardson if (!tz) { 2430c1d14583SBruce Richardson ice_tx_queue_release(txq); 2431c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); 2432c1d14583SBruce Richardson return -ENOMEM; 2433c1d14583SBruce Richardson } 2434c1d14583SBruce Richardson 2435c1d14583SBruce Richardson txq->mz = tz; 2436c1d14583SBruce Richardson txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC; 2437c1d14583SBruce Richardson txq->queue_id = ICE_FDIR_QUEUE_ID; 2438c1d14583SBruce Richardson txq->reg_idx = pf->fdir.fdir_vsi->base_queue; 24394d0f54d9SBruce Richardson txq->ice_vsi = pf->fdir.fdir_vsi; 2440c1d14583SBruce Richardson 2441c1d14583SBruce Richardson txq->tx_ring_dma = tz->iova; 24424d0f54d9SBruce Richardson txq->ice_tx_ring = (struct ice_tx_desc *)tz->addr; 2443c1d14583SBruce Richardson /* 2444c1d14583SBruce Richardson * don't need to allocate software ring and reset for the fdir 2445c1d14583SBruce Richardson * program queue just set the queue has been configured. 2446c1d14583SBruce Richardson */ 2447c1d14583SBruce Richardson txq->q_set = true; 2448c1d14583SBruce Richardson pf->fdir.txq = txq; 2449c1d14583SBruce Richardson 2450c1d14583SBruce Richardson 2451c1d14583SBruce Richardson return ICE_SUCCESS; 2452c1d14583SBruce Richardson } 2453c1d14583SBruce Richardson 2454c1d14583SBruce Richardson int 2455c1d14583SBruce Richardson ice_fdir_setup_rx_resources(struct ice_pf *pf) 2456c1d14583SBruce Richardson { 2457c1d14583SBruce Richardson struct ice_rx_queue *rxq; 2458c1d14583SBruce Richardson const struct rte_memzone *rz = NULL; 2459c1d14583SBruce Richardson uint32_t ring_size; 2460c1d14583SBruce Richardson struct rte_eth_dev *dev; 2461c1d14583SBruce Richardson 2462c1d14583SBruce Richardson if (!pf) { 2463c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "PF is not available"); 2464c1d14583SBruce Richardson return -EINVAL; 2465c1d14583SBruce Richardson } 2466c1d14583SBruce Richardson 2467c1d14583SBruce Richardson dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id]; 2468c1d14583SBruce Richardson 2469c1d14583SBruce Richardson /* Allocate the RX queue data structure. */ 2470c1d14583SBruce Richardson rxq = rte_zmalloc_socket("ice fdir rx queue", 2471c1d14583SBruce Richardson sizeof(struct ice_rx_queue), 2472c1d14583SBruce Richardson RTE_CACHE_LINE_SIZE, 2473c1d14583SBruce Richardson SOCKET_ID_ANY); 2474c1d14583SBruce Richardson if (!rxq) { 2475c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to allocate memory for " 2476c1d14583SBruce Richardson "rx queue structure."); 2477c1d14583SBruce Richardson return -ENOMEM; 2478c1d14583SBruce Richardson } 2479c1d14583SBruce Richardson 2480c1d14583SBruce Richardson /* Allocate RX hardware ring descriptors. */ 2481c1d14583SBruce Richardson ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC; 2482c1d14583SBruce Richardson ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); 2483c1d14583SBruce Richardson 2484c1d14583SBruce Richardson rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", 2485c1d14583SBruce Richardson ICE_FDIR_QUEUE_ID, ring_size, 2486c1d14583SBruce Richardson ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); 2487c1d14583SBruce Richardson if (!rz) { 2488c1d14583SBruce Richardson ice_rx_queue_release(rxq); 2489c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); 2490c1d14583SBruce Richardson return -ENOMEM; 2491c1d14583SBruce Richardson } 2492c1d14583SBruce Richardson 2493c1d14583SBruce Richardson rxq->mz = rz; 2494c1d14583SBruce Richardson rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC; 2495c1d14583SBruce Richardson rxq->queue_id = ICE_FDIR_QUEUE_ID; 2496c1d14583SBruce Richardson rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; 2497c1d14583SBruce Richardson rxq->vsi = pf->fdir.fdir_vsi; 2498c1d14583SBruce Richardson 2499c1d14583SBruce Richardson rxq->rx_ring_dma = rz->iova; 2500c1d14583SBruce Richardson memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * 2501c1d14583SBruce Richardson sizeof(union ice_32byte_rx_desc)); 2502c1d14583SBruce Richardson rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr; 2503c1d14583SBruce Richardson 2504c1d14583SBruce Richardson /* 2505c1d14583SBruce Richardson * Don't need to allocate software ring and reset for the fdir 2506c1d14583SBruce Richardson * rx queue, just set the queue has been configured. 2507c1d14583SBruce Richardson */ 2508c1d14583SBruce Richardson rxq->q_set = true; 2509c1d14583SBruce Richardson pf->fdir.rxq = rxq; 2510c1d14583SBruce Richardson 2511c1d14583SBruce Richardson rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; 2512c1d14583SBruce Richardson 2513c1d14583SBruce Richardson return ICE_SUCCESS; 2514c1d14583SBruce Richardson } 2515c1d14583SBruce Richardson 2516c1d14583SBruce Richardson uint16_t 2517c1d14583SBruce Richardson ice_recv_pkts(void *rx_queue, 2518c1d14583SBruce Richardson struct rte_mbuf **rx_pkts, 2519c1d14583SBruce Richardson uint16_t nb_pkts) 2520c1d14583SBruce Richardson { 2521c1d14583SBruce Richardson struct ice_rx_queue *rxq = rx_queue; 2522c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; 2523c1d14583SBruce Richardson volatile union ice_rx_flex_desc *rxdp; 2524c1d14583SBruce Richardson union ice_rx_flex_desc rxd; 2525c1d14583SBruce Richardson struct ice_rx_entry *sw_ring = rxq->sw_ring; 2526c1d14583SBruce Richardson struct ice_rx_entry *rxe; 2527c1d14583SBruce Richardson struct rte_mbuf *nmb; /* new allocated mbuf */ 2528c1d14583SBruce Richardson struct rte_mbuf *nmb_pay; /* new allocated payload mbuf */ 2529c1d14583SBruce Richardson struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */ 2530c1d14583SBruce Richardson uint16_t rx_id = rxq->rx_tail; 2531c1d14583SBruce Richardson uint16_t nb_rx = 0; 2532c1d14583SBruce Richardson uint16_t nb_hold = 0; 2533c1d14583SBruce Richardson uint16_t rx_packet_len; 2534c1d14583SBruce Richardson uint16_t rx_header_len; 2535c1d14583SBruce Richardson uint16_t rx_stat_err0; 2536c1d14583SBruce Richardson uint64_t dma_addr; 2537c1d14583SBruce Richardson uint64_t pkt_flags; 2538c1d14583SBruce Richardson uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; 2539c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2540c1d14583SBruce Richardson bool is_tsinit = false; 2541c1d14583SBruce Richardson uint64_t ts_ns; 2542c1d14583SBruce Richardson struct ice_vsi *vsi = rxq->vsi; 2543c1d14583SBruce Richardson struct ice_hw *hw = ICE_VSI_TO_HW(vsi); 2544c1d14583SBruce Richardson struct ice_adapter *ad = rxq->vsi->adapter; 2545c1d14583SBruce Richardson 2546c1d14583SBruce Richardson if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { 2547c1d14583SBruce Richardson uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); 2548c1d14583SBruce Richardson 2549c1d14583SBruce Richardson if (unlikely(sw_cur_time - rxq->hw_time_update > 4)) 2550c1d14583SBruce Richardson is_tsinit = 1; 2551c1d14583SBruce Richardson } 2552c1d14583SBruce Richardson #endif 2553c1d14583SBruce Richardson 2554c1d14583SBruce Richardson while (nb_rx < nb_pkts) { 2555c1d14583SBruce Richardson rxdp = &rx_ring[rx_id]; 2556c1d14583SBruce Richardson rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); 2557c1d14583SBruce Richardson 2558c1d14583SBruce Richardson /* Check the DD bit first */ 2559c1d14583SBruce Richardson if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) 2560c1d14583SBruce Richardson break; 2561c1d14583SBruce Richardson 2562c1d14583SBruce Richardson /* allocate header mbuf */ 2563c1d14583SBruce Richardson nmb = rte_mbuf_raw_alloc(rxq->mp); 2564c1d14583SBruce Richardson if (unlikely(!nmb)) { 2565c1d14583SBruce Richardson rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2566c1d14583SBruce Richardson break; 2567c1d14583SBruce Richardson } 2568c1d14583SBruce Richardson 2569c1d14583SBruce Richardson rxd = *rxdp; /* copy descriptor in ring to temp variable*/ 2570c1d14583SBruce Richardson 2571c1d14583SBruce Richardson nb_hold++; 2572c1d14583SBruce Richardson rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */ 2573c1d14583SBruce Richardson rx_id++; 2574c1d14583SBruce Richardson if (unlikely(rx_id == rxq->nb_rx_desc)) 2575c1d14583SBruce Richardson rx_id = 0; 2576c1d14583SBruce Richardson rxm = rxe->mbuf; 2577c1d14583SBruce Richardson rxe->mbuf = nmb; 2578c1d14583SBruce Richardson dma_addr = 2579c1d14583SBruce Richardson rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); 2580c1d14583SBruce Richardson 2581c1d14583SBruce Richardson if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 2582c1d14583SBruce Richardson /** 2583c1d14583SBruce Richardson * fill the read format of descriptor with physic address in 2584c1d14583SBruce Richardson * new allocated mbuf: nmb 2585c1d14583SBruce Richardson */ 2586c1d14583SBruce Richardson rxdp->read.hdr_addr = 0; 2587c1d14583SBruce Richardson rxdp->read.pkt_addr = dma_addr; 2588c1d14583SBruce Richardson } else { 2589c1d14583SBruce Richardson /* allocate payload mbuf */ 2590c1d14583SBruce Richardson nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp); 2591c1d14583SBruce Richardson if (unlikely(!nmb_pay)) { 2592c1d14583SBruce Richardson rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++; 2593c1d14583SBruce Richardson rxe->mbuf = NULL; 2594c1d14583SBruce Richardson nb_hold--; 2595c1d14583SBruce Richardson if (unlikely(rx_id == 0)) 2596c1d14583SBruce Richardson rx_id = rxq->nb_rx_desc; 2597c1d14583SBruce Richardson 2598c1d14583SBruce Richardson rx_id--; 2599c1d14583SBruce Richardson rte_pktmbuf_free(nmb); 2600c1d14583SBruce Richardson break; 2601c1d14583SBruce Richardson } 2602c1d14583SBruce Richardson 2603c1d14583SBruce Richardson nmb->next = nmb_pay; 2604c1d14583SBruce Richardson nmb_pay->next = NULL; 2605c1d14583SBruce Richardson 2606c1d14583SBruce Richardson /** 2607c1d14583SBruce Richardson * fill the read format of descriptor with physic address in 2608c1d14583SBruce Richardson * new allocated mbuf: nmb 2609c1d14583SBruce Richardson */ 2610c1d14583SBruce Richardson rxdp->read.hdr_addr = dma_addr; 2611c1d14583SBruce Richardson rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay)); 2612c1d14583SBruce Richardson } 2613c1d14583SBruce Richardson 2614c1d14583SBruce Richardson /* fill old mbuf with received descriptor: rxd */ 2615c1d14583SBruce Richardson rxm->data_off = RTE_PKTMBUF_HEADROOM; 2616c1d14583SBruce Richardson rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM)); 2617c1d14583SBruce Richardson if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 2618c1d14583SBruce Richardson rxm->nb_segs = 1; 2619c1d14583SBruce Richardson rxm->next = NULL; 2620c1d14583SBruce Richardson /* calculate rx_packet_len of the received pkt */ 2621c1d14583SBruce Richardson rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & 2622c1d14583SBruce Richardson ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 2623c1d14583SBruce Richardson rxm->data_len = rx_packet_len; 2624c1d14583SBruce Richardson rxm->pkt_len = rx_packet_len; 2625c1d14583SBruce Richardson } else { 2626c1d14583SBruce Richardson rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs); 2627c1d14583SBruce Richardson rxm->next->next = NULL; 2628c1d14583SBruce Richardson /* calculate rx_packet_len of the received pkt */ 2629c1d14583SBruce Richardson rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) & 2630c1d14583SBruce Richardson ICE_RX_FLEX_DESC_HEADER_LEN_M; 2631c1d14583SBruce Richardson rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) & 2632c1d14583SBruce Richardson ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len; 2633c1d14583SBruce Richardson rxm->data_len = rx_header_len; 2634c1d14583SBruce Richardson rxm->pkt_len = rx_header_len + rx_packet_len; 2635c1d14583SBruce Richardson rxm->next->data_len = rx_packet_len; 2636c1d14583SBruce Richardson 2637c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_RX 2638c1d14583SBruce Richardson rte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm)); 2639c1d14583SBruce Richardson #endif 2640c1d14583SBruce Richardson } 2641c1d14583SBruce Richardson 2642c1d14583SBruce Richardson rxm->port = rxq->port_id; 2643c1d14583SBruce Richardson rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M & 2644c1d14583SBruce Richardson rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; 2645c1d14583SBruce Richardson ice_rxd_to_vlan_tci(rxm, &rxd); 2646c1d14583SBruce Richardson rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); 2647c1d14583SBruce Richardson pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); 2648c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC 2649c1d14583SBruce Richardson if (ice_timestamp_dynflag > 0 && 2650c1d14583SBruce Richardson (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) { 2651c1d14583SBruce Richardson rxq->time_high = 2652c1d14583SBruce Richardson rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2653c1d14583SBruce Richardson if (unlikely(is_tsinit)) { 2654c1d14583SBruce Richardson ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high); 2655c1d14583SBruce Richardson rxq->hw_time_low = (uint32_t)ts_ns; 2656c1d14583SBruce Richardson rxq->hw_time_high = (uint32_t)(ts_ns >> 32); 2657c1d14583SBruce Richardson is_tsinit = false; 2658c1d14583SBruce Richardson } else { 2659c1d14583SBruce Richardson if (rxq->time_high < rxq->hw_time_low) 2660c1d14583SBruce Richardson rxq->hw_time_high += 1; 2661c1d14583SBruce Richardson ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high; 2662c1d14583SBruce Richardson rxq->hw_time_low = rxq->time_high; 2663c1d14583SBruce Richardson } 2664c1d14583SBruce Richardson rxq->hw_time_update = rte_get_timer_cycles() / 2665c1d14583SBruce Richardson (rte_get_timer_hz() / 1000); 2666c1d14583SBruce Richardson *RTE_MBUF_DYNFIELD(rxm, 2667c1d14583SBruce Richardson (ice_timestamp_dynfield_offset), 2668c1d14583SBruce Richardson rte_mbuf_timestamp_t *) = ts_ns; 2669c1d14583SBruce Richardson pkt_flags |= ice_timestamp_dynflag; 2670c1d14583SBruce Richardson } 2671c1d14583SBruce Richardson 2672c1d14583SBruce Richardson if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) == 2673c1d14583SBruce Richardson RTE_PTYPE_L2_ETHER_TIMESYNC)) { 2674c1d14583SBruce Richardson rxq->time_high = 2675c1d14583SBruce Richardson rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); 2676c1d14583SBruce Richardson rxm->timesync = rxq->queue_id; 2677c1d14583SBruce Richardson pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP; 2678c1d14583SBruce Richardson } 2679c1d14583SBruce Richardson #endif 2680c1d14583SBruce Richardson rxm->ol_flags |= pkt_flags; 2681c1d14583SBruce Richardson /* copy old mbuf to rx_pkts */ 2682c1d14583SBruce Richardson rx_pkts[nb_rx++] = rxm; 2683c1d14583SBruce Richardson } 2684c1d14583SBruce Richardson 2685c1d14583SBruce Richardson rxq->rx_tail = rx_id; 2686c1d14583SBruce Richardson /** 2687c1d14583SBruce Richardson * If the number of free RX descriptors is greater than the RX free 2688c1d14583SBruce Richardson * threshold of the queue, advance the receive tail register of queue. 2689c1d14583SBruce Richardson * Update that register with the value of the last processed RX 2690c1d14583SBruce Richardson * descriptor minus 1. 2691c1d14583SBruce Richardson */ 2692c1d14583SBruce Richardson nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold); 2693c1d14583SBruce Richardson if (nb_hold > rxq->rx_free_thresh) { 2694c1d14583SBruce Richardson rx_id = (uint16_t)(rx_id == 0 ? 2695c1d14583SBruce Richardson (rxq->nb_rx_desc - 1) : (rx_id - 1)); 2696c1d14583SBruce Richardson /* write TAIL register */ 2697c1d14583SBruce Richardson ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); 2698c1d14583SBruce Richardson nb_hold = 0; 2699c1d14583SBruce Richardson } 2700c1d14583SBruce Richardson rxq->nb_rx_hold = nb_hold; 2701c1d14583SBruce Richardson 2702c1d14583SBruce Richardson /* return received packet in the burst */ 2703c1d14583SBruce Richardson return nb_rx; 2704c1d14583SBruce Richardson } 2705c1d14583SBruce Richardson 2706c1d14583SBruce Richardson static inline void 2707c1d14583SBruce Richardson ice_parse_tunneling_params(uint64_t ol_flags, 2708c1d14583SBruce Richardson union ice_tx_offload tx_offload, 2709c1d14583SBruce Richardson uint32_t *cd_tunneling) 2710c1d14583SBruce Richardson { 2711c1d14583SBruce Richardson /* EIPT: External (outer) IP header type */ 2712c1d14583SBruce Richardson if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) 2713c1d14583SBruce Richardson *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4; 2714c1d14583SBruce Richardson else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) 2715c1d14583SBruce Richardson *cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM; 2716c1d14583SBruce Richardson else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) 2717c1d14583SBruce Richardson *cd_tunneling |= ICE_TX_CTX_EIPT_IPV6; 2718c1d14583SBruce Richardson 2719c1d14583SBruce Richardson /* EIPLEN: External (outer) IP header length, in DWords */ 2720c1d14583SBruce Richardson *cd_tunneling |= (tx_offload.outer_l3_len >> 2) << 2721c1d14583SBruce Richardson ICE_TXD_CTX_QW0_EIPLEN_S; 2722c1d14583SBruce Richardson 2723c1d14583SBruce Richardson /* L4TUNT: L4 Tunneling Type */ 2724c1d14583SBruce Richardson switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 2725c1d14583SBruce Richardson case RTE_MBUF_F_TX_TUNNEL_IPIP: 2726c1d14583SBruce Richardson /* for non UDP / GRE tunneling, set to 00b */ 2727c1d14583SBruce Richardson break; 2728c1d14583SBruce Richardson case RTE_MBUF_F_TX_TUNNEL_VXLAN: 2729c1d14583SBruce Richardson case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE: 2730c1d14583SBruce Richardson case RTE_MBUF_F_TX_TUNNEL_GTP: 2731c1d14583SBruce Richardson case RTE_MBUF_F_TX_TUNNEL_GENEVE: 2732c1d14583SBruce Richardson *cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING; 2733c1d14583SBruce Richardson break; 2734c1d14583SBruce Richardson case RTE_MBUF_F_TX_TUNNEL_GRE: 2735c1d14583SBruce Richardson *cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING; 2736c1d14583SBruce Richardson break; 2737c1d14583SBruce Richardson default: 2738c1d14583SBruce Richardson PMD_TX_LOG(ERR, "Tunnel type not supported"); 2739c1d14583SBruce Richardson return; 2740c1d14583SBruce Richardson } 2741c1d14583SBruce Richardson 2742c1d14583SBruce Richardson /* L4TUNLEN: L4 Tunneling Length, in Words 2743c1d14583SBruce Richardson * 2744c1d14583SBruce Richardson * We depend on app to set rte_mbuf.l2_len correctly. 2745c1d14583SBruce Richardson * For IP in GRE it should be set to the length of the GRE 2746c1d14583SBruce Richardson * header; 2747c1d14583SBruce Richardson * For MAC in GRE or MAC in UDP it should be set to the length 2748c1d14583SBruce Richardson * of the GRE or UDP headers plus the inner MAC up to including 2749c1d14583SBruce Richardson * its last Ethertype. 2750c1d14583SBruce Richardson * If MPLS labels exists, it should include them as well. 2751c1d14583SBruce Richardson */ 2752c1d14583SBruce Richardson *cd_tunneling |= (tx_offload.l2_len >> 1) << 2753c1d14583SBruce Richardson ICE_TXD_CTX_QW0_NATLEN_S; 2754c1d14583SBruce Richardson 2755c1d14583SBruce Richardson /** 2756c1d14583SBruce Richardson * Calculate the tunneling UDP checksum. 2757c1d14583SBruce Richardson * Shall be set only if L4TUNT = 01b and EIPT is not zero 2758c1d14583SBruce Richardson */ 2759c1d14583SBruce Richardson if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && 2760c1d14583SBruce Richardson (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && 2761c1d14583SBruce Richardson (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) 2762c1d14583SBruce Richardson *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; 2763c1d14583SBruce Richardson } 2764c1d14583SBruce Richardson 2765c1d14583SBruce Richardson static inline void 2766c1d14583SBruce Richardson ice_txd_enable_checksum(uint64_t ol_flags, 2767c1d14583SBruce Richardson uint32_t *td_cmd, 2768c1d14583SBruce Richardson uint32_t *td_offset, 2769c1d14583SBruce Richardson union ice_tx_offload tx_offload) 2770c1d14583SBruce Richardson { 2771c1d14583SBruce Richardson /* Set MACLEN */ 2772c1d14583SBruce Richardson if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) 2773c1d14583SBruce Richardson *td_offset |= (tx_offload.l2_len >> 1) 2774c1d14583SBruce Richardson << ICE_TX_DESC_LEN_MACLEN_S; 2775c1d14583SBruce Richardson 2776c1d14583SBruce Richardson /* Enable L3 checksum offloads */ 2777c1d14583SBruce Richardson if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 2778c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; 2779c1d14583SBruce Richardson *td_offset |= (tx_offload.l3_len >> 2) << 2780c1d14583SBruce Richardson ICE_TX_DESC_LEN_IPLEN_S; 2781c1d14583SBruce Richardson } else if (ol_flags & RTE_MBUF_F_TX_IPV4) { 2782c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; 2783c1d14583SBruce Richardson *td_offset |= (tx_offload.l3_len >> 2) << 2784c1d14583SBruce Richardson ICE_TX_DESC_LEN_IPLEN_S; 2785c1d14583SBruce Richardson } else if (ol_flags & RTE_MBUF_F_TX_IPV6) { 2786c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; 2787c1d14583SBruce Richardson *td_offset |= (tx_offload.l3_len >> 2) << 2788c1d14583SBruce Richardson ICE_TX_DESC_LEN_IPLEN_S; 2789c1d14583SBruce Richardson } 2790c1d14583SBruce Richardson 2791c1d14583SBruce Richardson if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 2792c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2793c1d14583SBruce Richardson *td_offset |= (tx_offload.l4_len >> 2) << 2794c1d14583SBruce Richardson ICE_TX_DESC_LEN_L4_LEN_S; 2795c1d14583SBruce Richardson return; 2796c1d14583SBruce Richardson } 2797c1d14583SBruce Richardson 2798c1d14583SBruce Richardson if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) { 2799c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2800c1d14583SBruce Richardson *td_offset |= (tx_offload.l4_len >> 2) << 2801c1d14583SBruce Richardson ICE_TX_DESC_LEN_L4_LEN_S; 2802c1d14583SBruce Richardson return; 2803c1d14583SBruce Richardson } 2804c1d14583SBruce Richardson 2805c1d14583SBruce Richardson /* Enable L4 checksum offloads */ 2806c1d14583SBruce Richardson switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) { 2807c1d14583SBruce Richardson case RTE_MBUF_F_TX_TCP_CKSUM: 2808c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; 2809c1d14583SBruce Richardson *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) << 2810c1d14583SBruce Richardson ICE_TX_DESC_LEN_L4_LEN_S; 2811c1d14583SBruce Richardson break; 2812c1d14583SBruce Richardson case RTE_MBUF_F_TX_SCTP_CKSUM: 2813c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; 2814c1d14583SBruce Richardson *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) << 2815c1d14583SBruce Richardson ICE_TX_DESC_LEN_L4_LEN_S; 2816c1d14583SBruce Richardson break; 2817c1d14583SBruce Richardson case RTE_MBUF_F_TX_UDP_CKSUM: 2818c1d14583SBruce Richardson *td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; 2819c1d14583SBruce Richardson *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) << 2820c1d14583SBruce Richardson ICE_TX_DESC_LEN_L4_LEN_S; 2821c1d14583SBruce Richardson break; 2822c1d14583SBruce Richardson default: 2823c1d14583SBruce Richardson break; 2824c1d14583SBruce Richardson } 2825c1d14583SBruce Richardson } 2826c1d14583SBruce Richardson 2827c1d14583SBruce Richardson static inline int 2828c038157aSBruce Richardson ice_xmit_cleanup(struct ci_tx_queue *txq) 2829c1d14583SBruce Richardson { 28305cc9919fSBruce Richardson struct ci_tx_entry *sw_ring = txq->sw_ring; 28314d0f54d9SBruce Richardson volatile struct ice_tx_desc *txd = txq->ice_tx_ring; 2832c1d14583SBruce Richardson uint16_t last_desc_cleaned = txq->last_desc_cleaned; 2833c1d14583SBruce Richardson uint16_t nb_tx_desc = txq->nb_tx_desc; 2834c1d14583SBruce Richardson uint16_t desc_to_clean_to; 2835c1d14583SBruce Richardson uint16_t nb_tx_to_clean; 2836c1d14583SBruce Richardson 2837c1d14583SBruce Richardson /* Determine the last descriptor needing to be cleaned */ 2838c1d14583SBruce Richardson desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh); 2839c1d14583SBruce Richardson if (desc_to_clean_to >= nb_tx_desc) 2840c1d14583SBruce Richardson desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc); 2841c1d14583SBruce Richardson 2842c1d14583SBruce Richardson /* Check to make sure the last descriptor to clean is done */ 2843c1d14583SBruce Richardson desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; 2844c1d14583SBruce Richardson if (!(txd[desc_to_clean_to].cmd_type_offset_bsz & 2845c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) { 2846c1d14583SBruce Richardson PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done " 2847c1d14583SBruce Richardson "(port=%d queue=%d) value=0x%"PRIx64, 2848c1d14583SBruce Richardson desc_to_clean_to, 2849c1d14583SBruce Richardson txq->port_id, txq->queue_id, 2850c1d14583SBruce Richardson txd[desc_to_clean_to].cmd_type_offset_bsz); 2851c1d14583SBruce Richardson /* Failed to clean any descriptors */ 2852c1d14583SBruce Richardson return -1; 2853c1d14583SBruce Richardson } 2854c1d14583SBruce Richardson 2855c1d14583SBruce Richardson /* Figure out how many descriptors will be cleaned */ 2856c1d14583SBruce Richardson if (last_desc_cleaned > desc_to_clean_to) 2857c1d14583SBruce Richardson nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) + 2858c1d14583SBruce Richardson desc_to_clean_to); 2859c1d14583SBruce Richardson else 2860c1d14583SBruce Richardson nb_tx_to_clean = (uint16_t)(desc_to_clean_to - 2861c1d14583SBruce Richardson last_desc_cleaned); 2862c1d14583SBruce Richardson 2863c1d14583SBruce Richardson /* The last descriptor to clean is done, so that means all the 2864c1d14583SBruce Richardson * descriptors from the last descriptor that was cleaned 2865c1d14583SBruce Richardson * up to the last descriptor with the RS bit set 2866c1d14583SBruce Richardson * are done. Only reset the threshold descriptor. 2867c1d14583SBruce Richardson */ 2868c1d14583SBruce Richardson txd[desc_to_clean_to].cmd_type_offset_bsz = 0; 2869c1d14583SBruce Richardson 2870c1d14583SBruce Richardson /* Update the txq to reflect the last descriptor that was cleaned */ 2871c1d14583SBruce Richardson txq->last_desc_cleaned = desc_to_clean_to; 2872c1d14583SBruce Richardson txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean); 2873c1d14583SBruce Richardson 2874c1d14583SBruce Richardson return 0; 2875c1d14583SBruce Richardson } 2876c1d14583SBruce Richardson 2877c1d14583SBruce Richardson /* Construct the tx flags */ 2878c1d14583SBruce Richardson static inline uint64_t 2879c1d14583SBruce Richardson ice_build_ctob(uint32_t td_cmd, 2880c1d14583SBruce Richardson uint32_t td_offset, 2881c1d14583SBruce Richardson uint16_t size, 2882c1d14583SBruce Richardson uint32_t td_tag) 2883c1d14583SBruce Richardson { 2884c1d14583SBruce Richardson return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 2885c1d14583SBruce Richardson ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 2886c1d14583SBruce Richardson ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 2887c1d14583SBruce Richardson ((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) | 2888c1d14583SBruce Richardson ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 2889c1d14583SBruce Richardson } 2890c1d14583SBruce Richardson 2891c1d14583SBruce Richardson /* Check if the context descriptor is needed for TX offloading */ 2892c1d14583SBruce Richardson static inline uint16_t 2893c1d14583SBruce Richardson ice_calc_context_desc(uint64_t flags) 2894c1d14583SBruce Richardson { 2895c1d14583SBruce Richardson static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG | 2896c1d14583SBruce Richardson RTE_MBUF_F_TX_UDP_SEG | 2897c1d14583SBruce Richardson RTE_MBUF_F_TX_QINQ | 2898c1d14583SBruce Richardson RTE_MBUF_F_TX_OUTER_IP_CKSUM | 2899c1d14583SBruce Richardson RTE_MBUF_F_TX_TUNNEL_MASK | 2900c1d14583SBruce Richardson RTE_MBUF_F_TX_IEEE1588_TMST; 2901c1d14583SBruce Richardson 2902c1d14583SBruce Richardson return (flags & mask) ? 1 : 0; 2903c1d14583SBruce Richardson } 2904c1d14583SBruce Richardson 2905c1d14583SBruce Richardson /* set ice TSO context descriptor */ 2906c1d14583SBruce Richardson static inline uint64_t 2907c1d14583SBruce Richardson ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload) 2908c1d14583SBruce Richardson { 2909c1d14583SBruce Richardson uint64_t ctx_desc = 0; 2910c1d14583SBruce Richardson uint32_t cd_cmd, hdr_len, cd_tso_len; 2911c1d14583SBruce Richardson 2912c1d14583SBruce Richardson if (!tx_offload.l4_len) { 2913c1d14583SBruce Richardson PMD_TX_LOG(DEBUG, "L4 length set to 0"); 2914c1d14583SBruce Richardson return ctx_desc; 2915c1d14583SBruce Richardson } 2916c1d14583SBruce Richardson 2917c1d14583SBruce Richardson hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len; 2918c1d14583SBruce Richardson hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? 2919c1d14583SBruce Richardson tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0; 2920c1d14583SBruce Richardson 2921c1d14583SBruce Richardson cd_cmd = ICE_TX_CTX_DESC_TSO; 2922c1d14583SBruce Richardson cd_tso_len = mbuf->pkt_len - hdr_len; 2923c1d14583SBruce Richardson ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) | 2924c1d14583SBruce Richardson ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) | 2925c1d14583SBruce Richardson ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S); 2926c1d14583SBruce Richardson 2927c1d14583SBruce Richardson return ctx_desc; 2928c1d14583SBruce Richardson } 2929c1d14583SBruce Richardson 2930c1d14583SBruce Richardson /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */ 2931c1d14583SBruce Richardson #define ICE_MAX_DATA_PER_TXD \ 2932c1d14583SBruce Richardson (ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S) 2933c1d14583SBruce Richardson /* Calculate the number of TX descriptors needed for each pkt */ 2934c1d14583SBruce Richardson static inline uint16_t 2935c1d14583SBruce Richardson ice_calc_pkt_desc(struct rte_mbuf *tx_pkt) 2936c1d14583SBruce Richardson { 2937c1d14583SBruce Richardson struct rte_mbuf *txd = tx_pkt; 2938c1d14583SBruce Richardson uint16_t count = 0; 2939c1d14583SBruce Richardson 2940c1d14583SBruce Richardson while (txd != NULL) { 2941c1d14583SBruce Richardson count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD); 2942c1d14583SBruce Richardson txd = txd->next; 2943c1d14583SBruce Richardson } 2944c1d14583SBruce Richardson 2945c1d14583SBruce Richardson return count; 2946c1d14583SBruce Richardson } 2947c1d14583SBruce Richardson 2948c1d14583SBruce Richardson uint16_t 2949c1d14583SBruce Richardson ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 2950c1d14583SBruce Richardson { 2951c038157aSBruce Richardson struct ci_tx_queue *txq; 29524d0f54d9SBruce Richardson volatile struct ice_tx_desc *ice_tx_ring; 2953c1d14583SBruce Richardson volatile struct ice_tx_desc *txd; 29545cc9919fSBruce Richardson struct ci_tx_entry *sw_ring; 29555cc9919fSBruce Richardson struct ci_tx_entry *txe, *txn; 2956c1d14583SBruce Richardson struct rte_mbuf *tx_pkt; 2957c1d14583SBruce Richardson struct rte_mbuf *m_seg; 2958c1d14583SBruce Richardson uint32_t cd_tunneling_params; 2959c1d14583SBruce Richardson uint16_t tx_id; 2960c1d14583SBruce Richardson uint16_t nb_tx; 2961c1d14583SBruce Richardson uint16_t nb_used; 2962c1d14583SBruce Richardson uint16_t nb_ctx; 2963c1d14583SBruce Richardson uint32_t td_cmd = 0; 2964c1d14583SBruce Richardson uint32_t td_offset = 0; 2965c1d14583SBruce Richardson uint32_t td_tag = 0; 2966c1d14583SBruce Richardson uint16_t tx_last; 2967c1d14583SBruce Richardson uint16_t slen; 2968c1d14583SBruce Richardson uint64_t buf_dma_addr; 2969c1d14583SBruce Richardson uint64_t ol_flags; 2970c1d14583SBruce Richardson union ice_tx_offload tx_offload = {0}; 2971c1d14583SBruce Richardson 2972c1d14583SBruce Richardson txq = tx_queue; 2973c1d14583SBruce Richardson sw_ring = txq->sw_ring; 29744d0f54d9SBruce Richardson ice_tx_ring = txq->ice_tx_ring; 2975c1d14583SBruce Richardson tx_id = txq->tx_tail; 2976c1d14583SBruce Richardson txe = &sw_ring[tx_id]; 2977c1d14583SBruce Richardson 2978c1d14583SBruce Richardson /* Check if the descriptor ring needs to be cleaned. */ 2979c1d14583SBruce Richardson if (txq->nb_tx_free < txq->tx_free_thresh) 2980c1d14583SBruce Richardson (void)ice_xmit_cleanup(txq); 2981c1d14583SBruce Richardson 2982c1d14583SBruce Richardson for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { 2983c1d14583SBruce Richardson tx_pkt = *tx_pkts++; 2984c1d14583SBruce Richardson 2985c1d14583SBruce Richardson td_cmd = 0; 2986c1d14583SBruce Richardson td_tag = 0; 2987c1d14583SBruce Richardson td_offset = 0; 2988c1d14583SBruce Richardson ol_flags = tx_pkt->ol_flags; 2989c1d14583SBruce Richardson tx_offload.l2_len = tx_pkt->l2_len; 2990c1d14583SBruce Richardson tx_offload.l3_len = tx_pkt->l3_len; 2991c1d14583SBruce Richardson tx_offload.outer_l2_len = tx_pkt->outer_l2_len; 2992c1d14583SBruce Richardson tx_offload.outer_l3_len = tx_pkt->outer_l3_len; 2993c1d14583SBruce Richardson tx_offload.l4_len = tx_pkt->l4_len; 2994c1d14583SBruce Richardson tx_offload.tso_segsz = tx_pkt->tso_segsz; 2995c1d14583SBruce Richardson /* Calculate the number of context descriptors needed. */ 2996c1d14583SBruce Richardson nb_ctx = ice_calc_context_desc(ol_flags); 2997c1d14583SBruce Richardson 2998c1d14583SBruce Richardson /* The number of descriptors that must be allocated for 2999c1d14583SBruce Richardson * a packet equals to the number of the segments of that 3000c1d14583SBruce Richardson * packet plus the number of context descriptor if needed. 3001c1d14583SBruce Richardson * Recalculate the needed tx descs when TSO enabled in case 3002c1d14583SBruce Richardson * the mbuf data size exceeds max data size that hw allows 3003c1d14583SBruce Richardson * per tx desc. 3004c1d14583SBruce Richardson */ 3005c1d14583SBruce Richardson if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) 3006c1d14583SBruce Richardson nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) + 3007c1d14583SBruce Richardson nb_ctx); 3008c1d14583SBruce Richardson else 3009c1d14583SBruce Richardson nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx); 3010c1d14583SBruce Richardson tx_last = (uint16_t)(tx_id + nb_used - 1); 3011c1d14583SBruce Richardson 3012c1d14583SBruce Richardson /* Circular ring */ 3013c1d14583SBruce Richardson if (tx_last >= txq->nb_tx_desc) 3014c1d14583SBruce Richardson tx_last = (uint16_t)(tx_last - txq->nb_tx_desc); 3015c1d14583SBruce Richardson 3016c1d14583SBruce Richardson if (nb_used > txq->nb_tx_free) { 3017c1d14583SBruce Richardson if (ice_xmit_cleanup(txq) != 0) { 3018c1d14583SBruce Richardson if (nb_tx == 0) 3019c1d14583SBruce Richardson return 0; 3020c1d14583SBruce Richardson goto end_of_tx; 3021c1d14583SBruce Richardson } 3022c1d14583SBruce Richardson if (unlikely(nb_used > txq->tx_rs_thresh)) { 3023c1d14583SBruce Richardson while (nb_used > txq->nb_tx_free) { 3024c1d14583SBruce Richardson if (ice_xmit_cleanup(txq) != 0) { 3025c1d14583SBruce Richardson if (nb_tx == 0) 3026c1d14583SBruce Richardson return 0; 3027c1d14583SBruce Richardson goto end_of_tx; 3028c1d14583SBruce Richardson } 3029c1d14583SBruce Richardson } 3030c1d14583SBruce Richardson } 3031c1d14583SBruce Richardson } 3032c1d14583SBruce Richardson 3033c1d14583SBruce Richardson /* Descriptor based VLAN insertion */ 3034c1d14583SBruce Richardson if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) { 3035c1d14583SBruce Richardson td_cmd |= ICE_TX_DESC_CMD_IL2TAG1; 3036c1d14583SBruce Richardson td_tag = tx_pkt->vlan_tci; 3037c1d14583SBruce Richardson } 3038c1d14583SBruce Richardson 3039c1d14583SBruce Richardson /* Fill in tunneling parameters if necessary */ 3040c1d14583SBruce Richardson cd_tunneling_params = 0; 3041c1d14583SBruce Richardson if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) { 3042c1d14583SBruce Richardson td_offset |= (tx_offload.outer_l2_len >> 1) 3043c1d14583SBruce Richardson << ICE_TX_DESC_LEN_MACLEN_S; 3044c1d14583SBruce Richardson ice_parse_tunneling_params(ol_flags, tx_offload, 3045c1d14583SBruce Richardson &cd_tunneling_params); 3046c1d14583SBruce Richardson } 3047c1d14583SBruce Richardson 3048c1d14583SBruce Richardson /* Enable checksum offloading */ 3049c1d14583SBruce Richardson if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) 3050c1d14583SBruce Richardson ice_txd_enable_checksum(ol_flags, &td_cmd, 3051c1d14583SBruce Richardson &td_offset, tx_offload); 3052c1d14583SBruce Richardson 3053c1d14583SBruce Richardson if (nb_ctx) { 3054c1d14583SBruce Richardson /* Setup TX context descriptor if required */ 3055c1d14583SBruce Richardson volatile struct ice_tx_ctx_desc *ctx_txd = 3056c1d14583SBruce Richardson (volatile struct ice_tx_ctx_desc *) 30574d0f54d9SBruce Richardson &ice_tx_ring[tx_id]; 3058c1d14583SBruce Richardson uint16_t cd_l2tag2 = 0; 3059c1d14583SBruce Richardson uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX; 3060c1d14583SBruce Richardson 3061c1d14583SBruce Richardson txn = &sw_ring[txe->next_id]; 3062c1d14583SBruce Richardson RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); 3063c1d14583SBruce Richardson if (txe->mbuf) { 3064c1d14583SBruce Richardson rte_pktmbuf_free_seg(txe->mbuf); 3065c1d14583SBruce Richardson txe->mbuf = NULL; 3066c1d14583SBruce Richardson } 3067c1d14583SBruce Richardson 3068c1d14583SBruce Richardson if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) 3069c1d14583SBruce Richardson cd_type_cmd_tso_mss |= 3070c1d14583SBruce Richardson ice_set_tso_ctx(tx_pkt, tx_offload); 3071c1d14583SBruce Richardson else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) 3072c1d14583SBruce Richardson cd_type_cmd_tso_mss |= 3073c1d14583SBruce Richardson ((uint64_t)ICE_TX_CTX_DESC_TSYN << 3074c1d14583SBruce Richardson ICE_TXD_CTX_QW1_CMD_S) | 30754d0f54d9SBruce Richardson (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index << 3076c1d14583SBruce Richardson ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M); 3077c1d14583SBruce Richardson 3078c1d14583SBruce Richardson ctx_txd->tunneling_params = 3079c1d14583SBruce Richardson rte_cpu_to_le_32(cd_tunneling_params); 3080c1d14583SBruce Richardson 3081c1d14583SBruce Richardson /* TX context descriptor based double VLAN insert */ 3082c1d14583SBruce Richardson if (ol_flags & RTE_MBUF_F_TX_QINQ) { 3083c1d14583SBruce Richardson cd_l2tag2 = tx_pkt->vlan_tci_outer; 3084c1d14583SBruce Richardson cd_type_cmd_tso_mss |= 3085c1d14583SBruce Richardson ((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 << 3086c1d14583SBruce Richardson ICE_TXD_CTX_QW1_CMD_S); 3087c1d14583SBruce Richardson } 3088c1d14583SBruce Richardson ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); 3089c1d14583SBruce Richardson ctx_txd->qw1 = 3090c1d14583SBruce Richardson rte_cpu_to_le_64(cd_type_cmd_tso_mss); 3091c1d14583SBruce Richardson 3092c1d14583SBruce Richardson txe->last_id = tx_last; 3093c1d14583SBruce Richardson tx_id = txe->next_id; 3094c1d14583SBruce Richardson txe = txn; 3095c1d14583SBruce Richardson } 3096c1d14583SBruce Richardson m_seg = tx_pkt; 3097c1d14583SBruce Richardson 3098c1d14583SBruce Richardson do { 30994d0f54d9SBruce Richardson txd = &ice_tx_ring[tx_id]; 3100c1d14583SBruce Richardson txn = &sw_ring[txe->next_id]; 3101c1d14583SBruce Richardson 3102c1d14583SBruce Richardson if (txe->mbuf) 3103c1d14583SBruce Richardson rte_pktmbuf_free_seg(txe->mbuf); 3104c1d14583SBruce Richardson txe->mbuf = m_seg; 3105c1d14583SBruce Richardson 3106c1d14583SBruce Richardson /* Setup TX Descriptor */ 3107c1d14583SBruce Richardson slen = m_seg->data_len; 3108c1d14583SBruce Richardson buf_dma_addr = rte_mbuf_data_iova(m_seg); 3109c1d14583SBruce Richardson 3110c1d14583SBruce Richardson while ((ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) && 3111c1d14583SBruce Richardson unlikely(slen > ICE_MAX_DATA_PER_TXD)) { 3112c1d14583SBruce Richardson txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); 3113c1d14583SBruce Richardson txd->cmd_type_offset_bsz = 3114c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 3115c1d14583SBruce Richardson ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 3116c1d14583SBruce Richardson ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 3117c1d14583SBruce Richardson ((uint64_t)ICE_MAX_DATA_PER_TXD << 3118c1d14583SBruce Richardson ICE_TXD_QW1_TX_BUF_SZ_S) | 3119c1d14583SBruce Richardson ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 3120c1d14583SBruce Richardson 3121c1d14583SBruce Richardson buf_dma_addr += ICE_MAX_DATA_PER_TXD; 3122c1d14583SBruce Richardson slen -= ICE_MAX_DATA_PER_TXD; 3123c1d14583SBruce Richardson 3124c1d14583SBruce Richardson txe->last_id = tx_last; 3125c1d14583SBruce Richardson tx_id = txe->next_id; 3126c1d14583SBruce Richardson txe = txn; 31274d0f54d9SBruce Richardson txd = &ice_tx_ring[tx_id]; 3128c1d14583SBruce Richardson txn = &sw_ring[txe->next_id]; 3129c1d14583SBruce Richardson } 3130c1d14583SBruce Richardson 3131c1d14583SBruce Richardson txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr); 3132c1d14583SBruce Richardson txd->cmd_type_offset_bsz = 3133c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA | 3134c1d14583SBruce Richardson ((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) | 3135c1d14583SBruce Richardson ((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) | 3136c1d14583SBruce Richardson ((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) | 3137c1d14583SBruce Richardson ((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S)); 3138c1d14583SBruce Richardson 3139c1d14583SBruce Richardson txe->last_id = tx_last; 3140c1d14583SBruce Richardson tx_id = txe->next_id; 3141c1d14583SBruce Richardson txe = txn; 3142c1d14583SBruce Richardson m_seg = m_seg->next; 3143c1d14583SBruce Richardson } while (m_seg); 3144c1d14583SBruce Richardson 3145c1d14583SBruce Richardson /* fill the last descriptor with End of Packet (EOP) bit */ 3146c1d14583SBruce Richardson td_cmd |= ICE_TX_DESC_CMD_EOP; 3147c1d14583SBruce Richardson txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used); 3148c1d14583SBruce Richardson txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used); 3149c1d14583SBruce Richardson 3150c1d14583SBruce Richardson /* set RS bit on the last descriptor of one packet */ 3151c1d14583SBruce Richardson if (txq->nb_tx_used >= txq->tx_rs_thresh) { 3152c1d14583SBruce Richardson PMD_TX_LOG(DEBUG, 3153c1d14583SBruce Richardson "Setting RS bit on TXD id=" 3154c1d14583SBruce Richardson "%4u (port=%d queue=%d)", 3155c1d14583SBruce Richardson tx_last, txq->port_id, txq->queue_id); 3156c1d14583SBruce Richardson 3157c1d14583SBruce Richardson td_cmd |= ICE_TX_DESC_CMD_RS; 3158c1d14583SBruce Richardson 3159c1d14583SBruce Richardson /* Update txq RS bit counters */ 3160c1d14583SBruce Richardson txq->nb_tx_used = 0; 3161c1d14583SBruce Richardson } 3162c1d14583SBruce Richardson txd->cmd_type_offset_bsz |= 3163c1d14583SBruce Richardson rte_cpu_to_le_64(((uint64_t)td_cmd) << 3164c1d14583SBruce Richardson ICE_TXD_QW1_CMD_S); 3165c1d14583SBruce Richardson } 3166c1d14583SBruce Richardson end_of_tx: 3167c1d14583SBruce Richardson /* update Tail register */ 3168c1d14583SBruce Richardson ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id); 3169c1d14583SBruce Richardson txq->tx_tail = tx_id; 3170c1d14583SBruce Richardson 3171c1d14583SBruce Richardson return nb_tx; 3172c1d14583SBruce Richardson } 3173c1d14583SBruce Richardson 3174c1d14583SBruce Richardson static __rte_always_inline int 3175c038157aSBruce Richardson ice_tx_free_bufs(struct ci_tx_queue *txq) 3176c1d14583SBruce Richardson { 31775cc9919fSBruce Richardson struct ci_tx_entry *txep; 3178c1d14583SBruce Richardson uint16_t i; 3179c1d14583SBruce Richardson 31804d0f54d9SBruce Richardson if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & 3181c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != 3182c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) 3183c1d14583SBruce Richardson return 0; 3184c1d14583SBruce Richardson 3185c1d14583SBruce Richardson txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)]; 3186c1d14583SBruce Richardson 3187c1d14583SBruce Richardson for (i = 0; i < txq->tx_rs_thresh; i++) 3188c1d14583SBruce Richardson rte_prefetch0((txep + i)->mbuf); 3189c1d14583SBruce Richardson 3190c1d14583SBruce Richardson if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { 3191c1d14583SBruce Richardson for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { 3192c1d14583SBruce Richardson rte_mempool_put(txep->mbuf->pool, txep->mbuf); 3193c1d14583SBruce Richardson txep->mbuf = NULL; 3194c1d14583SBruce Richardson } 3195c1d14583SBruce Richardson } else { 3196c1d14583SBruce Richardson for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { 3197c1d14583SBruce Richardson rte_pktmbuf_free_seg(txep->mbuf); 3198c1d14583SBruce Richardson txep->mbuf = NULL; 3199c1d14583SBruce Richardson } 3200c1d14583SBruce Richardson } 3201c1d14583SBruce Richardson 3202c1d14583SBruce Richardson txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); 3203c1d14583SBruce Richardson txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); 3204c1d14583SBruce Richardson if (txq->tx_next_dd >= txq->nb_tx_desc) 3205c1d14583SBruce Richardson txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); 3206c1d14583SBruce Richardson 3207c1d14583SBruce Richardson return txq->tx_rs_thresh; 3208c1d14583SBruce Richardson } 3209c1d14583SBruce Richardson 3210c1d14583SBruce Richardson static int 3211c038157aSBruce Richardson ice_tx_done_cleanup_full(struct ci_tx_queue *txq, 3212c1d14583SBruce Richardson uint32_t free_cnt) 3213c1d14583SBruce Richardson { 32145cc9919fSBruce Richardson struct ci_tx_entry *swr_ring = txq->sw_ring; 3215c1d14583SBruce Richardson uint16_t i, tx_last, tx_id; 3216c1d14583SBruce Richardson uint16_t nb_tx_free_last; 3217c1d14583SBruce Richardson uint16_t nb_tx_to_clean; 3218c1d14583SBruce Richardson uint32_t pkt_cnt; 3219c1d14583SBruce Richardson 3220c1d14583SBruce Richardson /* Start free mbuf from the next of tx_tail */ 3221c1d14583SBruce Richardson tx_last = txq->tx_tail; 3222c1d14583SBruce Richardson tx_id = swr_ring[tx_last].next_id; 3223c1d14583SBruce Richardson 3224c1d14583SBruce Richardson if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq)) 3225c1d14583SBruce Richardson return 0; 3226c1d14583SBruce Richardson 3227c1d14583SBruce Richardson nb_tx_to_clean = txq->nb_tx_free; 3228c1d14583SBruce Richardson nb_tx_free_last = txq->nb_tx_free; 3229c1d14583SBruce Richardson if (!free_cnt) 3230c1d14583SBruce Richardson free_cnt = txq->nb_tx_desc; 3231c1d14583SBruce Richardson 3232c1d14583SBruce Richardson /* Loop through swr_ring to count the amount of 3233c1d14583SBruce Richardson * freeable mubfs and packets. 3234c1d14583SBruce Richardson */ 3235c1d14583SBruce Richardson for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { 3236c1d14583SBruce Richardson for (i = 0; i < nb_tx_to_clean && 3237c1d14583SBruce Richardson pkt_cnt < free_cnt && 3238c1d14583SBruce Richardson tx_id != tx_last; i++) { 3239c1d14583SBruce Richardson if (swr_ring[tx_id].mbuf != NULL) { 3240c1d14583SBruce Richardson rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); 3241c1d14583SBruce Richardson swr_ring[tx_id].mbuf = NULL; 3242c1d14583SBruce Richardson 3243c1d14583SBruce Richardson /* 3244c1d14583SBruce Richardson * last segment in the packet, 3245c1d14583SBruce Richardson * increment packet count 3246c1d14583SBruce Richardson */ 3247c1d14583SBruce Richardson pkt_cnt += (swr_ring[tx_id].last_id == tx_id); 3248c1d14583SBruce Richardson } 3249c1d14583SBruce Richardson 3250c1d14583SBruce Richardson tx_id = swr_ring[tx_id].next_id; 3251c1d14583SBruce Richardson } 3252c1d14583SBruce Richardson 3253c1d14583SBruce Richardson if (txq->tx_rs_thresh > txq->nb_tx_desc - 3254c1d14583SBruce Richardson txq->nb_tx_free || tx_id == tx_last) 3255c1d14583SBruce Richardson break; 3256c1d14583SBruce Richardson 3257c1d14583SBruce Richardson if (pkt_cnt < free_cnt) { 3258c1d14583SBruce Richardson if (ice_xmit_cleanup(txq)) 3259c1d14583SBruce Richardson break; 3260c1d14583SBruce Richardson 3261c1d14583SBruce Richardson nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last; 3262c1d14583SBruce Richardson nb_tx_free_last = txq->nb_tx_free; 3263c1d14583SBruce Richardson } 3264c1d14583SBruce Richardson } 3265c1d14583SBruce Richardson 3266c1d14583SBruce Richardson return (int)pkt_cnt; 3267c1d14583SBruce Richardson } 3268c1d14583SBruce Richardson 3269c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 3270c1d14583SBruce Richardson static int 3271c038157aSBruce Richardson ice_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused, 3272c1d14583SBruce Richardson uint32_t free_cnt __rte_unused) 3273c1d14583SBruce Richardson { 3274c1d14583SBruce Richardson return -ENOTSUP; 3275c1d14583SBruce Richardson } 3276c1d14583SBruce Richardson #endif 3277c1d14583SBruce Richardson 3278c1d14583SBruce Richardson static int 3279c038157aSBruce Richardson ice_tx_done_cleanup_simple(struct ci_tx_queue *txq, 3280c1d14583SBruce Richardson uint32_t free_cnt) 3281c1d14583SBruce Richardson { 3282c1d14583SBruce Richardson int i, n, cnt; 3283c1d14583SBruce Richardson 3284c1d14583SBruce Richardson if (free_cnt == 0 || free_cnt > txq->nb_tx_desc) 3285c1d14583SBruce Richardson free_cnt = txq->nb_tx_desc; 3286c1d14583SBruce Richardson 3287c1d14583SBruce Richardson cnt = free_cnt - free_cnt % txq->tx_rs_thresh; 3288c1d14583SBruce Richardson 3289c1d14583SBruce Richardson for (i = 0; i < cnt; i += n) { 3290c1d14583SBruce Richardson if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh) 3291c1d14583SBruce Richardson break; 3292c1d14583SBruce Richardson 3293c1d14583SBruce Richardson n = ice_tx_free_bufs(txq); 3294c1d14583SBruce Richardson 3295c1d14583SBruce Richardson if (n == 0) 3296c1d14583SBruce Richardson break; 3297c1d14583SBruce Richardson } 3298c1d14583SBruce Richardson 3299c1d14583SBruce Richardson return i; 3300c1d14583SBruce Richardson } 3301c1d14583SBruce Richardson 3302c1d14583SBruce Richardson int 3303c1d14583SBruce Richardson ice_tx_done_cleanup(void *txq, uint32_t free_cnt) 3304c1d14583SBruce Richardson { 3305c038157aSBruce Richardson struct ci_tx_queue *q = (struct ci_tx_queue *)txq; 3306c1d14583SBruce Richardson struct rte_eth_dev *dev = &rte_eth_devices[q->port_id]; 3307c1d14583SBruce Richardson struct ice_adapter *ad = 3308c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3309c1d14583SBruce Richardson 3310c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 3311c1d14583SBruce Richardson if (ad->tx_vec_allowed) 3312c1d14583SBruce Richardson return ice_tx_done_cleanup_vec(q, free_cnt); 3313c1d14583SBruce Richardson #endif 3314c1d14583SBruce Richardson if (ad->tx_simple_allowed) 3315c1d14583SBruce Richardson return ice_tx_done_cleanup_simple(q, free_cnt); 3316c1d14583SBruce Richardson else 3317c1d14583SBruce Richardson return ice_tx_done_cleanup_full(q, free_cnt); 3318c1d14583SBruce Richardson } 3319c1d14583SBruce Richardson 3320c1d14583SBruce Richardson /* Populate 4 descriptors with data from 4 mbufs */ 3321c1d14583SBruce Richardson static inline void 3322c1d14583SBruce Richardson tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) 3323c1d14583SBruce Richardson { 3324c1d14583SBruce Richardson uint64_t dma_addr; 3325c1d14583SBruce Richardson uint32_t i; 3326c1d14583SBruce Richardson 3327c1d14583SBruce Richardson for (i = 0; i < 4; i++, txdp++, pkts++) { 3328c1d14583SBruce Richardson dma_addr = rte_mbuf_data_iova(*pkts); 3329c1d14583SBruce Richardson txdp->buf_addr = rte_cpu_to_le_64(dma_addr); 3330c1d14583SBruce Richardson txdp->cmd_type_offset_bsz = 3331c1d14583SBruce Richardson ice_build_ctob((uint32_t)ICE_TD_CMD, 0, 3332c1d14583SBruce Richardson (*pkts)->data_len, 0); 3333c1d14583SBruce Richardson } 3334c1d14583SBruce Richardson } 3335c1d14583SBruce Richardson 3336c1d14583SBruce Richardson /* Populate 1 descriptor with data from 1 mbuf */ 3337c1d14583SBruce Richardson static inline void 3338c1d14583SBruce Richardson tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts) 3339c1d14583SBruce Richardson { 3340c1d14583SBruce Richardson uint64_t dma_addr; 3341c1d14583SBruce Richardson 3342c1d14583SBruce Richardson dma_addr = rte_mbuf_data_iova(*pkts); 3343c1d14583SBruce Richardson txdp->buf_addr = rte_cpu_to_le_64(dma_addr); 3344c1d14583SBruce Richardson txdp->cmd_type_offset_bsz = 3345c1d14583SBruce Richardson ice_build_ctob((uint32_t)ICE_TD_CMD, 0, 3346c1d14583SBruce Richardson (*pkts)->data_len, 0); 3347c1d14583SBruce Richardson } 3348c1d14583SBruce Richardson 3349c1d14583SBruce Richardson static inline void 3350c038157aSBruce Richardson ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts, 3351c1d14583SBruce Richardson uint16_t nb_pkts) 3352c1d14583SBruce Richardson { 33534d0f54d9SBruce Richardson volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail]; 33545cc9919fSBruce Richardson struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail]; 3355c1d14583SBruce Richardson const int N_PER_LOOP = 4; 3356c1d14583SBruce Richardson const int N_PER_LOOP_MASK = N_PER_LOOP - 1; 3357c1d14583SBruce Richardson int mainpart, leftover; 3358c1d14583SBruce Richardson int i, j; 3359c1d14583SBruce Richardson 3360c1d14583SBruce Richardson /** 3361c1d14583SBruce Richardson * Process most of the packets in chunks of N pkts. Any 3362c1d14583SBruce Richardson * leftover packets will get processed one at a time. 3363c1d14583SBruce Richardson */ 3364c1d14583SBruce Richardson mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK); 3365c1d14583SBruce Richardson leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK); 3366c1d14583SBruce Richardson for (i = 0; i < mainpart; i += N_PER_LOOP) { 3367c1d14583SBruce Richardson /* Copy N mbuf pointers to the S/W ring */ 3368c1d14583SBruce Richardson for (j = 0; j < N_PER_LOOP; ++j) 3369c1d14583SBruce Richardson (txep + i + j)->mbuf = *(pkts + i + j); 3370c1d14583SBruce Richardson tx4(txdp + i, pkts + i); 3371c1d14583SBruce Richardson } 3372c1d14583SBruce Richardson 3373c1d14583SBruce Richardson if (unlikely(leftover > 0)) { 3374c1d14583SBruce Richardson for (i = 0; i < leftover; ++i) { 3375c1d14583SBruce Richardson (txep + mainpart + i)->mbuf = *(pkts + mainpart + i); 3376c1d14583SBruce Richardson tx1(txdp + mainpart + i, pkts + mainpart + i); 3377c1d14583SBruce Richardson } 3378c1d14583SBruce Richardson } 3379c1d14583SBruce Richardson } 3380c1d14583SBruce Richardson 3381c1d14583SBruce Richardson static inline uint16_t 3382c038157aSBruce Richardson tx_xmit_pkts(struct ci_tx_queue *txq, 3383c1d14583SBruce Richardson struct rte_mbuf **tx_pkts, 3384c1d14583SBruce Richardson uint16_t nb_pkts) 3385c1d14583SBruce Richardson { 33864d0f54d9SBruce Richardson volatile struct ice_tx_desc *txr = txq->ice_tx_ring; 3387c1d14583SBruce Richardson uint16_t n = 0; 3388c1d14583SBruce Richardson 3389c1d14583SBruce Richardson /** 3390c1d14583SBruce Richardson * Begin scanning the H/W ring for done descriptors when the number 3391c1d14583SBruce Richardson * of available descriptors drops below tx_free_thresh. For each done 3392c1d14583SBruce Richardson * descriptor, free the associated buffer. 3393c1d14583SBruce Richardson */ 3394c1d14583SBruce Richardson if (txq->nb_tx_free < txq->tx_free_thresh) 3395c1d14583SBruce Richardson ice_tx_free_bufs(txq); 3396c1d14583SBruce Richardson 3397c1d14583SBruce Richardson /* Use available descriptor only */ 3398c1d14583SBruce Richardson nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); 3399c1d14583SBruce Richardson if (unlikely(!nb_pkts)) 3400c1d14583SBruce Richardson return 0; 3401c1d14583SBruce Richardson 3402c1d14583SBruce Richardson txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); 3403c1d14583SBruce Richardson if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) { 3404c1d14583SBruce Richardson n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail); 3405c1d14583SBruce Richardson ice_tx_fill_hw_ring(txq, tx_pkts, n); 3406c1d14583SBruce Richardson txr[txq->tx_next_rs].cmd_type_offset_bsz |= 3407c1d14583SBruce Richardson rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << 3408c1d14583SBruce Richardson ICE_TXD_QW1_CMD_S); 3409c1d14583SBruce Richardson txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 3410c1d14583SBruce Richardson txq->tx_tail = 0; 3411c1d14583SBruce Richardson } 3412c1d14583SBruce Richardson 3413c1d14583SBruce Richardson /* Fill hardware descriptor ring with mbuf data */ 3414c1d14583SBruce Richardson ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n)); 3415c1d14583SBruce Richardson txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n)); 3416c1d14583SBruce Richardson 3417c1d14583SBruce Richardson /* Determine if RS bit needs to be set */ 3418c1d14583SBruce Richardson if (txq->tx_tail > txq->tx_next_rs) { 3419c1d14583SBruce Richardson txr[txq->tx_next_rs].cmd_type_offset_bsz |= 3420c1d14583SBruce Richardson rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << 3421c1d14583SBruce Richardson ICE_TXD_QW1_CMD_S); 3422c1d14583SBruce Richardson txq->tx_next_rs = 3423c1d14583SBruce Richardson (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); 3424c1d14583SBruce Richardson if (txq->tx_next_rs >= txq->nb_tx_desc) 3425c1d14583SBruce Richardson txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); 3426c1d14583SBruce Richardson } 3427c1d14583SBruce Richardson 3428c1d14583SBruce Richardson if (txq->tx_tail >= txq->nb_tx_desc) 3429c1d14583SBruce Richardson txq->tx_tail = 0; 3430c1d14583SBruce Richardson 3431c1d14583SBruce Richardson /* Update the tx tail register */ 3432c1d14583SBruce Richardson ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail); 3433c1d14583SBruce Richardson 3434c1d14583SBruce Richardson return nb_pkts; 3435c1d14583SBruce Richardson } 3436c1d14583SBruce Richardson 3437c1d14583SBruce Richardson static uint16_t 3438c1d14583SBruce Richardson ice_xmit_pkts_simple(void *tx_queue, 3439c1d14583SBruce Richardson struct rte_mbuf **tx_pkts, 3440c1d14583SBruce Richardson uint16_t nb_pkts) 3441c1d14583SBruce Richardson { 3442c1d14583SBruce Richardson uint16_t nb_tx = 0; 3443c1d14583SBruce Richardson 3444c1d14583SBruce Richardson if (likely(nb_pkts <= ICE_TX_MAX_BURST)) 3445c038157aSBruce Richardson return tx_xmit_pkts((struct ci_tx_queue *)tx_queue, 3446c1d14583SBruce Richardson tx_pkts, nb_pkts); 3447c1d14583SBruce Richardson 3448c1d14583SBruce Richardson while (nb_pkts) { 3449c1d14583SBruce Richardson uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts, 3450c1d14583SBruce Richardson ICE_TX_MAX_BURST); 3451c1d14583SBruce Richardson 3452c038157aSBruce Richardson ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue, 3453c1d14583SBruce Richardson &tx_pkts[nb_tx], num); 3454c1d14583SBruce Richardson nb_tx = (uint16_t)(nb_tx + ret); 3455c1d14583SBruce Richardson nb_pkts = (uint16_t)(nb_pkts - ret); 3456c1d14583SBruce Richardson if (ret < num) 3457c1d14583SBruce Richardson break; 3458c1d14583SBruce Richardson } 3459c1d14583SBruce Richardson 3460c1d14583SBruce Richardson return nb_tx; 3461c1d14583SBruce Richardson } 3462c1d14583SBruce Richardson 3463c1d14583SBruce Richardson void __rte_cold 3464c1d14583SBruce Richardson ice_set_rx_function(struct rte_eth_dev *dev) 3465c1d14583SBruce Richardson { 3466c1d14583SBruce Richardson PMD_INIT_FUNC_TRACE(); 3467c1d14583SBruce Richardson struct ice_adapter *ad = 3468c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3469c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 3470c1d14583SBruce Richardson struct ice_rx_queue *rxq; 3471c1d14583SBruce Richardson int i; 3472c1d14583SBruce Richardson int rx_check_ret = -1; 3473c1d14583SBruce Richardson 3474c1d14583SBruce Richardson if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3475c1d14583SBruce Richardson ad->rx_use_avx512 = false; 3476c1d14583SBruce Richardson ad->rx_use_avx2 = false; 3477c1d14583SBruce Richardson rx_check_ret = ice_rx_vec_dev_check(dev); 3478c1d14583SBruce Richardson if (ad->ptp_ena) 3479c1d14583SBruce Richardson rx_check_ret = -1; 3480c1d14583SBruce Richardson ad->rx_vec_offload_support = 3481c1d14583SBruce Richardson (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH); 3482c1d14583SBruce Richardson if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed && 3483c1d14583SBruce Richardson rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 3484c1d14583SBruce Richardson ad->rx_vec_allowed = true; 3485c1d14583SBruce Richardson for (i = 0; i < dev->data->nb_rx_queues; i++) { 3486c1d14583SBruce Richardson rxq = dev->data->rx_queues[i]; 3487c1d14583SBruce Richardson if (rxq && ice_rxq_vec_setup(rxq)) { 3488c1d14583SBruce Richardson ad->rx_vec_allowed = false; 3489c1d14583SBruce Richardson break; 3490c1d14583SBruce Richardson } 3491c1d14583SBruce Richardson } 3492c1d14583SBruce Richardson 3493c1d14583SBruce Richardson if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && 3494c1d14583SBruce Richardson rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && 3495c1d14583SBruce Richardson rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1) 3496c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3497c1d14583SBruce Richardson ad->rx_use_avx512 = true; 3498c1d14583SBruce Richardson #else 3499c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3500c1d14583SBruce Richardson "AVX512 is not supported in build env"); 3501c1d14583SBruce Richardson #endif 3502c1d14583SBruce Richardson if (!ad->rx_use_avx512 && 3503c1d14583SBruce Richardson (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || 3504c1d14583SBruce Richardson rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && 3505c1d14583SBruce Richardson rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) 3506c1d14583SBruce Richardson ad->rx_use_avx2 = true; 3507c1d14583SBruce Richardson 3508c1d14583SBruce Richardson } else { 3509c1d14583SBruce Richardson ad->rx_vec_allowed = false; 3510c1d14583SBruce Richardson } 3511c1d14583SBruce Richardson } 3512c1d14583SBruce Richardson 3513c1d14583SBruce Richardson if (ad->rx_vec_allowed) { 3514c1d14583SBruce Richardson if (dev->data->scattered_rx) { 3515c1d14583SBruce Richardson if (ad->rx_use_avx512) { 3516c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3517c1d14583SBruce Richardson if (ad->rx_vec_offload_support) { 3518c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3519c1d14583SBruce Richardson "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).", 3520c1d14583SBruce Richardson dev->data->port_id); 3521c1d14583SBruce Richardson dev->rx_pkt_burst = 3522c1d14583SBruce Richardson ice_recv_scattered_pkts_vec_avx512_offload; 3523c1d14583SBruce Richardson } else { 3524c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3525c1d14583SBruce Richardson "Using AVX512 Vector Scattered Rx (port %d).", 3526c1d14583SBruce Richardson dev->data->port_id); 3527c1d14583SBruce Richardson dev->rx_pkt_burst = 3528c1d14583SBruce Richardson ice_recv_scattered_pkts_vec_avx512; 3529c1d14583SBruce Richardson } 3530c1d14583SBruce Richardson #endif 3531c1d14583SBruce Richardson } else if (ad->rx_use_avx2) { 3532c1d14583SBruce Richardson if (ad->rx_vec_offload_support) { 3533c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3534c1d14583SBruce Richardson "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).", 3535c1d14583SBruce Richardson dev->data->port_id); 3536c1d14583SBruce Richardson dev->rx_pkt_burst = 3537c1d14583SBruce Richardson ice_recv_scattered_pkts_vec_avx2_offload; 3538c1d14583SBruce Richardson } else { 3539c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3540c1d14583SBruce Richardson "Using AVX2 Vector Scattered Rx (port %d).", 3541c1d14583SBruce Richardson dev->data->port_id); 3542c1d14583SBruce Richardson dev->rx_pkt_burst = 3543c1d14583SBruce Richardson ice_recv_scattered_pkts_vec_avx2; 3544c1d14583SBruce Richardson } 3545c1d14583SBruce Richardson } else { 3546c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, 3547c1d14583SBruce Richardson "Using Vector Scattered Rx (port %d).", 3548c1d14583SBruce Richardson dev->data->port_id); 3549c1d14583SBruce Richardson dev->rx_pkt_burst = ice_recv_scattered_pkts_vec; 3550c1d14583SBruce Richardson } 3551c1d14583SBruce Richardson } else { 3552c1d14583SBruce Richardson if (ad->rx_use_avx512) { 3553c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3554c1d14583SBruce Richardson if (ad->rx_vec_offload_support) { 3555c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3556c1d14583SBruce Richardson "Using AVX512 OFFLOAD Vector Rx (port %d).", 3557c1d14583SBruce Richardson dev->data->port_id); 3558c1d14583SBruce Richardson dev->rx_pkt_burst = 3559c1d14583SBruce Richardson ice_recv_pkts_vec_avx512_offload; 3560c1d14583SBruce Richardson } else { 3561c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3562c1d14583SBruce Richardson "Using AVX512 Vector Rx (port %d).", 3563c1d14583SBruce Richardson dev->data->port_id); 3564c1d14583SBruce Richardson dev->rx_pkt_burst = 3565c1d14583SBruce Richardson ice_recv_pkts_vec_avx512; 3566c1d14583SBruce Richardson } 3567c1d14583SBruce Richardson #endif 3568c1d14583SBruce Richardson } else if (ad->rx_use_avx2) { 3569c1d14583SBruce Richardson if (ad->rx_vec_offload_support) { 3570c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3571c1d14583SBruce Richardson "Using AVX2 OFFLOAD Vector Rx (port %d).", 3572c1d14583SBruce Richardson dev->data->port_id); 3573c1d14583SBruce Richardson dev->rx_pkt_burst = 3574c1d14583SBruce Richardson ice_recv_pkts_vec_avx2_offload; 3575c1d14583SBruce Richardson } else { 3576c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3577c1d14583SBruce Richardson "Using AVX2 Vector Rx (port %d).", 3578c1d14583SBruce Richardson dev->data->port_id); 3579c1d14583SBruce Richardson dev->rx_pkt_burst = 3580c1d14583SBruce Richardson ice_recv_pkts_vec_avx2; 3581c1d14583SBruce Richardson } 3582c1d14583SBruce Richardson } else { 3583c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, 3584c1d14583SBruce Richardson "Using Vector Rx (port %d).", 3585c1d14583SBruce Richardson dev->data->port_id); 3586c1d14583SBruce Richardson dev->rx_pkt_burst = ice_recv_pkts_vec; 3587c1d14583SBruce Richardson } 3588c1d14583SBruce Richardson } 3589c1d14583SBruce Richardson return; 3590c1d14583SBruce Richardson } 3591c1d14583SBruce Richardson 3592c1d14583SBruce Richardson #endif 3593c1d14583SBruce Richardson 3594c1d14583SBruce Richardson if (dev->data->scattered_rx) { 3595c1d14583SBruce Richardson /* Set the non-LRO scattered function */ 3596c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, 3597c1d14583SBruce Richardson "Using a Scattered function on port %d.", 3598c1d14583SBruce Richardson dev->data->port_id); 3599c1d14583SBruce Richardson dev->rx_pkt_burst = ice_recv_scattered_pkts; 3600c1d14583SBruce Richardson } else if (ad->rx_bulk_alloc_allowed) { 3601c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, 3602c1d14583SBruce Richardson "Rx Burst Bulk Alloc Preconditions are " 3603c1d14583SBruce Richardson "satisfied. Rx Burst Bulk Alloc function " 3604c1d14583SBruce Richardson "will be used on port %d.", 3605c1d14583SBruce Richardson dev->data->port_id); 3606c1d14583SBruce Richardson dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc; 3607c1d14583SBruce Richardson } else { 3608c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, 3609c1d14583SBruce Richardson "Rx Burst Bulk Alloc Preconditions are not " 3610c1d14583SBruce Richardson "satisfied, Normal Rx will be used on port %d.", 3611c1d14583SBruce Richardson dev->data->port_id); 3612c1d14583SBruce Richardson dev->rx_pkt_burst = ice_recv_pkts; 3613c1d14583SBruce Richardson } 3614c1d14583SBruce Richardson } 3615c1d14583SBruce Richardson 3616c1d14583SBruce Richardson static const struct { 3617c1d14583SBruce Richardson eth_rx_burst_t pkt_burst; 3618c1d14583SBruce Richardson const char *info; 3619c1d14583SBruce Richardson } ice_rx_burst_infos[] = { 3620c1d14583SBruce Richardson { ice_recv_scattered_pkts, "Scalar Scattered" }, 3621c1d14583SBruce Richardson { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, 3622c1d14583SBruce Richardson { ice_recv_pkts, "Scalar" }, 3623c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 3624c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3625c1d14583SBruce Richardson { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, 3626c1d14583SBruce Richardson { ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" }, 3627c1d14583SBruce Richardson { ice_recv_pkts_vec_avx512, "Vector AVX512" }, 3628c1d14583SBruce Richardson { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" }, 3629c1d14583SBruce Richardson #endif 3630c1d14583SBruce Richardson { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, 3631c1d14583SBruce Richardson { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" }, 3632c1d14583SBruce Richardson { ice_recv_pkts_vec_avx2, "Vector AVX2" }, 3633c1d14583SBruce Richardson { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" }, 3634c1d14583SBruce Richardson { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, 3635c1d14583SBruce Richardson { ice_recv_pkts_vec, "Vector SSE" }, 3636c1d14583SBruce Richardson #endif 3637c1d14583SBruce Richardson }; 3638c1d14583SBruce Richardson 3639c1d14583SBruce Richardson int 3640c1d14583SBruce Richardson ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3641c1d14583SBruce Richardson struct rte_eth_burst_mode *mode) 3642c1d14583SBruce Richardson { 3643c1d14583SBruce Richardson eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 3644c1d14583SBruce Richardson int ret = -EINVAL; 3645c1d14583SBruce Richardson unsigned int i; 3646c1d14583SBruce Richardson 3647c1d14583SBruce Richardson for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) { 3648c1d14583SBruce Richardson if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) { 3649c1d14583SBruce Richardson snprintf(mode->info, sizeof(mode->info), "%s", 3650c1d14583SBruce Richardson ice_rx_burst_infos[i].info); 3651c1d14583SBruce Richardson ret = 0; 3652c1d14583SBruce Richardson break; 3653c1d14583SBruce Richardson } 3654c1d14583SBruce Richardson } 3655c1d14583SBruce Richardson 3656c1d14583SBruce Richardson return ret; 3657c1d14583SBruce Richardson } 3658c1d14583SBruce Richardson 3659c1d14583SBruce Richardson void __rte_cold 3660c038157aSBruce Richardson ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq) 3661c1d14583SBruce Richardson { 3662c1d14583SBruce Richardson struct ice_adapter *ad = 3663c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3664c1d14583SBruce Richardson 3665c1d14583SBruce Richardson /* Use a simple Tx queue if possible (only fast free is allowed) */ 3666c1d14583SBruce Richardson ad->tx_simple_allowed = 3667c1d14583SBruce Richardson (txq->offloads == 3668c1d14583SBruce Richardson (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) && 3669c1d14583SBruce Richardson txq->tx_rs_thresh >= ICE_TX_MAX_BURST); 3670c1d14583SBruce Richardson 3671c1d14583SBruce Richardson if (ad->tx_simple_allowed) 3672c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.", 3673c1d14583SBruce Richardson txq->queue_id); 3674c1d14583SBruce Richardson else 3675c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, 3676c1d14583SBruce Richardson "Simple Tx can NOT be enabled on Tx queue %u.", 3677c1d14583SBruce Richardson txq->queue_id); 3678c1d14583SBruce Richardson } 3679c1d14583SBruce Richardson 3680c1d14583SBruce Richardson /********************************************************************* 3681c1d14583SBruce Richardson * 3682c1d14583SBruce Richardson * TX prep functions 3683c1d14583SBruce Richardson * 3684c1d14583SBruce Richardson **********************************************************************/ 3685c1d14583SBruce Richardson /* The default values of TSO MSS */ 3686c1d14583SBruce Richardson #define ICE_MIN_TSO_MSS 64 3687c1d14583SBruce Richardson #define ICE_MAX_TSO_MSS 9728 3688c1d14583SBruce Richardson #define ICE_MAX_TSO_FRAME_SIZE 262144 3689c1d14583SBruce Richardson 3690c1d14583SBruce Richardson /*Check for empty mbuf*/ 3691c1d14583SBruce Richardson static inline uint16_t 3692c1d14583SBruce Richardson ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) 3693c1d14583SBruce Richardson { 3694c1d14583SBruce Richardson struct rte_mbuf *txd = tx_pkt; 3695c1d14583SBruce Richardson 3696c1d14583SBruce Richardson while (txd != NULL) { 3697c1d14583SBruce Richardson if (txd->data_len == 0) 3698c1d14583SBruce Richardson return -1; 3699c1d14583SBruce Richardson txd = txd->next; 3700c1d14583SBruce Richardson } 3701c1d14583SBruce Richardson 3702c1d14583SBruce Richardson return 0; 3703c1d14583SBruce Richardson } 3704c1d14583SBruce Richardson 3705c1d14583SBruce Richardson /* Tx mbuf check */ 3706c1d14583SBruce Richardson static uint16_t 3707c1d14583SBruce Richardson ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 3708c1d14583SBruce Richardson { 3709c038157aSBruce Richardson struct ci_tx_queue *txq = tx_queue; 3710c1d14583SBruce Richardson uint16_t idx; 3711c1d14583SBruce Richardson struct rte_mbuf *mb; 3712c1d14583SBruce Richardson bool pkt_error = false; 3713c1d14583SBruce Richardson uint16_t good_pkts = nb_pkts; 3714c1d14583SBruce Richardson const char *reason = NULL; 37154d0f54d9SBruce Richardson struct ice_adapter *adapter = txq->ice_vsi->adapter; 3716c1d14583SBruce Richardson uint64_t ol_flags; 3717c1d14583SBruce Richardson 3718c1d14583SBruce Richardson for (idx = 0; idx < nb_pkts; idx++) { 3719c1d14583SBruce Richardson mb = tx_pkts[idx]; 3720c1d14583SBruce Richardson ol_flags = mb->ol_flags; 3721c1d14583SBruce Richardson 3722c1d14583SBruce Richardson if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_MBUF) && 3723c1d14583SBruce Richardson (rte_mbuf_check(mb, 1, &reason) != 0)) { 3724c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: %s", reason); 3725c1d14583SBruce Richardson pkt_error = true; 3726c1d14583SBruce Richardson break; 3727c1d14583SBruce Richardson } 3728c1d14583SBruce Richardson 3729c1d14583SBruce Richardson if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SIZE) && 3730c1d14583SBruce Richardson (mb->data_len > mb->pkt_len || 3731c1d14583SBruce Richardson mb->data_len < ICE_TX_MIN_PKT_LEN || 3732c1d14583SBruce Richardson mb->data_len > ICE_FRAME_SIZE_MAX)) { 3733c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %d)", 3734c1d14583SBruce Richardson mb->data_len, ICE_TX_MIN_PKT_LEN, ICE_FRAME_SIZE_MAX); 3735c1d14583SBruce Richardson pkt_error = true; 3736c1d14583SBruce Richardson break; 3737c1d14583SBruce Richardson } 3738c1d14583SBruce Richardson 3739c1d14583SBruce Richardson if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SEGMENT) { 3740c1d14583SBruce Richardson if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { 3741c1d14583SBruce Richardson /** 3742c1d14583SBruce Richardson * No TSO case: nb->segs, pkt_len to not exceed 3743c1d14583SBruce Richardson * the limites. 3744c1d14583SBruce Richardson */ 3745c1d14583SBruce Richardson if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) { 3746c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds HW limit, maximum allowed value is %d", 3747c1d14583SBruce Richardson mb->nb_segs, ICE_TX_MTU_SEG_MAX); 3748c1d14583SBruce Richardson pkt_error = true; 3749c1d14583SBruce Richardson break; 3750c1d14583SBruce Richardson } 3751c1d14583SBruce Richardson if (mb->pkt_len > ICE_FRAME_SIZE_MAX) { 3752c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%d) exceeds HW limit, maximum allowed value is %d", 3753c1d14583SBruce Richardson mb->nb_segs, ICE_FRAME_SIZE_MAX); 3754c1d14583SBruce Richardson pkt_error = true; 3755c1d14583SBruce Richardson break; 3756c1d14583SBruce Richardson } 3757c1d14583SBruce Richardson } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) { 3758c1d14583SBruce Richardson /** TSO case: tso_segsz, nb_segs, pkt_len not exceed 3759c1d14583SBruce Richardson * the limits. 3760c1d14583SBruce Richardson */ 3761c1d14583SBruce Richardson if (mb->tso_segsz < ICE_MIN_TSO_MSS || 3762c1d14583SBruce Richardson mb->tso_segsz > ICE_MAX_TSO_MSS) { 3763c1d14583SBruce Richardson /** 3764c1d14583SBruce Richardson * MSS outside the range are considered malicious 3765c1d14583SBruce Richardson */ 3766c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out of range, reasonable range (%d - %u)", 3767c1d14583SBruce Richardson mb->tso_segsz, ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS); 3768c1d14583SBruce Richardson pkt_error = true; 3769c1d14583SBruce Richardson break; 3770c1d14583SBruce Richardson } 3771c038157aSBruce Richardson if (mb->nb_segs > ((struct ci_tx_queue *)tx_queue)->nb_tx_desc) { 3772c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length"); 3773c1d14583SBruce Richardson pkt_error = true; 3774c1d14583SBruce Richardson break; 3775c1d14583SBruce Richardson } 3776c1d14583SBruce Richardson } 3777c1d14583SBruce Richardson } 3778c1d14583SBruce Richardson 3779c1d14583SBruce Richardson if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_OFFLOAD) { 3780c1d14583SBruce Richardson if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) { 3781c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: TX offload is not supported"); 3782c1d14583SBruce Richardson pkt_error = true; 3783c1d14583SBruce Richardson break; 3784c1d14583SBruce Richardson } 3785c1d14583SBruce Richardson 3786c1d14583SBruce Richardson if (!rte_validate_tx_offload(mb)) { 3787c1d14583SBruce Richardson PMD_TX_LOG(ERR, "INVALID mbuf: TX offload setup error"); 3788c1d14583SBruce Richardson pkt_error = true; 3789c1d14583SBruce Richardson break; 3790c1d14583SBruce Richardson } 3791c1d14583SBruce Richardson } 3792c1d14583SBruce Richardson } 3793c1d14583SBruce Richardson 3794c1d14583SBruce Richardson if (pkt_error) { 3795c1d14583SBruce Richardson txq->mbuf_errors++; 3796c1d14583SBruce Richardson good_pkts = idx; 3797c1d14583SBruce Richardson if (good_pkts == 0) 3798c1d14583SBruce Richardson return 0; 3799c1d14583SBruce Richardson } 3800c1d14583SBruce Richardson 3801c1d14583SBruce Richardson return adapter->tx_pkt_burst(tx_queue, tx_pkts, good_pkts); 3802c1d14583SBruce Richardson } 3803c1d14583SBruce Richardson 3804c1d14583SBruce Richardson uint16_t 3805c1d14583SBruce Richardson ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 3806c1d14583SBruce Richardson uint16_t nb_pkts) 3807c1d14583SBruce Richardson { 3808c1d14583SBruce Richardson int i, ret; 3809c1d14583SBruce Richardson uint64_t ol_flags; 3810c1d14583SBruce Richardson struct rte_mbuf *m; 3811c1d14583SBruce Richardson 3812c1d14583SBruce Richardson for (i = 0; i < nb_pkts; i++) { 3813c1d14583SBruce Richardson m = tx_pkts[i]; 3814c1d14583SBruce Richardson ol_flags = m->ol_flags; 3815c1d14583SBruce Richardson 3816c1d14583SBruce Richardson if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) && 3817c1d14583SBruce Richardson /** 3818c1d14583SBruce Richardson * No TSO case: nb->segs, pkt_len to not exceed 3819c1d14583SBruce Richardson * the limites. 3820c1d14583SBruce Richardson */ 3821c1d14583SBruce Richardson (m->nb_segs > ICE_TX_MTU_SEG_MAX || 3822c1d14583SBruce Richardson m->pkt_len > ICE_FRAME_SIZE_MAX)) { 3823c1d14583SBruce Richardson rte_errno = EINVAL; 3824c1d14583SBruce Richardson return i; 3825c1d14583SBruce Richardson } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG && 3826c1d14583SBruce Richardson /** TSO case: tso_segsz, nb_segs, pkt_len not exceed 3827c1d14583SBruce Richardson * the limits. 3828c1d14583SBruce Richardson */ 3829c1d14583SBruce Richardson (m->tso_segsz < ICE_MIN_TSO_MSS || 3830c1d14583SBruce Richardson m->tso_segsz > ICE_MAX_TSO_MSS || 3831c1d14583SBruce Richardson m->nb_segs > 3832c038157aSBruce Richardson ((struct ci_tx_queue *)tx_queue)->nb_tx_desc || 3833c1d14583SBruce Richardson m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { 3834c1d14583SBruce Richardson /** 3835c1d14583SBruce Richardson * MSS outside the range are considered malicious 3836c1d14583SBruce Richardson */ 3837c1d14583SBruce Richardson rte_errno = EINVAL; 3838c1d14583SBruce Richardson return i; 3839c1d14583SBruce Richardson } 3840c1d14583SBruce Richardson 3841c1d14583SBruce Richardson if (m->pkt_len < ICE_TX_MIN_PKT_LEN) { 3842c1d14583SBruce Richardson rte_errno = EINVAL; 3843c1d14583SBruce Richardson return i; 3844c1d14583SBruce Richardson } 3845c1d14583SBruce Richardson 3846c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_TX 3847c1d14583SBruce Richardson ret = rte_validate_tx_offload(m); 3848c1d14583SBruce Richardson if (ret != 0) { 3849c1d14583SBruce Richardson rte_errno = -ret; 3850c1d14583SBruce Richardson return i; 3851c1d14583SBruce Richardson } 3852c1d14583SBruce Richardson #endif 3853c1d14583SBruce Richardson ret = rte_net_intel_cksum_prepare(m); 3854c1d14583SBruce Richardson if (ret != 0) { 3855c1d14583SBruce Richardson rte_errno = -ret; 3856c1d14583SBruce Richardson return i; 3857c1d14583SBruce Richardson } 3858c1d14583SBruce Richardson 3859c1d14583SBruce Richardson if (ice_check_empty_mbuf(m) != 0) { 3860c1d14583SBruce Richardson rte_errno = EINVAL; 3861c1d14583SBruce Richardson return i; 3862c1d14583SBruce Richardson } 3863c1d14583SBruce Richardson } 3864c1d14583SBruce Richardson return i; 3865c1d14583SBruce Richardson } 3866c1d14583SBruce Richardson 3867c1d14583SBruce Richardson void __rte_cold 3868c1d14583SBruce Richardson ice_set_tx_function(struct rte_eth_dev *dev) 3869c1d14583SBruce Richardson { 3870c1d14583SBruce Richardson struct ice_adapter *ad = 3871c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 3872c1d14583SBruce Richardson int mbuf_check = ad->devargs.mbuf_check; 3873c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 3874c038157aSBruce Richardson struct ci_tx_queue *txq; 3875c1d14583SBruce Richardson int i; 3876c1d14583SBruce Richardson int tx_check_ret = -1; 3877c1d14583SBruce Richardson 3878c1d14583SBruce Richardson if (rte_eal_process_type() == RTE_PROC_PRIMARY) { 3879c1d14583SBruce Richardson ad->tx_use_avx2 = false; 3880c1d14583SBruce Richardson ad->tx_use_avx512 = false; 3881c1d14583SBruce Richardson tx_check_ret = ice_tx_vec_dev_check(dev); 3882c1d14583SBruce Richardson if (tx_check_ret >= 0 && 3883c1d14583SBruce Richardson rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { 3884c1d14583SBruce Richardson ad->tx_vec_allowed = true; 3885c1d14583SBruce Richardson 3886c1d14583SBruce Richardson if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && 3887c1d14583SBruce Richardson rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && 3888c1d14583SBruce Richardson rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1) 3889c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3890c1d14583SBruce Richardson ad->tx_use_avx512 = true; 3891c1d14583SBruce Richardson #else 3892c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3893c1d14583SBruce Richardson "AVX512 is not supported in build env"); 3894c1d14583SBruce Richardson #endif 3895c1d14583SBruce Richardson if (!ad->tx_use_avx512 && 3896c1d14583SBruce Richardson (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || 3897c1d14583SBruce Richardson rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && 3898c1d14583SBruce Richardson rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) 3899c1d14583SBruce Richardson ad->tx_use_avx2 = true; 3900c1d14583SBruce Richardson 3901c1d14583SBruce Richardson if (!ad->tx_use_avx2 && !ad->tx_use_avx512 && 3902c1d14583SBruce Richardson tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) 3903c1d14583SBruce Richardson ad->tx_vec_allowed = false; 3904c1d14583SBruce Richardson 3905c1d14583SBruce Richardson if (ad->tx_vec_allowed) { 3906c1d14583SBruce Richardson for (i = 0; i < dev->data->nb_tx_queues; i++) { 3907c1d14583SBruce Richardson txq = dev->data->tx_queues[i]; 3908c1d14583SBruce Richardson if (txq && ice_txq_vec_setup(txq)) { 3909c1d14583SBruce Richardson ad->tx_vec_allowed = false; 3910c1d14583SBruce Richardson break; 3911c1d14583SBruce Richardson } 3912c1d14583SBruce Richardson } 3913c1d14583SBruce Richardson } 3914c1d14583SBruce Richardson } else { 3915c1d14583SBruce Richardson ad->tx_vec_allowed = false; 3916c1d14583SBruce Richardson } 3917c1d14583SBruce Richardson } 3918c1d14583SBruce Richardson 3919c1d14583SBruce Richardson if (ad->tx_vec_allowed) { 3920c1d14583SBruce Richardson dev->tx_pkt_prepare = NULL; 3921c1d14583SBruce Richardson if (ad->tx_use_avx512) { 3922c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3923c1d14583SBruce Richardson if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { 3924c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3925c1d14583SBruce Richardson "Using AVX512 OFFLOAD Vector Tx (port %d).", 3926c1d14583SBruce Richardson dev->data->port_id); 3927c1d14583SBruce Richardson dev->tx_pkt_burst = 3928c1d14583SBruce Richardson ice_xmit_pkts_vec_avx512_offload; 3929c1d14583SBruce Richardson dev->tx_pkt_prepare = ice_prep_pkts; 3930c1d14583SBruce Richardson } else { 3931c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3932c1d14583SBruce Richardson "Using AVX512 Vector Tx (port %d).", 3933c1d14583SBruce Richardson dev->data->port_id); 3934c1d14583SBruce Richardson dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; 3935c1d14583SBruce Richardson } 3936c1d14583SBruce Richardson #endif 3937c1d14583SBruce Richardson } else { 3938c1d14583SBruce Richardson if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) { 3939c1d14583SBruce Richardson PMD_DRV_LOG(NOTICE, 3940c1d14583SBruce Richardson "Using AVX2 OFFLOAD Vector Tx (port %d).", 3941c1d14583SBruce Richardson dev->data->port_id); 3942c1d14583SBruce Richardson dev->tx_pkt_burst = 3943c1d14583SBruce Richardson ice_xmit_pkts_vec_avx2_offload; 3944c1d14583SBruce Richardson dev->tx_pkt_prepare = ice_prep_pkts; 3945c1d14583SBruce Richardson } else { 3946c1d14583SBruce Richardson PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", 3947c1d14583SBruce Richardson ad->tx_use_avx2 ? "avx2 " : "", 3948c1d14583SBruce Richardson dev->data->port_id); 3949c1d14583SBruce Richardson dev->tx_pkt_burst = ad->tx_use_avx2 ? 3950c1d14583SBruce Richardson ice_xmit_pkts_vec_avx2 : 3951c1d14583SBruce Richardson ice_xmit_pkts_vec; 3952c1d14583SBruce Richardson } 3953c1d14583SBruce Richardson } 3954c1d14583SBruce Richardson 3955c1d14583SBruce Richardson if (mbuf_check) { 3956c1d14583SBruce Richardson ad->tx_pkt_burst = dev->tx_pkt_burst; 3957c1d14583SBruce Richardson dev->tx_pkt_burst = ice_xmit_pkts_check; 3958c1d14583SBruce Richardson } 3959c1d14583SBruce Richardson return; 3960c1d14583SBruce Richardson } 3961c1d14583SBruce Richardson #endif 3962c1d14583SBruce Richardson 3963c1d14583SBruce Richardson if (ad->tx_simple_allowed) { 3964c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); 3965c1d14583SBruce Richardson dev->tx_pkt_burst = ice_xmit_pkts_simple; 3966c1d14583SBruce Richardson dev->tx_pkt_prepare = NULL; 3967c1d14583SBruce Richardson } else { 3968c1d14583SBruce Richardson PMD_INIT_LOG(DEBUG, "Normal tx finally be used."); 3969c1d14583SBruce Richardson dev->tx_pkt_burst = ice_xmit_pkts; 3970c1d14583SBruce Richardson dev->tx_pkt_prepare = ice_prep_pkts; 3971c1d14583SBruce Richardson } 3972c1d14583SBruce Richardson 3973c1d14583SBruce Richardson if (mbuf_check) { 3974c1d14583SBruce Richardson ad->tx_pkt_burst = dev->tx_pkt_burst; 3975c1d14583SBruce Richardson dev->tx_pkt_burst = ice_xmit_pkts_check; 3976c1d14583SBruce Richardson } 3977c1d14583SBruce Richardson } 3978c1d14583SBruce Richardson 3979c1d14583SBruce Richardson static const struct { 3980c1d14583SBruce Richardson eth_tx_burst_t pkt_burst; 3981c1d14583SBruce Richardson const char *info; 3982c1d14583SBruce Richardson } ice_tx_burst_infos[] = { 3983c1d14583SBruce Richardson { ice_xmit_pkts_simple, "Scalar Simple" }, 3984c1d14583SBruce Richardson { ice_xmit_pkts, "Scalar" }, 3985c1d14583SBruce Richardson #ifdef RTE_ARCH_X86 3986c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT 3987c1d14583SBruce Richardson { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, 3988c1d14583SBruce Richardson { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" }, 3989c1d14583SBruce Richardson #endif 3990c1d14583SBruce Richardson { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, 3991c1d14583SBruce Richardson { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" }, 3992c1d14583SBruce Richardson { ice_xmit_pkts_vec, "Vector SSE" }, 3993c1d14583SBruce Richardson #endif 3994c1d14583SBruce Richardson }; 3995c1d14583SBruce Richardson 3996c1d14583SBruce Richardson int 3997c1d14583SBruce Richardson ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, 3998c1d14583SBruce Richardson struct rte_eth_burst_mode *mode) 3999c1d14583SBruce Richardson { 4000c1d14583SBruce Richardson eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 4001c1d14583SBruce Richardson int ret = -EINVAL; 4002c1d14583SBruce Richardson unsigned int i; 4003c1d14583SBruce Richardson 4004c1d14583SBruce Richardson for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) { 4005c1d14583SBruce Richardson if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) { 4006c1d14583SBruce Richardson snprintf(mode->info, sizeof(mode->info), "%s", 4007c1d14583SBruce Richardson ice_tx_burst_infos[i].info); 4008c1d14583SBruce Richardson ret = 0; 4009c1d14583SBruce Richardson break; 4010c1d14583SBruce Richardson } 4011c1d14583SBruce Richardson } 4012c1d14583SBruce Richardson 4013c1d14583SBruce Richardson return ret; 4014c1d14583SBruce Richardson } 4015c1d14583SBruce Richardson 4016c1d14583SBruce Richardson /* For each value it means, datasheet of hardware can tell more details 4017c1d14583SBruce Richardson * 4018c1d14583SBruce Richardson * @note: fix ice_dev_supported_ptypes_get() if any change here. 4019c1d14583SBruce Richardson */ 4020c1d14583SBruce Richardson static inline uint32_t 4021c1d14583SBruce Richardson ice_get_default_pkt_type(uint16_t ptype) 4022c1d14583SBruce Richardson { 4023c1d14583SBruce Richardson static const alignas(RTE_CACHE_LINE_SIZE) uint32_t type_table[ICE_MAX_PKT_TYPE] = { 4024c1d14583SBruce Richardson /* L2 types */ 4025c1d14583SBruce Richardson /* [0] reserved */ 4026c1d14583SBruce Richardson [1] = RTE_PTYPE_L2_ETHER, 4027c1d14583SBruce Richardson [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, 4028c1d14583SBruce Richardson /* [3] - [5] reserved */ 4029c1d14583SBruce Richardson [6] = RTE_PTYPE_L2_ETHER_LLDP, 4030c1d14583SBruce Richardson /* [7] - [10] reserved */ 4031c1d14583SBruce Richardson [11] = RTE_PTYPE_L2_ETHER_ARP, 4032c1d14583SBruce Richardson /* [12] - [21] reserved */ 4033c1d14583SBruce Richardson 4034c1d14583SBruce Richardson /* Non tunneled IPv4 */ 4035c1d14583SBruce Richardson [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4036c1d14583SBruce Richardson RTE_PTYPE_L4_FRAG, 4037c1d14583SBruce Richardson [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4038c1d14583SBruce Richardson RTE_PTYPE_L4_NONFRAG, 4039c1d14583SBruce Richardson [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4040c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4041c1d14583SBruce Richardson /* [25] reserved */ 4042c1d14583SBruce Richardson [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4043c1d14583SBruce Richardson RTE_PTYPE_L4_TCP, 4044c1d14583SBruce Richardson [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4045c1d14583SBruce Richardson RTE_PTYPE_L4_SCTP, 4046c1d14583SBruce Richardson [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4047c1d14583SBruce Richardson RTE_PTYPE_L4_ICMP, 4048c1d14583SBruce Richardson 4049c1d14583SBruce Richardson /* IPv4 --> IPv4 */ 4050c1d14583SBruce Richardson [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4051c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4052c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4053c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4054c1d14583SBruce Richardson [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4055c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4056c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4057c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4058c1d14583SBruce Richardson [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4059c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4060c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4061c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4062c1d14583SBruce Richardson /* [32] reserved */ 4063c1d14583SBruce Richardson [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4064c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4065c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4066c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4067c1d14583SBruce Richardson [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4068c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4069c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4070c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4071c1d14583SBruce Richardson [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4072c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4073c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4074c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4075c1d14583SBruce Richardson 4076c1d14583SBruce Richardson /* IPv4 --> IPv6 */ 4077c1d14583SBruce Richardson [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4078c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4079c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4080c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4081c1d14583SBruce Richardson [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4082c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4083c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4084c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4085c1d14583SBruce Richardson [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4086c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4087c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4088c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4089c1d14583SBruce Richardson /* [39] reserved */ 4090c1d14583SBruce Richardson [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4091c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4092c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4093c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4094c1d14583SBruce Richardson [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4095c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4096c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4097c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4098c1d14583SBruce Richardson [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4099c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4100c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4101c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4102c1d14583SBruce Richardson 4103c1d14583SBruce Richardson /* IPv4 --> GRE/Teredo/VXLAN */ 4104c1d14583SBruce Richardson [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4105c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT, 4106c1d14583SBruce Richardson 4107c1d14583SBruce Richardson /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ 4108c1d14583SBruce Richardson [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4109c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4110c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4111c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4112c1d14583SBruce Richardson [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4113c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4114c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4115c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4116c1d14583SBruce Richardson [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4117c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4118c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4119c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4120c1d14583SBruce Richardson /* [47] reserved */ 4121c1d14583SBruce Richardson [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4122c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4123c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4124c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4125c1d14583SBruce Richardson [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4126c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4127c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4128c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4129c1d14583SBruce Richardson [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4130c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4131c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4132c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4133c1d14583SBruce Richardson 4134c1d14583SBruce Richardson /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ 4135c1d14583SBruce Richardson [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4136c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4137c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4138c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4139c1d14583SBruce Richardson [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4140c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4141c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4142c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4143c1d14583SBruce Richardson [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4144c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4145c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4146c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4147c1d14583SBruce Richardson /* [54] reserved */ 4148c1d14583SBruce Richardson [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4149c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4150c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4151c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4152c1d14583SBruce Richardson [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4153c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4154c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4155c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4156c1d14583SBruce Richardson [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4157c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4158c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4159c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4160c1d14583SBruce Richardson 4161c1d14583SBruce Richardson /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ 4162c1d14583SBruce Richardson [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4163c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, 4164c1d14583SBruce Richardson 4165c1d14583SBruce Richardson /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 4166c1d14583SBruce Richardson [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4167c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4168c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4169c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4170c1d14583SBruce Richardson [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4171c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4172c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4173c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4174c1d14583SBruce Richardson [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4175c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4176c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4177c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4178c1d14583SBruce Richardson /* [62] reserved */ 4179c1d14583SBruce Richardson [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4180c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4181c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4182c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4183c1d14583SBruce Richardson [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4184c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4185c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4186c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4187c1d14583SBruce Richardson [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4188c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4189c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4190c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4191c1d14583SBruce Richardson 4192c1d14583SBruce Richardson /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 4193c1d14583SBruce Richardson [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4194c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4195c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4196c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4197c1d14583SBruce Richardson [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4198c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4199c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4200c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4201c1d14583SBruce Richardson [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4202c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4203c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4204c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4205c1d14583SBruce Richardson /* [69] reserved */ 4206c1d14583SBruce Richardson [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4207c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4208c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4209c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4210c1d14583SBruce Richardson [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4211c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4212c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4213c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4214c1d14583SBruce Richardson [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4215c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4216c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4217c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4218c1d14583SBruce Richardson /* [73] - [87] reserved */ 4219c1d14583SBruce Richardson 4220c1d14583SBruce Richardson /* Non tunneled IPv6 */ 4221c1d14583SBruce Richardson [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4222c1d14583SBruce Richardson RTE_PTYPE_L4_FRAG, 4223c1d14583SBruce Richardson [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4224c1d14583SBruce Richardson RTE_PTYPE_L4_NONFRAG, 4225c1d14583SBruce Richardson [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4226c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4227c1d14583SBruce Richardson /* [91] reserved */ 4228c1d14583SBruce Richardson [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4229c1d14583SBruce Richardson RTE_PTYPE_L4_TCP, 4230c1d14583SBruce Richardson [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4231c1d14583SBruce Richardson RTE_PTYPE_L4_SCTP, 4232c1d14583SBruce Richardson [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4233c1d14583SBruce Richardson RTE_PTYPE_L4_ICMP, 4234c1d14583SBruce Richardson 4235c1d14583SBruce Richardson /* IPv6 --> IPv4 */ 4236c1d14583SBruce Richardson [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4237c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4238c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4239c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4240c1d14583SBruce Richardson [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4241c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4242c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4243c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4244c1d14583SBruce Richardson [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4245c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4246c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4247c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4248c1d14583SBruce Richardson /* [98] reserved */ 4249c1d14583SBruce Richardson [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4250c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4251c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4252c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4253c1d14583SBruce Richardson [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4254c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4255c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4256c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4257c1d14583SBruce Richardson [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4258c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4259c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4260c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4261c1d14583SBruce Richardson 4262c1d14583SBruce Richardson /* IPv6 --> IPv6 */ 4263c1d14583SBruce Richardson [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4264c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4265c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4266c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4267c1d14583SBruce Richardson [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4268c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4269c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4270c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4271c1d14583SBruce Richardson [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4272c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4273c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4274c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4275c1d14583SBruce Richardson /* [105] reserved */ 4276c1d14583SBruce Richardson [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4277c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4278c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4279c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4280c1d14583SBruce Richardson [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4281c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4282c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4283c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4284c1d14583SBruce Richardson [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4285c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_IP | 4286c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4287c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4288c1d14583SBruce Richardson 4289c1d14583SBruce Richardson /* IPv6 --> GRE/Teredo/VXLAN */ 4290c1d14583SBruce Richardson [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4291c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT, 4292c1d14583SBruce Richardson 4293c1d14583SBruce Richardson /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ 4294c1d14583SBruce Richardson [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4295c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4296c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4297c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4298c1d14583SBruce Richardson [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4299c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4300c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4301c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4302c1d14583SBruce Richardson [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4303c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4304c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4305c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4306c1d14583SBruce Richardson /* [113] reserved */ 4307c1d14583SBruce Richardson [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4308c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4309c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4310c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4311c1d14583SBruce Richardson [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4312c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4313c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4314c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4315c1d14583SBruce Richardson [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4316c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4317c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4318c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4319c1d14583SBruce Richardson 4320c1d14583SBruce Richardson /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ 4321c1d14583SBruce Richardson [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4322c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4323c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4324c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4325c1d14583SBruce Richardson [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4326c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4327c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4328c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4329c1d14583SBruce Richardson [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4330c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4331c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4332c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4333c1d14583SBruce Richardson /* [120] reserved */ 4334c1d14583SBruce Richardson [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4335c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4336c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4337c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4338c1d14583SBruce Richardson [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4339c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4340c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4341c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4342c1d14583SBruce Richardson [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4343c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | 4344c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4345c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4346c1d14583SBruce Richardson 4347c1d14583SBruce Richardson /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ 4348c1d14583SBruce Richardson [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4349c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, 4350c1d14583SBruce Richardson 4351c1d14583SBruce Richardson /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ 4352c1d14583SBruce Richardson [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4353c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4354c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4355c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4356c1d14583SBruce Richardson [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4357c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4358c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4359c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4360c1d14583SBruce Richardson [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4361c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4362c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4363c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4364c1d14583SBruce Richardson /* [128] reserved */ 4365c1d14583SBruce Richardson [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4366c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4367c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4368c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4369c1d14583SBruce Richardson [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4370c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4371c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4372c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4373c1d14583SBruce Richardson [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4374c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4375c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4376c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4377c1d14583SBruce Richardson 4378c1d14583SBruce Richardson /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ 4379c1d14583SBruce Richardson [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4380c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4381c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4382c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4383c1d14583SBruce Richardson [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4384c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4385c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4386c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4387c1d14583SBruce Richardson [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4388c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4389c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4390c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4391c1d14583SBruce Richardson /* [135] reserved */ 4392c1d14583SBruce Richardson [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4393c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4394c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4395c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4396c1d14583SBruce Richardson [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4397c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4398c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4399c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_SCTP, 4400c1d14583SBruce Richardson [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4401c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | 4402c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4403c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4404c1d14583SBruce Richardson /* [139] - [299] reserved */ 4405c1d14583SBruce Richardson 4406c1d14583SBruce Richardson /* PPPoE */ 4407c1d14583SBruce Richardson [300] = RTE_PTYPE_L2_ETHER_PPPOE, 4408c1d14583SBruce Richardson [301] = RTE_PTYPE_L2_ETHER_PPPOE, 4409c1d14583SBruce Richardson 4410c1d14583SBruce Richardson /* PPPoE --> IPv4 */ 4411c1d14583SBruce Richardson [302] = RTE_PTYPE_L2_ETHER_PPPOE | 4412c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4413c1d14583SBruce Richardson RTE_PTYPE_L4_FRAG, 4414c1d14583SBruce Richardson [303] = RTE_PTYPE_L2_ETHER_PPPOE | 4415c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4416c1d14583SBruce Richardson RTE_PTYPE_L4_NONFRAG, 4417c1d14583SBruce Richardson [304] = RTE_PTYPE_L2_ETHER_PPPOE | 4418c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4419c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4420c1d14583SBruce Richardson [305] = RTE_PTYPE_L2_ETHER_PPPOE | 4421c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4422c1d14583SBruce Richardson RTE_PTYPE_L4_TCP, 4423c1d14583SBruce Richardson [306] = RTE_PTYPE_L2_ETHER_PPPOE | 4424c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4425c1d14583SBruce Richardson RTE_PTYPE_L4_SCTP, 4426c1d14583SBruce Richardson [307] = RTE_PTYPE_L2_ETHER_PPPOE | 4427c1d14583SBruce Richardson RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4428c1d14583SBruce Richardson RTE_PTYPE_L4_ICMP, 4429c1d14583SBruce Richardson 4430c1d14583SBruce Richardson /* PPPoE --> IPv6 */ 4431c1d14583SBruce Richardson [308] = RTE_PTYPE_L2_ETHER_PPPOE | 4432c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4433c1d14583SBruce Richardson RTE_PTYPE_L4_FRAG, 4434c1d14583SBruce Richardson [309] = RTE_PTYPE_L2_ETHER_PPPOE | 4435c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4436c1d14583SBruce Richardson RTE_PTYPE_L4_NONFRAG, 4437c1d14583SBruce Richardson [310] = RTE_PTYPE_L2_ETHER_PPPOE | 4438c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4439c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4440c1d14583SBruce Richardson [311] = RTE_PTYPE_L2_ETHER_PPPOE | 4441c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4442c1d14583SBruce Richardson RTE_PTYPE_L4_TCP, 4443c1d14583SBruce Richardson [312] = RTE_PTYPE_L2_ETHER_PPPOE | 4444c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4445c1d14583SBruce Richardson RTE_PTYPE_L4_SCTP, 4446c1d14583SBruce Richardson [313] = RTE_PTYPE_L2_ETHER_PPPOE | 4447c1d14583SBruce Richardson RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4448c1d14583SBruce Richardson RTE_PTYPE_L4_ICMP, 4449c1d14583SBruce Richardson /* [314] - [324] reserved */ 4450c1d14583SBruce Richardson 4451c1d14583SBruce Richardson /* IPv4/IPv6 --> GTPC/GTPU */ 4452c1d14583SBruce Richardson [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4453c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPC, 4454c1d14583SBruce Richardson [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4455c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPC, 4456c1d14583SBruce Richardson [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4457c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPC, 4458c1d14583SBruce Richardson [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4459c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPC, 4460c1d14583SBruce Richardson [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4461c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU, 4462c1d14583SBruce Richardson [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4463c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU, 4464c1d14583SBruce Richardson 4465c1d14583SBruce Richardson /* IPv4 --> GTPU --> IPv4 */ 4466c1d14583SBruce Richardson [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4467c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4468c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4469c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4470c1d14583SBruce Richardson [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4471c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4472c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4473c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4474c1d14583SBruce Richardson [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4475c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4476c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4477c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4478c1d14583SBruce Richardson [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4479c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4480c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4481c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4482c1d14583SBruce Richardson [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4483c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4484c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4485c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4486c1d14583SBruce Richardson 4487c1d14583SBruce Richardson /* IPv6 --> GTPU --> IPv4 */ 4488c1d14583SBruce Richardson [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4489c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4490c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4491c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4492c1d14583SBruce Richardson [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4493c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4494c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4495c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4496c1d14583SBruce Richardson [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4497c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4498c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4499c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4500c1d14583SBruce Richardson [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4501c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4502c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4503c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4504c1d14583SBruce Richardson [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4505c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4506c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | 4507c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4508c1d14583SBruce Richardson 4509c1d14583SBruce Richardson /* IPv4 --> GTPU --> IPv6 */ 4510c1d14583SBruce Richardson [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4511c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4512c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4513c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4514c1d14583SBruce Richardson [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4515c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4516c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4517c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4518c1d14583SBruce Richardson [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4519c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4520c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4521c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4522c1d14583SBruce Richardson [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4523c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4524c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4525c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4526c1d14583SBruce Richardson [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4527c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4528c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4529c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4530c1d14583SBruce Richardson 4531c1d14583SBruce Richardson /* IPv6 --> GTPU --> IPv6 */ 4532c1d14583SBruce Richardson [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4533c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4534c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4535c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_FRAG, 4536c1d14583SBruce Richardson [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4537c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4538c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4539c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_NONFRAG, 4540c1d14583SBruce Richardson [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4541c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4542c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4543c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_UDP, 4544c1d14583SBruce Richardson [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4545c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4546c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4547c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_TCP, 4548c1d14583SBruce Richardson [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4549c1d14583SBruce Richardson RTE_PTYPE_TUNNEL_GTPU | 4550c1d14583SBruce Richardson RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | 4551c1d14583SBruce Richardson RTE_PTYPE_INNER_L4_ICMP, 4552c1d14583SBruce Richardson 4553c1d14583SBruce Richardson /* IPv4 --> UDP ECPRI */ 4554c1d14583SBruce Richardson [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4555c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4556c1d14583SBruce Richardson [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4557c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4558c1d14583SBruce Richardson [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4559c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4560c1d14583SBruce Richardson [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4561c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4562c1d14583SBruce Richardson [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4563c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4564c1d14583SBruce Richardson [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4565c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4566c1d14583SBruce Richardson [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4567c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4568c1d14583SBruce Richardson [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4569c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4570c1d14583SBruce Richardson [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4571c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4572c1d14583SBruce Richardson [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | 4573c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4574c1d14583SBruce Richardson 4575c1d14583SBruce Richardson /* IPV6 --> UDP ECPRI */ 4576c1d14583SBruce Richardson [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4577c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4578c1d14583SBruce Richardson [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4579c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4580c1d14583SBruce Richardson [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4581c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4582c1d14583SBruce Richardson [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4583c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4584c1d14583SBruce Richardson [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4585c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4586c1d14583SBruce Richardson [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4587c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4588c1d14583SBruce Richardson [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4589c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4590c1d14583SBruce Richardson [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4591c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4592c1d14583SBruce Richardson [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4593c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4594c1d14583SBruce Richardson [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | 4595c1d14583SBruce Richardson RTE_PTYPE_L4_UDP, 4596c1d14583SBruce Richardson /* All others reserved */ 4597c1d14583SBruce Richardson }; 4598c1d14583SBruce Richardson 4599c1d14583SBruce Richardson return type_table[ptype]; 4600c1d14583SBruce Richardson } 4601c1d14583SBruce Richardson 4602c1d14583SBruce Richardson void __rte_cold 4603c1d14583SBruce Richardson ice_set_default_ptype_table(struct rte_eth_dev *dev) 4604c1d14583SBruce Richardson { 4605c1d14583SBruce Richardson struct ice_adapter *ad = 4606c1d14583SBruce Richardson ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); 4607c1d14583SBruce Richardson int i; 4608c1d14583SBruce Richardson 4609c1d14583SBruce Richardson for (i = 0; i < ICE_MAX_PKT_TYPE; i++) 4610c1d14583SBruce Richardson ad->ptype_tbl[i] = ice_get_default_pkt_type(i); 4611c1d14583SBruce Richardson } 4612c1d14583SBruce Richardson 4613c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S 1 4614c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M \ 4615c1d14583SBruce Richardson (0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S) 4616c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0 4617c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1 4618c1d14583SBruce Richardson 4619c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S 4 4620c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M \ 4621c1d14583SBruce Richardson (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S) 4622c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S 5 4623c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M \ 4624c1d14583SBruce Richardson (1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S) 4625c1d14583SBruce Richardson 4626c1d14583SBruce Richardson /* 4627c1d14583SBruce Richardson * check the programming status descriptor in rx queue. 4628c1d14583SBruce Richardson * done after Programming Flow Director is programmed on 4629c1d14583SBruce Richardson * tx queue 4630c1d14583SBruce Richardson */ 4631c1d14583SBruce Richardson static inline int 4632c1d14583SBruce Richardson ice_check_fdir_programming_status(struct ice_rx_queue *rxq) 4633c1d14583SBruce Richardson { 4634c1d14583SBruce Richardson volatile union ice_32byte_rx_desc *rxdp; 4635c1d14583SBruce Richardson uint64_t qword1; 4636c1d14583SBruce Richardson uint32_t rx_status; 4637c1d14583SBruce Richardson uint32_t error; 4638c1d14583SBruce Richardson uint32_t id; 4639c1d14583SBruce Richardson int ret = -EAGAIN; 4640c1d14583SBruce Richardson 4641c1d14583SBruce Richardson rxdp = (volatile union ice_32byte_rx_desc *) 4642c1d14583SBruce Richardson (&rxq->rx_ring[rxq->rx_tail]); 4643c1d14583SBruce Richardson qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); 4644c1d14583SBruce Richardson rx_status = (qword1 & ICE_RXD_QW1_STATUS_M) 4645c1d14583SBruce Richardson >> ICE_RXD_QW1_STATUS_S; 4646c1d14583SBruce Richardson 4647c1d14583SBruce Richardson if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) { 4648c1d14583SBruce Richardson ret = 0; 4649c1d14583SBruce Richardson error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >> 4650c1d14583SBruce Richardson ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S; 4651c1d14583SBruce Richardson id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >> 4652c1d14583SBruce Richardson ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S; 4653c1d14583SBruce Richardson if (error) { 4654c1d14583SBruce Richardson if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD) 4655c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to add FDIR rule."); 4656c1d14583SBruce Richardson else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL) 4657c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to remove FDIR rule."); 4658c1d14583SBruce Richardson ret = -EINVAL; 4659c1d14583SBruce Richardson goto err; 4660c1d14583SBruce Richardson } 4661c1d14583SBruce Richardson error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >> 4662c1d14583SBruce Richardson ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S; 4663c1d14583SBruce Richardson if (error) { 4664c1d14583SBruce Richardson PMD_DRV_LOG(ERR, "Failed to create FDIR profile."); 4665c1d14583SBruce Richardson ret = -EINVAL; 4666c1d14583SBruce Richardson } 4667c1d14583SBruce Richardson err: 4668c1d14583SBruce Richardson rxdp->wb.qword1.status_error_len = 0; 4669c1d14583SBruce Richardson rxq->rx_tail++; 4670c1d14583SBruce Richardson if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) 4671c1d14583SBruce Richardson rxq->rx_tail = 0; 4672c1d14583SBruce Richardson if (rxq->rx_tail == 0) 4673c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); 4674c1d14583SBruce Richardson else 4675c1d14583SBruce Richardson ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1); 4676c1d14583SBruce Richardson } 4677c1d14583SBruce Richardson 4678c1d14583SBruce Richardson return ret; 4679c1d14583SBruce Richardson } 4680c1d14583SBruce Richardson 4681c1d14583SBruce Richardson #define ICE_FDIR_MAX_WAIT_US 10000 4682c1d14583SBruce Richardson 4683c1d14583SBruce Richardson int 4684c1d14583SBruce Richardson ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc) 4685c1d14583SBruce Richardson { 4686c038157aSBruce Richardson struct ci_tx_queue *txq = pf->fdir.txq; 4687c1d14583SBruce Richardson struct ice_rx_queue *rxq = pf->fdir.rxq; 4688c1d14583SBruce Richardson volatile struct ice_fltr_desc *fdirdp; 4689c1d14583SBruce Richardson volatile struct ice_tx_desc *txdp; 4690c1d14583SBruce Richardson uint32_t td_cmd; 4691c1d14583SBruce Richardson uint16_t i; 4692c1d14583SBruce Richardson 4693c1d14583SBruce Richardson fdirdp = (volatile struct ice_fltr_desc *) 46944d0f54d9SBruce Richardson (&txq->ice_tx_ring[txq->tx_tail]); 4695c1d14583SBruce Richardson fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat; 4696c1d14583SBruce Richardson fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid; 4697c1d14583SBruce Richardson 46984d0f54d9SBruce Richardson txdp = &txq->ice_tx_ring[txq->tx_tail + 1]; 4699c1d14583SBruce Richardson txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); 4700c1d14583SBruce Richardson td_cmd = ICE_TX_DESC_CMD_EOP | 4701c1d14583SBruce Richardson ICE_TX_DESC_CMD_RS | 4702c1d14583SBruce Richardson ICE_TX_DESC_CMD_DUMMY; 4703c1d14583SBruce Richardson 4704c1d14583SBruce Richardson txdp->cmd_type_offset_bsz = 4705c1d14583SBruce Richardson ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0); 4706c1d14583SBruce Richardson 4707c1d14583SBruce Richardson txq->tx_tail += 2; 4708c1d14583SBruce Richardson if (txq->tx_tail >= txq->nb_tx_desc) 4709c1d14583SBruce Richardson txq->tx_tail = 0; 4710c1d14583SBruce Richardson /* Update the tx tail register */ 4711c1d14583SBruce Richardson ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); 4712c1d14583SBruce Richardson for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) { 4713c1d14583SBruce Richardson if ((txdp->cmd_type_offset_bsz & 4714c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) == 4715c1d14583SBruce Richardson rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) 4716c1d14583SBruce Richardson break; 4717c1d14583SBruce Richardson rte_delay_us(1); 4718c1d14583SBruce Richardson } 4719c1d14583SBruce Richardson if (i >= ICE_FDIR_MAX_WAIT_US) { 4720c1d14583SBruce Richardson PMD_DRV_LOG(ERR, 4721c1d14583SBruce Richardson "Failed to program FDIR filter: time out to get DD on tx queue."); 4722c1d14583SBruce Richardson return -ETIMEDOUT; 4723c1d14583SBruce Richardson } 4724c1d14583SBruce Richardson 4725c1d14583SBruce Richardson for (; i < ICE_FDIR_MAX_WAIT_US; i++) { 4726c1d14583SBruce Richardson int ret; 4727c1d14583SBruce Richardson 4728c1d14583SBruce Richardson ret = ice_check_fdir_programming_status(rxq); 4729c1d14583SBruce Richardson if (ret == -EAGAIN) 4730c1d14583SBruce Richardson rte_delay_us(1); 4731c1d14583SBruce Richardson else 4732c1d14583SBruce Richardson return ret; 4733c1d14583SBruce Richardson } 4734c1d14583SBruce Richardson 4735c1d14583SBruce Richardson PMD_DRV_LOG(ERR, 4736c1d14583SBruce Richardson "Failed to program FDIR filter: programming status reported."); 4737c1d14583SBruce Richardson return -ETIMEDOUT; 4738c1d14583SBruce Richardson 4739c1d14583SBruce Richardson 4740c1d14583SBruce Richardson } 4741