/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <ethdev_driver.h>
#include <rte_net.h>
#include <rte_vect.h>

#include "ice_rxtx.h"
#include "ice_rxtx_vec_common.h"

#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |		 \
		RTE_MBUF_F_TX_L4_MASK |		 \
		RTE_MBUF_F_TX_TCP_SEG |		 \
		RTE_MBUF_F_TX_UDP_SEG |		 \
		RTE_MBUF_F_TX_OUTER_IP_CKSUM)

/**
 * The mbuf dynamic field pointer for protocol extraction metadata.
 */
#define ICE_DYNF_PROTO_XTR_METADATA(m, n) \
	RTE_MBUF_DYNFIELD((m), (n), uint32_t *)

static int
ice_monitor_callback(const uint64_t value,
		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
{
	const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
	/*
	 * we expect the DD bit to be set to 1 if this descriptor was already
	 * written to.
	 */
	return (value & m) == m ? -1 : 0;
}

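/*
 * Provide the condition used by rte_power_monitor(): the address of the
 * 16-bit status_error0 word of the next descriptor to be written back,
 * together with ice_monitor_callback(), which flags the descriptor as
 * already written once the DD bit is set.
 */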
int
ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_queue *rxq = rx_queue;
	uint16_t desc;

	desc = rxq->rx_tail;
	rxdp = &rxq->rx_ring[desc];
	/* watch for changes in status bit */
	pmc->addr = &rxdp->wb.status_error0;

	/* comparison callback */
	pmc->fn = ice_monitor_callback;

	/* register is 16-bit */
	pmc->size = sizeof(uint16_t);

	return 0;
}


static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
{
	static uint8_t rxdid_map[] = {
		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
	};

	return xtr_type < RTE_DIM(rxdid_map) ?
				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
}

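/*
 * The ice_rxd_to_pkt_fields_by_comms_*() handlers below copy per-packet
 * metadata (RSS hash, flow director ID and, for the AUX formats, protocol
 * extraction data) from the flexible Rx descriptor into the mbuf. The
 * handler used for a queue is selected by RXDID through
 * rxd_to_pkt_fields_ops[].
 */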
static inline void
ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
				       struct rte_mbuf *mb,
				       volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms *desc =
			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);

	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}
#endif
}

static inline void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
				   struct rte_mbuf *mb,
				   volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	uint16_t stat_err;
#endif

	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}
#endif
}

static inline void
ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
				      struct rte_mbuf *mb,
				      volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms *desc =
			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

	if (rxq->xtr_ol_flag) {
		uint32_t metadata = 0;

		stat_err = rte_le_to_cpu_16(desc->status_error1);

		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);

		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
			metadata |=
				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;

		if (metadata) {
			mb->ol_flags |= rxq->xtr_ol_flag;

			*ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata;
		}
	}
#else
	RTE_SET_USED(rxq);
#endif
}

static inline void
ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
				      struct rte_mbuf *mb,
				      volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms *desc =
			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

	if (rxq->xtr_ol_flag) {
		uint32_t metadata = 0;

		if (desc->flex_ts.flex.aux0 != 0xFFFF)
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);

		if (metadata) {
			mb->ol_flags |= rxq->xtr_ol_flag;

			*ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata;
		}
	}
#else
	RTE_SET_USED(rxq);
#endif
}

static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
	[ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
	[ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
	[ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
};

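/*
 * Record the RXDID for the queue; any value without a dedicated handler
 * above falls back to ICE_RXDID_COMMS_OVS. Protocol extraction is disabled
 * when no dynamic field has been registered for it.
 */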
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
	rxq->rxdid = rxdid;

	switch (rxdid) {
	case ICE_RXDID_COMMS_AUX_VLAN:
	case ICE_RXDID_COMMS_AUX_IPV4:
	case ICE_RXDID_COMMS_AUX_IPV6:
	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
	case ICE_RXDID_COMMS_AUX_TCP:
	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
		break;
	case ICE_RXDID_COMMS_GENERIC:
		/* fallthrough */
	case ICE_RXDID_COMMS_OVS:
		break;

	default:
		/* update this according to the RXDID for PROTO_XTR_NONE */
		rxq->rxdid = ICE_RXDID_COMMS_OVS;
		break;
	}

	if (rxq->xtr_field_offs == -1)
		rxq->xtr_ol_flag = 0;
}

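/*
 * Program the hardware Rx queue context: derive buffer and maximum packet
 * sizes from the mempool and MTU, register the Rx timestamp dynamic field
 * when the offload is requested, set up protocol-based buffer split if
 * enabled, pick the RXDID for protocol extraction, write the RLAN and
 * QRXFLXP_CNTXT registers and initialise the tail register.
 */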
static int
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
{
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
	struct ice_rlan_ctx rx_ctx;
	uint16_t buf_size;
	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
	uint32_t regval;
	struct ice_adapter *ad = rxq->vsi->adapter;
	uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
	int err;

	/* Set buffer size, as header split is disabled. */
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			      RTE_PKTMBUF_HEADROOM);
	rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE);
	rxq->max_pkt_len =
		RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
			frame_size);

	if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
	    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
		PMD_DRV_LOG(ERR, "maximum packet length must "
			    "be larger than %u and smaller than %u",
			    (uint32_t)RTE_ETHER_MIN_LEN,
			    (uint32_t)ICE_FRAME_SIZE_MAX);
		return -EINVAL;
	}

	if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
		/* Register mbuf field and flag for Rx timestamp */
		err = rte_mbuf_dyn_rx_timestamp_register(
				&ice_timestamp_dynfield_offset,
				&ice_timestamp_dynflag);
		if (err) {
			PMD_DRV_LOG(ERR,
				"Cannot register mbuf field/flag for timestamp");
			return -EINVAL;
		}
		rxq->ts_enable = true;
	}

	memset(&rx_ctx, 0, sizeof(rx_ctx));

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
		uint32_t proto_hdr;
		proto_hdr = rxq->rxseg[0].proto_hdr;

		if (proto_hdr == RTE_PTYPE_UNKNOWN) {
			PMD_DRV_LOG(ERR, "Buffer split protocol must be configured");
			return -EINVAL;
		}

		switch (proto_hdr & RTE_PTYPE_L4_MASK) {
		case RTE_PTYPE_L4_TCP:
		case RTE_PTYPE_L4_UDP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
			goto set_hsplit_finish;
		case RTE_PTYPE_L4_SCTP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_L3_MASK) {
		case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
		case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_L2_MASK) {
		case RTE_PTYPE_L2_ETHER:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) {
		case RTE_PTYPE_INNER_L4_TCP:
		case RTE_PTYPE_INNER_L4_UDP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
			goto set_hsplit_finish;
		case RTE_PTYPE_INNER_L4_SCTP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) {
		case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
		case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) {
		case RTE_PTYPE_INNER_L2_ETHER:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) {
		case RTE_PTYPE_TUNNEL_GRENAT:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS;
			goto set_hsplit_finish;
		}

		PMD_DRV_LOG(ERR, "Buffer split protocol is not supported");
		return -EINVAL;

set_hsplit_finish:
		rxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE;
	} else {
		rxq->rx_hdr_len = 0;
		rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */
	}

	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	rx_ctx.dsize = 1; /* 32B descriptors */
#endif
	rx_ctx.rxmax = rxq->max_pkt_len;
	/* TPH: Transaction Layer Packet (TLP) processing hints */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold defined in 64 descriptors units.
	 * When the number of free descriptors goes below the lrxqthresh,
	 * an immediate interrupt is triggered.
	 */
	rx_ctx.lrxqthresh = 2;
	/* Default to 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;

	rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);

	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID: %u",
		    rxq->port_id, rxq->queue_id, rxdid);

	if (!(pf->supported_rxdid & BIT(rxdid))) {
		PMD_DRV_LOG(ERR, "package does not currently support RXDID (%u)",
			    rxdid);
		return -EINVAL;
	}

	rxq->rxdid = rxdid;

	/* Enable Flexible Descriptors in the queue context, which
	 * allows this driver to select a specific receive descriptor format
	 */
	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		QRXFLXP_CNTXT_RXDID_IDX_M;

	/* Increase context priority to pick up profile ID;
	 * default is 0x01; setting it to 0x03 ensures the profile
	 * is programmed even if the previous context has the same priority.
	 */
	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		regval |= QRXFLXP_CNTXT_TS_M;

	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);

	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
			    rxq->queue_id);
		return -EINVAL;
	}
	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
			    rxq->queue_id);
		return -EINVAL;
	}

	/* Check if scattered RX needs to be used. */
	if (frame_size > buf_size)
		dev_data->scattered_rx = 1;

	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);

	/* Init the Rx tail register */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return 0;
}

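/*
 * In buffer-split mode each descriptor is armed with two mbufs: a header
 * mbuf from rxq->mp whose address goes into read.hdr_addr and a payload
 * mbuf from rxq->rxseg[1].mp whose address goes into read.pkt_addr; the
 * two are linked via mbuf->next.
 */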
/* Allocate mbufs for all descriptors in rx queue */
static int
ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
{
	struct ice_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union ice_rx_flex_desc *rxd;
		rxd = &rxq->rx_ring[i];
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);

		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
			rte_mbuf_refcnt_set(mbuf, 1);
			mbuf->next = NULL;
			rxd->read.hdr_addr = 0;
			rxd->read.pkt_addr = dma_addr;
		} else {
			struct rte_mbuf *mbuf_pay;
			mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
			if (unlikely(!mbuf_pay)) {
				rte_pktmbuf_free(mbuf);
				PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX");
				return -ENOMEM;
			}

			mbuf_pay->next = NULL;
			mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf_pay->nb_segs = 1;
			mbuf_pay->port = rxq->port_id;
			mbuf->next = mbuf_pay;

			rxd->read.hdr_addr = dma_addr;
			/* The LS bit should be set to zero regardless of
			 * buffer split enablement.
			 */
			rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
		}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif
		rxe[i].mbuf = mbuf;
	}

	return 0;
}

/* Free all mbufs for descriptors in rx queue */
static void
_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
	uint16_t i;

	if (!rxq || !rxq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
		return;
	}

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i].mbuf) {
			rte_pktmbuf_free(rxq->sw_ring[i].mbuf);
			rxq->sw_ring[i].mbuf = NULL;
		}
	}
	if (rxq->rx_nb_avail == 0)
		return;
	for (i = 0; i < rxq->rx_nb_avail; i++)
		rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);

	rxq->rx_nb_avail = 0;
}

/* Turn an Rx queue on or off
 * @q_idx: queue index in PF scope
 * @on: turn on or off the queue
 */
static int
ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/* QRX_CTRL = QRX_ENA */
	reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));

	if (on) {
		if (reg & QRX_CTRL_QENA_STAT_M)
			return 0; /* Already on, skip */
		reg |= QRX_CTRL_QENA_REQ_M;
	} else {
		if (!(reg & QRX_CTRL_QENA_STAT_M))
			return 0; /* Already off, skip */
		reg &= ~QRX_CTRL_QENA_REQ_M;
	}

	/* Write the register */
	ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
	/* Check the result. QENA_STAT is expected to follow
	 * QENA_REQ by not more than 10 usec.
	 * TODO: need to change the wait counter later
	 */
	for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
		reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
		if (on) {
			if ((reg & QRX_CTRL_QENA_REQ_M) &&
			    (reg & QRX_CTRL_QENA_STAT_M))
				break;
		} else {
			if (!(reg & QRX_CTRL_QENA_REQ_M) &&
			    !(reg & QRX_CTRL_QENA_STAT_M))
				break;
		}
	}

	/* Check if it is timeout */
	if (j >= ICE_CHK_Q_ENA_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
			    (on ? "enable" : "disable"), q_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

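/*
 * Preconditions for the bulk-allocation Rx burst path: rx_free_thresh must
 * be at least ICE_RX_MAX_BURST, smaller than the ring size, and a divisor
 * of the ring size.
 */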
static inline int
ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
{
	int ret = 0;

	if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "ICE_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, ICE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "rxq->nb_rx_desc=%d",
			     rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->nb_rx_desc=%d, "
			     "rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}

/* reset fields in ice_rx_queue back to default */
static void
ice_reset_rx_queue(struct ice_rx_queue *rxq)
{
	unsigned int i;
	uint16_t len;

	if (!rxq) {
		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
		return;
	}

	len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = 0; i < ICE_RX_MAX_BURST; ++i)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;

	rxq->rxrearm_start = 0;
	rxq->rxrearm_nb = 0;
}

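/*
 * Start an Rx queue: program the hardware queue context, fill the ring
 * with mbufs, write the initial tail value and enable the queue in
 * QRX_CTRL. On failure the allocated mbufs are released and the queue
 * state is reset.
 */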
int
ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_rx_queue *rxq;
	int err;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
			    rx_queue_id, dev->data->nb_rx_queues);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	if (!rxq || !rxq->q_set) {
		PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
			    rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		rxq->ts_enable = true;
	err = ice_program_hw_rx_queue(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
			    rx_queue_id);
		return -EIO;
	}

	err = ice_alloc_rx_queue_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return -ENOMEM;
	}

	/* Init the RX tail register. */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);

		rxq->rx_rel_mbufs(rxq);
		ice_reset_rx_queue(rxq);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

int
ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_rx_queue *rxq;
	int err;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		if (dev->data->rx_queue_state[rx_queue_id] ==
			RTE_ETH_QUEUE_STATE_STOPPED)
			return 0;

		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
				    rx_queue_id);
			return -EINVAL;
		}
		rxq->rx_rel_mbufs(rxq);
		ice_reset_rx_queue(rxq);
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

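/*
 * Start a Tx queue: build the TLAN queue context, hand it to firmware via
 * ice_ena_vsi_txq(), record the returned scheduler node TEID and, when a
 * traffic-management hierarchy has been committed, move the queue to its
 * configured node.
 */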
int
ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_tx_queue *txq;
	int err;
	struct ice_vsi *vsi;
	struct ice_hw *hw;
	struct ice_pf *pf;
	struct ice_aqc_add_tx_qgrp *txq_elem;
	struct ice_tlan_ctx tx_ctx;
	int buf_len;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
			    tx_queue_id, dev->data->nb_tx_queues);
		return -EINVAL;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	if (!txq || !txq->q_set) {
		PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
			    tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	buf_len = ice_struct_size(txq_elem, txqs, 1);
	txq_elem = ice_malloc(hw, buf_len);
	if (!txq_elem)
		return -ENOMEM;

	vsi = txq->vsi;
	hw = ICE_VSI_TO_HW(vsi);
	pf = ICE_VSI_TO_PF(vsi);

	memset(&tx_ctx, 0, sizeof(tx_ctx));
	txq_elem->num_txqs = 1;
	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);

	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->nb_tx_desc;
	tx_ctx.pf_num = hw->pf_id;
	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
	tx_ctx.src_vsi = vsi->vsi_id;
	tx_ctx.port_num = hw->port_info->lport;
	tx_ctx.tso_ena = 1; /* tso enable */
	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
	tx_ctx.tsyn_ena = 1;

	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);

	/* Init the Tx tail register */
	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);

	/* FIXME: we assume TC is always 0 here */
	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
			txq_elem, buf_len, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to add lan txq");
		rte_free(txq_elem);
		return -EIO;
	}
	/* store the schedule node id */
	txq->q_teid = txq_elem->txqs[0].q_teid;

	/* move the queue to correct position in hierarchy, if explicit hierarchy configured */
	if (pf->tm_conf.committed)
		if (ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) {
			PMD_DRV_LOG(ERR, "Failed to set up txq traffic management node");
			rte_free(txq_elem);
			return -EIO;
		}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	rte_free(txq_elem);
	return 0;
}

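/*
 * Minimal Rx queue programming for the flow director control queue: legacy
 * descriptor format (ICE_RXDID_LEGACY_1), a fixed 1024-byte data buffer
 * and no buffer split, but otherwise the same RLAN/QRXFLXP_CNTXT setup as
 * ice_program_hw_rx_queue().
 */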
832*c1d14583SBruce Richardson static int
833*c1d14583SBruce Richardson ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
834*c1d14583SBruce Richardson {
835*c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
836*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
837*c1d14583SBruce Richardson 	uint32_t rxdid = ICE_RXDID_LEGACY_1;
838*c1d14583SBruce Richardson 	struct ice_rlan_ctx rx_ctx;
839*c1d14583SBruce Richardson 	uint32_t regval;
840*c1d14583SBruce Richardson 	int err;
841*c1d14583SBruce Richardson 
842*c1d14583SBruce Richardson 	rxq->rx_hdr_len = 0;
843*c1d14583SBruce Richardson 	rxq->rx_buf_len = 1024;
844*c1d14583SBruce Richardson 
845*c1d14583SBruce Richardson 	memset(&rx_ctx, 0, sizeof(rx_ctx));
846*c1d14583SBruce Richardson 
847*c1d14583SBruce Richardson 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
848*c1d14583SBruce Richardson 	rx_ctx.qlen = rxq->nb_rx_desc;
849*c1d14583SBruce Richardson 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
850*c1d14583SBruce Richardson 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
851*c1d14583SBruce Richardson 	rx_ctx.dtype = 0; /* No Buffer Split mode */
852*c1d14583SBruce Richardson 	rx_ctx.dsize = 1; /* 32B descriptors */
853*c1d14583SBruce Richardson 	rx_ctx.rxmax = ICE_ETH_MAX_LEN;
854*c1d14583SBruce Richardson 	/* TPH: Transaction Layer Packet (TLP) processing hints */
855*c1d14583SBruce Richardson 	rx_ctx.tphrdesc_ena = 1;
856*c1d14583SBruce Richardson 	rx_ctx.tphwdesc_ena = 1;
857*c1d14583SBruce Richardson 	rx_ctx.tphdata_ena = 1;
858*c1d14583SBruce Richardson 	rx_ctx.tphhead_ena = 1;
859*c1d14583SBruce Richardson 	/* Low Receive Queue Threshold defined in 64 descriptors units.
860*c1d14583SBruce Richardson 	 * When the number of free descriptors goes below the lrxqthresh,
861*c1d14583SBruce Richardson 	 * an immediate interrupt is triggered.
862*c1d14583SBruce Richardson 	 */
863*c1d14583SBruce Richardson 	rx_ctx.lrxqthresh = 2;
864*c1d14583SBruce Richardson 	/*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/
865*c1d14583SBruce Richardson 	rx_ctx.l2tsel = 1;
866*c1d14583SBruce Richardson 	rx_ctx.showiv = 0;
867*c1d14583SBruce Richardson 	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
868*c1d14583SBruce Richardson 
869*c1d14583SBruce Richardson 	/* Enable Flexible Descriptors in the queue context which
870*c1d14583SBruce Richardson 	 * allows this driver to select a specific receive descriptor format
871*c1d14583SBruce Richardson 	 */
872*c1d14583SBruce Richardson 	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
873*c1d14583SBruce Richardson 		QRXFLXP_CNTXT_RXDID_IDX_M;
874*c1d14583SBruce Richardson 
875*c1d14583SBruce Richardson 	/* increasing context priority to pick up profile ID;
876*c1d14583SBruce Richardson 	 * default is 0x01; setting to 0x03 to ensure profile
877*c1d14583SBruce Richardson 	 * is programming if prev context is of same priority
878*c1d14583SBruce Richardson 	 */
879*c1d14583SBruce Richardson 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
880*c1d14583SBruce Richardson 		QRXFLXP_CNTXT_RXDID_PRIO_M;
881*c1d14583SBruce Richardson 
882*c1d14583SBruce Richardson 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
883*c1d14583SBruce Richardson 
884*c1d14583SBruce Richardson 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
885*c1d14583SBruce Richardson 	if (err) {
886*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
887*c1d14583SBruce Richardson 			    rxq->queue_id);
888*c1d14583SBruce Richardson 		return -EINVAL;
889*c1d14583SBruce Richardson 	}
890*c1d14583SBruce Richardson 	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
891*c1d14583SBruce Richardson 	if (err) {
892*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
893*c1d14583SBruce Richardson 			    rxq->queue_id);
894*c1d14583SBruce Richardson 		return -EINVAL;
895*c1d14583SBruce Richardson 	}
896*c1d14583SBruce Richardson 
897*c1d14583SBruce Richardson 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
898*c1d14583SBruce Richardson 
899*c1d14583SBruce Richardson 	/* Init the Rx tail register*/
900*c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
901*c1d14583SBruce Richardson 
902*c1d14583SBruce Richardson 	return 0;
903*c1d14583SBruce Richardson }
904*c1d14583SBruce Richardson 
905*c1d14583SBruce Richardson int
906*c1d14583SBruce Richardson ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
907*c1d14583SBruce Richardson {
908*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
909*c1d14583SBruce Richardson 	int err;
910*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
911*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
912*c1d14583SBruce Richardson 
913*c1d14583SBruce Richardson 	PMD_INIT_FUNC_TRACE();
914*c1d14583SBruce Richardson 
915*c1d14583SBruce Richardson 	rxq = pf->fdir.rxq;
916*c1d14583SBruce Richardson 	if (!rxq || !rxq->q_set) {
917*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
918*c1d14583SBruce Richardson 			    rx_queue_id);
919*c1d14583SBruce Richardson 		return -EINVAL;
920*c1d14583SBruce Richardson 	}
921*c1d14583SBruce Richardson 
922*c1d14583SBruce Richardson 	err = ice_fdir_program_hw_rx_queue(rxq);
923*c1d14583SBruce Richardson 	if (err) {
924*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
925*c1d14583SBruce Richardson 			    rx_queue_id);
926*c1d14583SBruce Richardson 		return -EIO;
927*c1d14583SBruce Richardson 	}
928*c1d14583SBruce Richardson 
929*c1d14583SBruce Richardson 	/* Init the RX tail register. */
930*c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
931*c1d14583SBruce Richardson 
932*c1d14583SBruce Richardson 	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
933*c1d14583SBruce Richardson 	if (err) {
934*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
935*c1d14583SBruce Richardson 			    rx_queue_id);
936*c1d14583SBruce Richardson 
937*c1d14583SBruce Richardson 		ice_reset_rx_queue(rxq);
938*c1d14583SBruce Richardson 		return -EINVAL;
939*c1d14583SBruce Richardson 	}
940*c1d14583SBruce Richardson 
941*c1d14583SBruce Richardson 	return 0;
942*c1d14583SBruce Richardson }
943*c1d14583SBruce Richardson 
944*c1d14583SBruce Richardson int
945*c1d14583SBruce Richardson ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
946*c1d14583SBruce Richardson {
947*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
948*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
949*c1d14583SBruce Richardson 	int err;
950*c1d14583SBruce Richardson 	struct ice_vsi *vsi;
951*c1d14583SBruce Richardson 	struct ice_hw *hw;
952*c1d14583SBruce Richardson 	struct ice_aqc_add_tx_qgrp *txq_elem;
953*c1d14583SBruce Richardson 	struct ice_tlan_ctx tx_ctx;
954*c1d14583SBruce Richardson 	int buf_len;
955*c1d14583SBruce Richardson 
956*c1d14583SBruce Richardson 	PMD_INIT_FUNC_TRACE();
957*c1d14583SBruce Richardson 
958*c1d14583SBruce Richardson 	txq = pf->fdir.txq;
959*c1d14583SBruce Richardson 	if (!txq || !txq->q_set) {
960*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
961*c1d14583SBruce Richardson 			    tx_queue_id);
962*c1d14583SBruce Richardson 		return -EINVAL;
963*c1d14583SBruce Richardson 	}
964*c1d14583SBruce Richardson 
965*c1d14583SBruce Richardson 	buf_len = ice_struct_size(txq_elem, txqs, 1);
966*c1d14583SBruce Richardson 	txq_elem = ice_malloc(hw, buf_len);
967*c1d14583SBruce Richardson 	if (!txq_elem)
968*c1d14583SBruce Richardson 		return -ENOMEM;
969*c1d14583SBruce Richardson 
970*c1d14583SBruce Richardson 	vsi = txq->vsi;
971*c1d14583SBruce Richardson 	hw = ICE_VSI_TO_HW(vsi);
972*c1d14583SBruce Richardson 
973*c1d14583SBruce Richardson 	memset(&tx_ctx, 0, sizeof(tx_ctx));
974*c1d14583SBruce Richardson 	txq_elem->num_txqs = 1;
975*c1d14583SBruce Richardson 	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
976*c1d14583SBruce Richardson 
977*c1d14583SBruce Richardson 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
978*c1d14583SBruce Richardson 	tx_ctx.qlen = txq->nb_tx_desc;
979*c1d14583SBruce Richardson 	tx_ctx.pf_num = hw->pf_id;
980*c1d14583SBruce Richardson 	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
981*c1d14583SBruce Richardson 	tx_ctx.src_vsi = vsi->vsi_id;
982*c1d14583SBruce Richardson 	tx_ctx.port_num = hw->port_info->lport;
983*c1d14583SBruce Richardson 	tx_ctx.tso_ena = 1; /* tso enable */
984*c1d14583SBruce Richardson 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
985*c1d14583SBruce Richardson 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
986*c1d14583SBruce Richardson 
987*c1d14583SBruce Richardson 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
988*c1d14583SBruce Richardson 		    ice_tlan_ctx_info);
989*c1d14583SBruce Richardson 
990*c1d14583SBruce Richardson 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
991*c1d14583SBruce Richardson 
992*c1d14583SBruce Richardson 	/* Init the Tx tail register */
993*c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
994*c1d14583SBruce Richardson 
995*c1d14583SBruce Richardson 	/* FIXME: we assume TC is always 0 here */
996*c1d14583SBruce Richardson 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
997*c1d14583SBruce Richardson 			      txq_elem, buf_len, NULL);
998*c1d14583SBruce Richardson 	if (err) {
999*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
1000*c1d14583SBruce Richardson 		rte_free(txq_elem);
1001*c1d14583SBruce Richardson 		return -EIO;
1002*c1d14583SBruce Richardson 	}
1003*c1d14583SBruce Richardson 	/* store the schedule node id */
1004*c1d14583SBruce Richardson 	txq->q_teid = txq_elem->txqs[0].q_teid;
1005*c1d14583SBruce Richardson 
1006*c1d14583SBruce Richardson 	rte_free(txq_elem);
1007*c1d14583SBruce Richardson 	return 0;
1008*c1d14583SBruce Richardson }
1009*c1d14583SBruce Richardson 
1010*c1d14583SBruce Richardson /* Free all mbufs for descriptors in tx queue */
1011*c1d14583SBruce Richardson static void
1012*c1d14583SBruce Richardson _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
1013*c1d14583SBruce Richardson {
1014*c1d14583SBruce Richardson 	uint16_t i;
1015*c1d14583SBruce Richardson 
1016*c1d14583SBruce Richardson 	if (!txq || !txq->sw_ring) {
1017*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
1018*c1d14583SBruce Richardson 		return;
1019*c1d14583SBruce Richardson 	}
1020*c1d14583SBruce Richardson 
1021*c1d14583SBruce Richardson 	for (i = 0; i < txq->nb_tx_desc; i++) {
1022*c1d14583SBruce Richardson 		if (txq->sw_ring[i].mbuf) {
1023*c1d14583SBruce Richardson 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1024*c1d14583SBruce Richardson 			txq->sw_ring[i].mbuf = NULL;
1025*c1d14583SBruce Richardson 		}
1026*c1d14583SBruce Richardson 	}
1027*c1d14583SBruce Richardson }
1028*c1d14583SBruce Richardson 
1029*c1d14583SBruce Richardson static void
1030*c1d14583SBruce Richardson ice_reset_tx_queue(struct ice_tx_queue *txq)
1031*c1d14583SBruce Richardson {
1032*c1d14583SBruce Richardson 	struct ice_tx_entry *txe;
1033*c1d14583SBruce Richardson 	uint16_t i, prev, size;
1034*c1d14583SBruce Richardson 
1035*c1d14583SBruce Richardson 	if (!txq) {
1036*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
1037*c1d14583SBruce Richardson 		return;
1038*c1d14583SBruce Richardson 	}
1039*c1d14583SBruce Richardson 
1040*c1d14583SBruce Richardson 	txe = txq->sw_ring;
1041*c1d14583SBruce Richardson 	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
1042*c1d14583SBruce Richardson 	for (i = 0; i < size; i++)
1043*c1d14583SBruce Richardson 		((volatile char *)txq->tx_ring)[i] = 0;
1044*c1d14583SBruce Richardson 
1045*c1d14583SBruce Richardson 	prev = (uint16_t)(txq->nb_tx_desc - 1);
1046*c1d14583SBruce Richardson 	for (i = 0; i < txq->nb_tx_desc; i++) {
1047*c1d14583SBruce Richardson 		volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
1048*c1d14583SBruce Richardson 
1049*c1d14583SBruce Richardson 		txd->cmd_type_offset_bsz =
1050*c1d14583SBruce Richardson 			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
1051*c1d14583SBruce Richardson 		txe[i].mbuf = NULL;
1052*c1d14583SBruce Richardson 		txe[i].last_id = i;
1053*c1d14583SBruce Richardson 		txe[prev].next_id = i;
1054*c1d14583SBruce Richardson 		prev = i;
1055*c1d14583SBruce Richardson 	}
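	/*
	 * A small worked example with an assumed value, nb_tx_desc = 4:
	 * the loop above marks every descriptor as DESC_DONE and links the
	 * software ring into the circle 0 -> 1 -> 2 -> 3 -> 0 via next_id,
	 * with each last_id set to its own index, so the transmit/cleanup
	 * paths start from a fully "completed" ring.
	 */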
1056*c1d14583SBruce Richardson 
1057*c1d14583SBruce Richardson 	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1058*c1d14583SBruce Richardson 	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1059*c1d14583SBruce Richardson 
1060*c1d14583SBruce Richardson 	txq->tx_tail = 0;
1061*c1d14583SBruce Richardson 	txq->nb_tx_used = 0;
1062*c1d14583SBruce Richardson 
1063*c1d14583SBruce Richardson 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1064*c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1065*c1d14583SBruce Richardson }
1066*c1d14583SBruce Richardson 
1067*c1d14583SBruce Richardson int
1068*c1d14583SBruce Richardson ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1069*c1d14583SBruce Richardson {
1070*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
1071*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1072*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1073*c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1074*c1d14583SBruce Richardson 	uint16_t q_ids[1];
1075*c1d14583SBruce Richardson 	uint32_t q_teids[1];
1076*c1d14583SBruce Richardson 	uint16_t q_handle = tx_queue_id;
1077*c1d14583SBruce Richardson 	int status;
1078*c1d14583SBruce Richardson 
1079*c1d14583SBruce Richardson 	if (tx_queue_id >= dev->data->nb_tx_queues) {
1080*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
1081*c1d14583SBruce Richardson 			    tx_queue_id, dev->data->nb_tx_queues);
1082*c1d14583SBruce Richardson 		return -EINVAL;
1083*c1d14583SBruce Richardson 	}
1084*c1d14583SBruce Richardson 
1085*c1d14583SBruce Richardson 	txq = dev->data->tx_queues[tx_queue_id];
1086*c1d14583SBruce Richardson 	if (!txq) {
1087*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
1088*c1d14583SBruce Richardson 			    tx_queue_id);
1089*c1d14583SBruce Richardson 		return -EINVAL;
1090*c1d14583SBruce Richardson 	}
1091*c1d14583SBruce Richardson 
1092*c1d14583SBruce Richardson 	if (dev->data->tx_queue_state[tx_queue_id] ==
1093*c1d14583SBruce Richardson 		RTE_ETH_QUEUE_STATE_STOPPED)
1094*c1d14583SBruce Richardson 		return 0;
1095*c1d14583SBruce Richardson 
1096*c1d14583SBruce Richardson 	q_ids[0] = txq->reg_idx;
1097*c1d14583SBruce Richardson 	q_teids[0] = txq->q_teid;
1098*c1d14583SBruce Richardson 
1099*c1d14583SBruce Richardson 	/* FIXME: we assume TC is always 0 here */
1100*c1d14583SBruce Richardson 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1101*c1d14583SBruce Richardson 				q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1102*c1d14583SBruce Richardson 	if (status != ICE_SUCCESS) {
1103*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
1104*c1d14583SBruce Richardson 		return -EINVAL;
1105*c1d14583SBruce Richardson 	}
1106*c1d14583SBruce Richardson 
1107*c1d14583SBruce Richardson 	txq->tx_rel_mbufs(txq);
1108*c1d14583SBruce Richardson 	ice_reset_tx_queue(txq);
1109*c1d14583SBruce Richardson 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1110*c1d14583SBruce Richardson 
1111*c1d14583SBruce Richardson 	return 0;
1112*c1d14583SBruce Richardson }
1113*c1d14583SBruce Richardson 
1114*c1d14583SBruce Richardson int
1115*c1d14583SBruce Richardson ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1116*c1d14583SBruce Richardson {
1117*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1118*c1d14583SBruce Richardson 	int err;
1119*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1120*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1121*c1d14583SBruce Richardson 
1122*c1d14583SBruce Richardson 	rxq = pf->fdir.rxq;
1123*c1d14583SBruce Richardson 
1124*c1d14583SBruce Richardson 	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1125*c1d14583SBruce Richardson 	if (err) {
1126*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1127*c1d14583SBruce Richardson 			    rx_queue_id);
1128*c1d14583SBruce Richardson 		return -EINVAL;
1129*c1d14583SBruce Richardson 	}
1130*c1d14583SBruce Richardson 	rxq->rx_rel_mbufs(rxq);
1131*c1d14583SBruce Richardson 
1132*c1d14583SBruce Richardson 	return 0;
1133*c1d14583SBruce Richardson }
1134*c1d14583SBruce Richardson 
1135*c1d14583SBruce Richardson int
1136*c1d14583SBruce Richardson ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1137*c1d14583SBruce Richardson {
1138*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
1139*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1140*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1141*c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1142*c1d14583SBruce Richardson 	uint16_t q_ids[1];
1143*c1d14583SBruce Richardson 	uint32_t q_teids[1];
1144*c1d14583SBruce Richardson 	uint16_t q_handle = tx_queue_id;
1145*c1d14583SBruce Richardson 	int status;
1146*c1d14583SBruce Richardson 
1147*c1d14583SBruce Richardson 	txq = pf->fdir.txq;
1148*c1d14583SBruce Richardson 	if (!txq) {
1149*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
1150*c1d14583SBruce Richardson 			    tx_queue_id);
1151*c1d14583SBruce Richardson 		return -EINVAL;
1152*c1d14583SBruce Richardson 	}
1153*c1d14583SBruce Richardson 	if (txq->qtx_tail == NULL) {
1154*c1d14583SBruce Richardson 		PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id);
1155*c1d14583SBruce Richardson 		return 0;
1156*c1d14583SBruce Richardson 	}
1157*c1d14583SBruce Richardson 	vsi = txq->vsi;
1158*c1d14583SBruce Richardson 
1159*c1d14583SBruce Richardson 	q_ids[0] = txq->reg_idx;
1160*c1d14583SBruce Richardson 	q_teids[0] = txq->q_teid;
1161*c1d14583SBruce Richardson 
1162*c1d14583SBruce Richardson 	/* FIXME: we assume TC is always 0 here */
1163*c1d14583SBruce Richardson 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1164*c1d14583SBruce Richardson 				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1165*c1d14583SBruce Richardson 	if (status != ICE_SUCCESS) {
1166*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
1167*c1d14583SBruce Richardson 		return -EINVAL;
1168*c1d14583SBruce Richardson 	}
1169*c1d14583SBruce Richardson 
1170*c1d14583SBruce Richardson 	txq->tx_rel_mbufs(txq);
1171*c1d14583SBruce Richardson 	txq->qtx_tail = NULL;
1172*c1d14583SBruce Richardson 
1173*c1d14583SBruce Richardson 	return 0;
1174*c1d14583SBruce Richardson }
1175*c1d14583SBruce Richardson 
1176*c1d14583SBruce Richardson int
1177*c1d14583SBruce Richardson ice_rx_queue_setup(struct rte_eth_dev *dev,
1178*c1d14583SBruce Richardson 		   uint16_t queue_idx,
1179*c1d14583SBruce Richardson 		   uint16_t nb_desc,
1180*c1d14583SBruce Richardson 		   unsigned int socket_id,
1181*c1d14583SBruce Richardson 		   const struct rte_eth_rxconf *rx_conf,
1182*c1d14583SBruce Richardson 		   struct rte_mempool *mp)
1183*c1d14583SBruce Richardson {
1184*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1185*c1d14583SBruce Richardson 	struct ice_adapter *ad =
1186*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1187*c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1188*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1189*c1d14583SBruce Richardson 	const struct rte_memzone *rz;
1190*c1d14583SBruce Richardson 	uint32_t ring_size;
1191*c1d14583SBruce Richardson 	uint16_t len;
1192*c1d14583SBruce Richardson 	int use_def_burst_func = 1;
1193*c1d14583SBruce Richardson 	uint64_t offloads;
1194*c1d14583SBruce Richardson 	uint16_t n_seg = rx_conf->rx_nseg;
1195*c1d14583SBruce Richardson 	uint16_t i;
1196*c1d14583SBruce Richardson 
1197*c1d14583SBruce Richardson 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1198*c1d14583SBruce Richardson 	    nb_desc > ICE_MAX_RING_DESC ||
1199*c1d14583SBruce Richardson 	    nb_desc < ICE_MIN_RING_DESC) {
1200*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1201*c1d14583SBruce Richardson 			     "invalid", nb_desc);
1202*c1d14583SBruce Richardson 		return -EINVAL;
1203*c1d14583SBruce Richardson 	}
1204*c1d14583SBruce Richardson 
1205*c1d14583SBruce Richardson 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1206*c1d14583SBruce Richardson 
1207*c1d14583SBruce Richardson 	if (mp)
1208*c1d14583SBruce Richardson 		n_seg = 1;
1209*c1d14583SBruce Richardson 
1210*c1d14583SBruce Richardson 	if (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1211*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured",
1212*c1d14583SBruce Richardson 				dev->data->port_id, queue_idx);
1213*c1d14583SBruce Richardson 		return -EINVAL;
1214*c1d14583SBruce Richardson 	}
1215*c1d14583SBruce Richardson 
1216*c1d14583SBruce Richardson 	/* Free memory if needed */
1217*c1d14583SBruce Richardson 	if (dev->data->rx_queues[queue_idx]) {
1218*c1d14583SBruce Richardson 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1219*c1d14583SBruce Richardson 		dev->data->rx_queues[queue_idx] = NULL;
1220*c1d14583SBruce Richardson 	}
1221*c1d14583SBruce Richardson 
1222*c1d14583SBruce Richardson 	/* Allocate the rx queue data structure */
1223*c1d14583SBruce Richardson 	rxq = rte_zmalloc_socket(NULL,
1224*c1d14583SBruce Richardson 				 sizeof(struct ice_rx_queue),
1225*c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
1226*c1d14583SBruce Richardson 				 socket_id);
1227*c1d14583SBruce Richardson 
1228*c1d14583SBruce Richardson 	if (!rxq) {
1229*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1230*c1d14583SBruce Richardson 			     "rx queue data structure");
1231*c1d14583SBruce Richardson 		return -ENOMEM;
1232*c1d14583SBruce Richardson 	}
1233*c1d14583SBruce Richardson 
1234*c1d14583SBruce Richardson 	rxq->rxseg_nb = n_seg;
1235*c1d14583SBruce Richardson 	if (n_seg > 1) {
1236*c1d14583SBruce Richardson 		for (i = 0; i < n_seg; i++)
1237*c1d14583SBruce Richardson 			memcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split,
1238*c1d14583SBruce Richardson 				sizeof(struct rte_eth_rxseg_split));
1239*c1d14583SBruce Richardson 
1240*c1d14583SBruce Richardson 		rxq->mp = rxq->rxseg[0].mp;
1241*c1d14583SBruce Richardson 	} else {
1242*c1d14583SBruce Richardson 		rxq->mp = mp;
1243*c1d14583SBruce Richardson 	}
1244*c1d14583SBruce Richardson 
1245*c1d14583SBruce Richardson 	rxq->nb_rx_desc = nb_desc;
1246*c1d14583SBruce Richardson 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1247*c1d14583SBruce Richardson 	rxq->queue_id = queue_idx;
1248*c1d14583SBruce Richardson 	rxq->offloads = offloads;
1249*c1d14583SBruce Richardson 
1250*c1d14583SBruce Richardson 	rxq->reg_idx = vsi->base_queue + queue_idx;
1251*c1d14583SBruce Richardson 	rxq->port_id = dev->data->port_id;
1252*c1d14583SBruce Richardson 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1253*c1d14583SBruce Richardson 		rxq->crc_len = RTE_ETHER_CRC_LEN;
1254*c1d14583SBruce Richardson 	else
1255*c1d14583SBruce Richardson 		rxq->crc_len = 0;
1256*c1d14583SBruce Richardson 
1257*c1d14583SBruce Richardson 	rxq->drop_en = rx_conf->rx_drop_en;
1258*c1d14583SBruce Richardson 	rxq->vsi = vsi;
1259*c1d14583SBruce Richardson 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1260*c1d14583SBruce Richardson 	rxq->proto_xtr = pf->proto_xtr != NULL ?
1261*c1d14583SBruce Richardson 			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1262*c1d14583SBruce Richardson 	if (rxq->proto_xtr != PROTO_XTR_NONE &&
1263*c1d14583SBruce Richardson 			ad->devargs.xtr_flag_offs[rxq->proto_xtr] != 0xff)
1264*c1d14583SBruce Richardson 		rxq->xtr_ol_flag = 1ULL << ad->devargs.xtr_flag_offs[rxq->proto_xtr];
1265*c1d14583SBruce Richardson 	rxq->xtr_field_offs = ad->devargs.xtr_field_offs;
1266*c1d14583SBruce Richardson 
1267*c1d14583SBruce Richardson 	/* Size the ring for the maximum number of RX hardware descriptors. */
1268*c1d14583SBruce Richardson 	len = ICE_MAX_RING_DESC;
1269*c1d14583SBruce Richardson 
1270*c1d14583SBruce Richardson 	/**
1271*c1d14583SBruce Richardson 	 * Allocating a little more memory because vectorized/bulk_alloc Rx
1272*c1d14583SBruce Richardson 	 * functions don't check boundaries each time.
1273*c1d14583SBruce Richardson 	 */
1274*c1d14583SBruce Richardson 	len += ICE_RX_MAX_BURST;
1275*c1d14583SBruce Richardson 
1276*c1d14583SBruce Richardson 	/* Allocate the maximum number of RX ring hardware descriptors. */
1277*c1d14583SBruce Richardson 	ring_size = sizeof(union ice_rx_flex_desc) * len;
1278*c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1279*c1d14583SBruce Richardson 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1280*c1d14583SBruce Richardson 				      ring_size, ICE_RING_BASE_ALIGN,
1281*c1d14583SBruce Richardson 				      socket_id);
1282*c1d14583SBruce Richardson 	if (!rz) {
1283*c1d14583SBruce Richardson 		ice_rx_queue_release(rxq);
1284*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1285*c1d14583SBruce Richardson 		return -ENOMEM;
1286*c1d14583SBruce Richardson 	}
1287*c1d14583SBruce Richardson 
1288*c1d14583SBruce Richardson 	rxq->mz = rz;
1289*c1d14583SBruce Richardson 	/* Zero all the descriptors in the ring. */
1290*c1d14583SBruce Richardson 	memset(rz->addr, 0, ring_size);
1291*c1d14583SBruce Richardson 
1292*c1d14583SBruce Richardson 	rxq->rx_ring_dma = rz->iova;
1293*c1d14583SBruce Richardson 	rxq->rx_ring = rz->addr;
1294*c1d14583SBruce Richardson 
1295*c1d14583SBruce Richardson 	/* always reserve more for bulk alloc */
1296*c1d14583SBruce Richardson 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1297*c1d14583SBruce Richardson 
1298*c1d14583SBruce Richardson 	/* Allocate the software ring. */
1299*c1d14583SBruce Richardson 	rxq->sw_ring = rte_zmalloc_socket(NULL,
1300*c1d14583SBruce Richardson 					  sizeof(struct ice_rx_entry) * len,
1301*c1d14583SBruce Richardson 					  RTE_CACHE_LINE_SIZE,
1302*c1d14583SBruce Richardson 					  socket_id);
1303*c1d14583SBruce Richardson 	if (!rxq->sw_ring) {
1304*c1d14583SBruce Richardson 		ice_rx_queue_release(rxq);
1305*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1306*c1d14583SBruce Richardson 		return -ENOMEM;
1307*c1d14583SBruce Richardson 	}
1308*c1d14583SBruce Richardson 
1309*c1d14583SBruce Richardson 	ice_reset_rx_queue(rxq);
1310*c1d14583SBruce Richardson 	rxq->q_set = true;
1311*c1d14583SBruce Richardson 	dev->data->rx_queues[queue_idx] = rxq;
1312*c1d14583SBruce Richardson 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1313*c1d14583SBruce Richardson 
1314*c1d14583SBruce Richardson 	use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1315*c1d14583SBruce Richardson 
1316*c1d14583SBruce Richardson 	if (!use_def_burst_func) {
1317*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1318*c1d14583SBruce Richardson 			     "satisfied. Rx Burst Bulk Alloc function will be "
1319*c1d14583SBruce Richardson 			     "used on port=%d, queue=%d.",
1320*c1d14583SBruce Richardson 			     rxq->port_id, rxq->queue_id);
1321*c1d14583SBruce Richardson 	} else {
1322*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1323*c1d14583SBruce Richardson 			     "not satisfied; Rx Burst Bulk Alloc will not be "
1324*c1d14583SBruce Richardson 			     "used on port=%d, queue=%d.",
1325*c1d14583SBruce Richardson 			     rxq->port_id, rxq->queue_id);
1326*c1d14583SBruce Richardson 		ad->rx_bulk_alloc_allowed = false;
1327*c1d14583SBruce Richardson 	}
1328*c1d14583SBruce Richardson 
1329*c1d14583SBruce Richardson 	return 0;
1330*c1d14583SBruce Richardson }
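/*
 * A minimal usage sketch (assumed application-side code, not part of this
 * driver): requesting header/payload buffer split for a queue served by the
 * setup path above. The port id, descriptor count and the two mempools
 * ("hdr_pool" and "pay_pool") are hypothetical, and the exact segment
 * semantics (length- vs. protocol-based split) depend on the ethdev version.
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool, .length = 128, .offset = 0 } },
 *		{ .split = { .mp = pay_pool, .length = 0,   .offset = 0 } },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
 *		.rx_seg = segs,
 *		.rx_nseg = 2,
 *	};
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &rxconf, NULL);
 */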
1331*c1d14583SBruce Richardson 
1332*c1d14583SBruce Richardson void
1333*c1d14583SBruce Richardson ice_rx_queue_release(void *rxq)
1334*c1d14583SBruce Richardson {
1335*c1d14583SBruce Richardson 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1336*c1d14583SBruce Richardson 
1337*c1d14583SBruce Richardson 	if (!q) {
1338*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1339*c1d14583SBruce Richardson 		return;
1340*c1d14583SBruce Richardson 	}
1341*c1d14583SBruce Richardson 
1342*c1d14583SBruce Richardson 	if (q->rx_rel_mbufs != NULL)
1343*c1d14583SBruce Richardson 		q->rx_rel_mbufs(q);
1344*c1d14583SBruce Richardson 	rte_free(q->sw_ring);
1345*c1d14583SBruce Richardson 	rte_memzone_free(q->mz);
1346*c1d14583SBruce Richardson 	rte_free(q);
1347*c1d14583SBruce Richardson }
1348*c1d14583SBruce Richardson 
1349*c1d14583SBruce Richardson int
1350*c1d14583SBruce Richardson ice_tx_queue_setup(struct rte_eth_dev *dev,
1351*c1d14583SBruce Richardson 		   uint16_t queue_idx,
1352*c1d14583SBruce Richardson 		   uint16_t nb_desc,
1353*c1d14583SBruce Richardson 		   unsigned int socket_id,
1354*c1d14583SBruce Richardson 		   const struct rte_eth_txconf *tx_conf)
1355*c1d14583SBruce Richardson {
1356*c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1357*c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1358*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
1359*c1d14583SBruce Richardson 	const struct rte_memzone *tz;
1360*c1d14583SBruce Richardson 	uint32_t ring_size;
1361*c1d14583SBruce Richardson 	uint16_t tx_rs_thresh, tx_free_thresh;
1362*c1d14583SBruce Richardson 	uint64_t offloads;
1363*c1d14583SBruce Richardson 
1364*c1d14583SBruce Richardson 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1365*c1d14583SBruce Richardson 
1366*c1d14583SBruce Richardson 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1367*c1d14583SBruce Richardson 	    nb_desc > ICE_MAX_RING_DESC ||
1368*c1d14583SBruce Richardson 	    nb_desc < ICE_MIN_RING_DESC) {
1369*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1370*c1d14583SBruce Richardson 			     "invalid", nb_desc);
1371*c1d14583SBruce Richardson 		return -EINVAL;
1372*c1d14583SBruce Richardson 	}
1373*c1d14583SBruce Richardson 
1374*c1d14583SBruce Richardson 	/**
1375*c1d14583SBruce Richardson 	 * The following two parameters control the setting of the RS bit on
1376*c1d14583SBruce Richardson 	 * transmit descriptors. TX descriptors will have their RS bit set
1377*c1d14583SBruce Richardson 	 * after txq->tx_rs_thresh descriptors have been used. The TX
1378*c1d14583SBruce Richardson 	 * descriptor ring will be cleaned after txq->tx_free_thresh
1379*c1d14583SBruce Richardson 	 * descriptors are used or if the number of descriptors required to
1380*c1d14583SBruce Richardson 	 * transmit a packet is greater than the number of free TX descriptors.
1381*c1d14583SBruce Richardson 	 *
1382*c1d14583SBruce Richardson 	 * The following constraints must be satisfied:
1383*c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be greater than 0.
1384*c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
1385*c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
1386*c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be a divisor of the ring size.
1387*c1d14583SBruce Richardson 	 *  - tx_free_thresh must be greater than 0.
1388*c1d14583SBruce Richardson 	 *  - tx_free_thresh must be less than the size of the ring minus 3.
1389*c1d14583SBruce Richardson 	 *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1390*c1d14583SBruce Richardson 	 *
1391*c1d14583SBruce Richardson 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1392*c1d14583SBruce Richardson 	 * race condition, hence the maximum threshold constraints. When set
1393*c1d14583SBruce Richardson 	 * to zero use default values.
1394*c1d14583SBruce Richardson 	 */
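	/*
	 * A worked example with assumed values: nb_desc = 1024,
	 * tx_rs_thresh = 32 and tx_free_thresh = 64 satisfy every constraint
	 * above: 32 > 0, 32 < 1022, 32 <= 64, 1024 % 32 == 0, 64 > 0,
	 * 64 < 1021 and 32 + 64 <= 1024.
	 */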
1395*c1d14583SBruce Richardson 	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1396*c1d14583SBruce Richardson 				    tx_conf->tx_free_thresh :
1397*c1d14583SBruce Richardson 				    ICE_DEFAULT_TX_FREE_THRESH);
1398*c1d14583SBruce Richardson 	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
1399*c1d14583SBruce Richardson 	tx_rs_thresh =
1400*c1d14583SBruce Richardson 		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1401*c1d14583SBruce Richardson 			nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1402*c1d14583SBruce Richardson 	if (tx_conf->tx_rs_thresh)
1403*c1d14583SBruce Richardson 		tx_rs_thresh = tx_conf->tx_rs_thresh;
1404*c1d14583SBruce Richardson 	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1405*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1406*c1d14583SBruce Richardson 				"exceed nb_desc. (tx_rs_thresh=%u "
1407*c1d14583SBruce Richardson 				"tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
1408*c1d14583SBruce Richardson 				(unsigned int)tx_rs_thresh,
1409*c1d14583SBruce Richardson 				(unsigned int)tx_free_thresh,
1410*c1d14583SBruce Richardson 				(unsigned int)nb_desc,
1411*c1d14583SBruce Richardson 				(int)dev->data->port_id,
1412*c1d14583SBruce Richardson 				(int)queue_idx);
1413*c1d14583SBruce Richardson 		return -EINVAL;
1414*c1d14583SBruce Richardson 	}
1415*c1d14583SBruce Richardson 	if (tx_rs_thresh >= (nb_desc - 2)) {
1416*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1417*c1d14583SBruce Richardson 			     "number of TX descriptors minus 2. "
1418*c1d14583SBruce Richardson 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1419*c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1420*c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1421*c1d14583SBruce Richardson 			     (int)queue_idx);
1422*c1d14583SBruce Richardson 		return -EINVAL;
1423*c1d14583SBruce Richardson 	}
1424*c1d14583SBruce Richardson 	if (tx_free_thresh >= (nb_desc - 3)) {
1425*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1427*c1d14583SBruce Richardson 			     "number of TX descriptors minus 3. "
1428*c1d14583SBruce Richardson 			     "(tx_free_thresh=%u port=%d queue=%d)",
1429*c1d14583SBruce Richardson 			     (unsigned int)tx_free_thresh,
1430*c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1431*c1d14583SBruce Richardson 			     (int)queue_idx);
1432*c1d14583SBruce Richardson 		return -EINVAL;
1433*c1d14583SBruce Richardson 	}
1434*c1d14583SBruce Richardson 	if (tx_rs_thresh > tx_free_thresh) {
1435*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1436*c1d14583SBruce Richardson 			     "equal to tx_free_thresh. (tx_free_thresh=%u"
1437*c1d14583SBruce Richardson 			     " tx_rs_thresh=%u port=%d queue=%d)",
1438*c1d14583SBruce Richardson 			     (unsigned int)tx_free_thresh,
1439*c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1440*c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1441*c1d14583SBruce Richardson 			     (int)queue_idx);
1442*c1d14583SBruce Richardson 		return -EINVAL;
1443*c1d14583SBruce Richardson 	}
1444*c1d14583SBruce Richardson 	if ((nb_desc % tx_rs_thresh) != 0) {
1445*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1446*c1d14583SBruce Richardson 			     "number of TX descriptors. (tx_rs_thresh=%u"
1447*c1d14583SBruce Richardson 			     " port=%d queue=%d)",
1448*c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1449*c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1450*c1d14583SBruce Richardson 			     (int)queue_idx);
1451*c1d14583SBruce Richardson 		return -EINVAL;
1452*c1d14583SBruce Richardson 	}
1453*c1d14583SBruce Richardson 	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1454*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1455*c1d14583SBruce Richardson 			     "tx_rs_thresh is greater than 1. "
1456*c1d14583SBruce Richardson 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1457*c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1458*c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1459*c1d14583SBruce Richardson 			     (int)queue_idx);
1460*c1d14583SBruce Richardson 		return -EINVAL;
1461*c1d14583SBruce Richardson 	}
1462*c1d14583SBruce Richardson 
1463*c1d14583SBruce Richardson 	/* Free memory if needed. */
1464*c1d14583SBruce Richardson 	if (dev->data->tx_queues[queue_idx]) {
1465*c1d14583SBruce Richardson 		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1466*c1d14583SBruce Richardson 		dev->data->tx_queues[queue_idx] = NULL;
1467*c1d14583SBruce Richardson 	}
1468*c1d14583SBruce Richardson 
1469*c1d14583SBruce Richardson 	/* Allocate the TX queue data structure. */
1470*c1d14583SBruce Richardson 	txq = rte_zmalloc_socket(NULL,
1471*c1d14583SBruce Richardson 				 sizeof(struct ice_tx_queue),
1472*c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
1473*c1d14583SBruce Richardson 				 socket_id);
1474*c1d14583SBruce Richardson 	if (!txq) {
1475*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1476*c1d14583SBruce Richardson 			     "tx queue structure");
1477*c1d14583SBruce Richardson 		return -ENOMEM;
1478*c1d14583SBruce Richardson 	}
1479*c1d14583SBruce Richardson 
1480*c1d14583SBruce Richardson 	/* Allocate TX hardware ring descriptors. */
1481*c1d14583SBruce Richardson 	ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1482*c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1483*c1d14583SBruce Richardson 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1484*c1d14583SBruce Richardson 				      ring_size, ICE_RING_BASE_ALIGN,
1485*c1d14583SBruce Richardson 				      socket_id);
1486*c1d14583SBruce Richardson 	if (!tz) {
1487*c1d14583SBruce Richardson 		ice_tx_queue_release(txq);
1488*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1489*c1d14583SBruce Richardson 		return -ENOMEM;
1490*c1d14583SBruce Richardson 	}
1491*c1d14583SBruce Richardson 
1492*c1d14583SBruce Richardson 	txq->mz = tz;
1493*c1d14583SBruce Richardson 	txq->nb_tx_desc = nb_desc;
1494*c1d14583SBruce Richardson 	txq->tx_rs_thresh = tx_rs_thresh;
1495*c1d14583SBruce Richardson 	txq->tx_free_thresh = tx_free_thresh;
1496*c1d14583SBruce Richardson 	txq->pthresh = tx_conf->tx_thresh.pthresh;
1497*c1d14583SBruce Richardson 	txq->hthresh = tx_conf->tx_thresh.hthresh;
1498*c1d14583SBruce Richardson 	txq->wthresh = tx_conf->tx_thresh.wthresh;
1499*c1d14583SBruce Richardson 	txq->queue_id = queue_idx;
1500*c1d14583SBruce Richardson 
1501*c1d14583SBruce Richardson 	txq->reg_idx = vsi->base_queue + queue_idx;
1502*c1d14583SBruce Richardson 	txq->port_id = dev->data->port_id;
1503*c1d14583SBruce Richardson 	txq->offloads = offloads;
1504*c1d14583SBruce Richardson 	txq->vsi = vsi;
1505*c1d14583SBruce Richardson 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
1506*c1d14583SBruce Richardson 
1507*c1d14583SBruce Richardson 	txq->tx_ring_dma = tz->iova;
1508*c1d14583SBruce Richardson 	txq->tx_ring = tz->addr;
1509*c1d14583SBruce Richardson 
1510*c1d14583SBruce Richardson 	/* Allocate software ring */
1511*c1d14583SBruce Richardson 	txq->sw_ring =
1512*c1d14583SBruce Richardson 		rte_zmalloc_socket(NULL,
1513*c1d14583SBruce Richardson 				   sizeof(struct ice_tx_entry) * nb_desc,
1514*c1d14583SBruce Richardson 				   RTE_CACHE_LINE_SIZE,
1515*c1d14583SBruce Richardson 				   socket_id);
1516*c1d14583SBruce Richardson 	if (!txq->sw_ring) {
1517*c1d14583SBruce Richardson 		ice_tx_queue_release(txq);
1518*c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1519*c1d14583SBruce Richardson 		return -ENOMEM;
1520*c1d14583SBruce Richardson 	}
1521*c1d14583SBruce Richardson 
1522*c1d14583SBruce Richardson 	ice_reset_tx_queue(txq);
1523*c1d14583SBruce Richardson 	txq->q_set = true;
1524*c1d14583SBruce Richardson 	dev->data->tx_queues[queue_idx] = txq;
1525*c1d14583SBruce Richardson 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1526*c1d14583SBruce Richardson 	ice_set_tx_function_flag(dev, txq);
1527*c1d14583SBruce Richardson 
1528*c1d14583SBruce Richardson 	return 0;
1529*c1d14583SBruce Richardson }
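/*
 * A minimal usage sketch (assumed application-side code, not part of this
 * driver): passing thresholds that the checks above accept through the
 * generic ethdev API. The port id and descriptor count are hypothetical.
 *
 *	struct rte_eth_txconf txconf = {
 *		.tx_rs_thresh = 32,
 *		.tx_free_thresh = 64,
 *	};
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     &txconf);
 */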
1530*c1d14583SBruce Richardson 
1531*c1d14583SBruce Richardson void
1532*c1d14583SBruce Richardson ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1533*c1d14583SBruce Richardson {
1534*c1d14583SBruce Richardson 	ice_rx_queue_release(dev->data->rx_queues[qid]);
1535*c1d14583SBruce Richardson }
1536*c1d14583SBruce Richardson 
1537*c1d14583SBruce Richardson void
1538*c1d14583SBruce Richardson ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1539*c1d14583SBruce Richardson {
1540*c1d14583SBruce Richardson 	ice_tx_queue_release(dev->data->tx_queues[qid]);
1541*c1d14583SBruce Richardson }
1542*c1d14583SBruce Richardson 
1543*c1d14583SBruce Richardson void
1544*c1d14583SBruce Richardson ice_tx_queue_release(void *txq)
1545*c1d14583SBruce Richardson {
1546*c1d14583SBruce Richardson 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1547*c1d14583SBruce Richardson 
1548*c1d14583SBruce Richardson 	if (!q) {
1549*c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1550*c1d14583SBruce Richardson 		return;
1551*c1d14583SBruce Richardson 	}
1552*c1d14583SBruce Richardson 
1553*c1d14583SBruce Richardson 	if (q->tx_rel_mbufs != NULL)
1554*c1d14583SBruce Richardson 		q->tx_rel_mbufs(q);
1555*c1d14583SBruce Richardson 	rte_free(q->sw_ring);
1556*c1d14583SBruce Richardson 	rte_memzone_free(q->mz);
1557*c1d14583SBruce Richardson 	rte_free(q);
1558*c1d14583SBruce Richardson }
1559*c1d14583SBruce Richardson 
1560*c1d14583SBruce Richardson void
1561*c1d14583SBruce Richardson ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1562*c1d14583SBruce Richardson 		 struct rte_eth_rxq_info *qinfo)
1563*c1d14583SBruce Richardson {
1564*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1565*c1d14583SBruce Richardson 
1566*c1d14583SBruce Richardson 	rxq = dev->data->rx_queues[queue_id];
1567*c1d14583SBruce Richardson 
1568*c1d14583SBruce Richardson 	qinfo->mp = rxq->mp;
1569*c1d14583SBruce Richardson 	qinfo->scattered_rx = dev->data->scattered_rx;
1570*c1d14583SBruce Richardson 	qinfo->nb_desc = rxq->nb_rx_desc;
1571*c1d14583SBruce Richardson 
1572*c1d14583SBruce Richardson 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1573*c1d14583SBruce Richardson 	qinfo->conf.rx_drop_en = rxq->drop_en;
1574*c1d14583SBruce Richardson 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1575*c1d14583SBruce Richardson }
1576*c1d14583SBruce Richardson 
1577*c1d14583SBruce Richardson void
1578*c1d14583SBruce Richardson ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1579*c1d14583SBruce Richardson 		 struct rte_eth_txq_info *qinfo)
1580*c1d14583SBruce Richardson {
1581*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
1582*c1d14583SBruce Richardson 
1583*c1d14583SBruce Richardson 	txq = dev->data->tx_queues[queue_id];
1584*c1d14583SBruce Richardson 
1585*c1d14583SBruce Richardson 	qinfo->nb_desc = txq->nb_tx_desc;
1586*c1d14583SBruce Richardson 
1587*c1d14583SBruce Richardson 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1588*c1d14583SBruce Richardson 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1589*c1d14583SBruce Richardson 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1590*c1d14583SBruce Richardson 
1591*c1d14583SBruce Richardson 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1592*c1d14583SBruce Richardson 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1593*c1d14583SBruce Richardson 	qinfo->conf.offloads = txq->offloads;
1594*c1d14583SBruce Richardson 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1595*c1d14583SBruce Richardson }
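/*
 * A minimal usage sketch (assumed application-side code): the two info
 * callbacks above back rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get().
 * "port_id" is hypothetical.
 *
 *	struct rte_eth_txq_info txinfo;
 *	if (rte_eth_tx_queue_info_get(port_id, 0, &txinfo) == 0)
 *		printf("txq0: %u descriptors, rs_thresh %u\n",
 *		       txinfo.nb_desc, txinfo.conf.tx_rs_thresh);
 */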
1596*c1d14583SBruce Richardson 
1597*c1d14583SBruce Richardson uint32_t
1598*c1d14583SBruce Richardson ice_rx_queue_count(void *rx_queue)
1599*c1d14583SBruce Richardson {
1600*c1d14583SBruce Richardson #define ICE_RXQ_SCAN_INTERVAL 4
1601*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
1602*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1603*c1d14583SBruce Richardson 	uint16_t desc = 0;
1604*c1d14583SBruce Richardson 
1605*c1d14583SBruce Richardson 	rxq = rx_queue;
1606*c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1607*c1d14583SBruce Richardson 	while ((desc < rxq->nb_rx_desc) &&
1608*c1d14583SBruce Richardson 	       rte_le_to_cpu_16(rxdp->wb.status_error0) &
1609*c1d14583SBruce Richardson 	       (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1610*c1d14583SBruce Richardson 		/**
1611*c1d14583SBruce Richardson 		 * Check the DD bit of every fourth rx descriptor in the group,
1612*c1d14583SBruce Richardson 		 * to avoid checking too frequently and degrading performance
1613*c1d14583SBruce Richardson 		 * too much.
1614*c1d14583SBruce Richardson 		 */
1615*c1d14583SBruce Richardson 		desc += ICE_RXQ_SCAN_INTERVAL;
1616*c1d14583SBruce Richardson 		rxdp += ICE_RXQ_SCAN_INTERVAL;
1617*c1d14583SBruce Richardson 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1618*c1d14583SBruce Richardson 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
1619*c1d14583SBruce Richardson 				 desc - rxq->nb_rx_desc]);
1620*c1d14583SBruce Richardson 	}
1621*c1d14583SBruce Richardson 
1622*c1d14583SBruce Richardson 	return desc;
1623*c1d14583SBruce Richardson }
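/*
 * A minimal usage sketch (assumed application-side code): this handler backs
 * rte_eth_rx_queue_count(), which an application can poll to estimate how
 * full a queue is. "port_id" is hypothetical.
 *
 *	int used = rte_eth_rx_queue_count(port_id, 0);
 *	if (used >= 0)
 *		printf("queue 0 has ~%d filled descriptors\n", used);
 */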
1624*c1d14583SBruce Richardson 
1625*c1d14583SBruce Richardson #define ICE_RX_FLEX_ERR0_BITS	\
1626*c1d14583SBruce Richardson 	((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |	\
1627*c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
1628*c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
1629*c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
1630*c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
1631*c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1632*c1d14583SBruce Richardson 
1633*c1d14583SBruce Richardson /* Rx L3/L4 checksum */
1634*c1d14583SBruce Richardson static inline uint64_t
1635*c1d14583SBruce Richardson ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1636*c1d14583SBruce Richardson {
1637*c1d14583SBruce Richardson 	uint64_t flags = 0;
1638*c1d14583SBruce Richardson 
1639*c1d14583SBruce Richardson 	/* check if HW has decoded the packet and checksum */
1640*c1d14583SBruce Richardson 	if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1641*c1d14583SBruce Richardson 		return 0;
1642*c1d14583SBruce Richardson 
1643*c1d14583SBruce Richardson 	if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1644*c1d14583SBruce Richardson 		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
1645*c1d14583SBruce Richardson 			  RTE_MBUF_F_RX_L4_CKSUM_GOOD |
1646*c1d14583SBruce Richardson 			  RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD);
1647*c1d14583SBruce Richardson 		return flags;
1648*c1d14583SBruce Richardson 	}
1649*c1d14583SBruce Richardson 
1650*c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1651*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1652*c1d14583SBruce Richardson 	else
1653*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1654*c1d14583SBruce Richardson 
1655*c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1656*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1657*c1d14583SBruce Richardson 	else
1658*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1659*c1d14583SBruce Richardson 
1660*c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1661*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1662*c1d14583SBruce Richardson 
1663*c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1664*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
1665*c1d14583SBruce Richardson 	else
1666*c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
1667*c1d14583SBruce Richardson 
1668*c1d14583SBruce Richardson 	return flags;
1669*c1d14583SBruce Richardson }
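/*
 * A minimal sketch of how an application might consume the flags set above
 * (assumed code, not part of this driver): dropping a packet whose IP or L4
 * checksum the hardware reported as bad.
 *
 *	if ((m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
 *			RTE_MBUF_F_RX_IP_CKSUM_BAD ||
 *	    (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
 *			RTE_MBUF_F_RX_L4_CKSUM_BAD)
 *		rte_pktmbuf_free(m);
 */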
1670*c1d14583SBruce Richardson 
1671*c1d14583SBruce Richardson static inline void
1672*c1d14583SBruce Richardson ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1673*c1d14583SBruce Richardson {
1674*c1d14583SBruce Richardson 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1675*c1d14583SBruce Richardson 	    (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1676*c1d14583SBruce Richardson 		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1677*c1d14583SBruce Richardson 		mb->vlan_tci =
1678*c1d14583SBruce Richardson 			rte_le_to_cpu_16(rxdp->wb.l2tag1);
1679*c1d14583SBruce Richardson 		PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1680*c1d14583SBruce Richardson 			   rte_le_to_cpu_16(rxdp->wb.l2tag1));
1681*c1d14583SBruce Richardson 	} else {
1682*c1d14583SBruce Richardson 		mb->vlan_tci = 0;
1683*c1d14583SBruce Richardson 	}
1684*c1d14583SBruce Richardson 
1685*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1686*c1d14583SBruce Richardson 	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1687*c1d14583SBruce Richardson 	    (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1688*c1d14583SBruce Richardson 		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
1689*c1d14583SBruce Richardson 				RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
1690*c1d14583SBruce Richardson 		mb->vlan_tci_outer = mb->vlan_tci;
1691*c1d14583SBruce Richardson 		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1692*c1d14583SBruce Richardson 		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1693*c1d14583SBruce Richardson 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1694*c1d14583SBruce Richardson 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1695*c1d14583SBruce Richardson 	} else {
1696*c1d14583SBruce Richardson 		mb->vlan_tci_outer = 0;
1697*c1d14583SBruce Richardson 	}
1698*c1d14583SBruce Richardson #endif
1699*c1d14583SBruce Richardson 	PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1700*c1d14583SBruce Richardson 		   mb->vlan_tci, mb->vlan_tci_outer);
1701*c1d14583SBruce Richardson }
1702*c1d14583SBruce Richardson 
1703*c1d14583SBruce Richardson #define ICE_LOOK_AHEAD 8
1704*c1d14583SBruce Richardson #if (ICE_LOOK_AHEAD != 8)
1705*c1d14583SBruce Richardson #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1706*c1d14583SBruce Richardson #endif
1707*c1d14583SBruce Richardson 
1708*c1d14583SBruce Richardson #define ICE_PTP_TS_VALID 0x1
1709*c1d14583SBruce Richardson 
1710*c1d14583SBruce Richardson static inline int
1711*c1d14583SBruce Richardson ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1712*c1d14583SBruce Richardson {
1713*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
1714*c1d14583SBruce Richardson 	struct ice_rx_entry *rxep;
1715*c1d14583SBruce Richardson 	struct rte_mbuf *mb;
1716*c1d14583SBruce Richardson 	uint16_t stat_err0;
1717*c1d14583SBruce Richardson 	uint16_t pkt_len, hdr_len;
1718*c1d14583SBruce Richardson 	int32_t s[ICE_LOOK_AHEAD], nb_dd;
1719*c1d14583SBruce Richardson 	int32_t i, j, nb_rx = 0;
1720*c1d14583SBruce Richardson 	uint64_t pkt_flags = 0;
1721*c1d14583SBruce Richardson 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1722*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1723*c1d14583SBruce Richardson 	bool is_tsinit = false;
1724*c1d14583SBruce Richardson 	uint64_t ts_ns;
1725*c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
1726*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1727*c1d14583SBruce Richardson 	struct ice_adapter *ad = rxq->vsi->adapter;
1728*c1d14583SBruce Richardson #endif
1729*c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1730*c1d14583SBruce Richardson 	rxep = &rxq->sw_ring[rxq->rx_tail];
1731*c1d14583SBruce Richardson 
1732*c1d14583SBruce Richardson 	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1733*c1d14583SBruce Richardson 
1734*c1d14583SBruce Richardson 	/* Make sure there is at least 1 packet to receive */
1735*c1d14583SBruce Richardson 	if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1736*c1d14583SBruce Richardson 		return 0;
1737*c1d14583SBruce Richardson 
1738*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1739*c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1740*c1d14583SBruce Richardson 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1741*c1d14583SBruce Richardson 
1742*c1d14583SBruce Richardson 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
1743*c1d14583SBruce Richardson 			is_tsinit = true;
1744*c1d14583SBruce Richardson 	}
1745*c1d14583SBruce Richardson #endif
1746*c1d14583SBruce Richardson 
1747*c1d14583SBruce Richardson 	/**
1748*c1d14583SBruce Richardson 	 * Scan LOOK_AHEAD descriptors at a time to determine which
1749*c1d14583SBruce Richardson 	 * descriptors reference packets that are ready to be received.
1750*c1d14583SBruce Richardson 	 */
1751*c1d14583SBruce Richardson 	for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1752*c1d14583SBruce Richardson 	     rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1753*c1d14583SBruce Richardson 		/* Read desc statuses backwards to avoid race condition */
1754*c1d14583SBruce Richardson 		for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1755*c1d14583SBruce Richardson 			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1756*c1d14583SBruce Richardson 
1757*c1d14583SBruce Richardson 		rte_smp_rmb();
1758*c1d14583SBruce Richardson 
1759*c1d14583SBruce Richardson 		/* Compute how many status bits were set */
1760*c1d14583SBruce Richardson 		for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1761*c1d14583SBruce Richardson 			nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1762*c1d14583SBruce Richardson 
1763*c1d14583SBruce Richardson 		nb_rx += nb_dd;
1764*c1d14583SBruce Richardson 
1765*c1d14583SBruce Richardson 		/* Translate descriptor info to mbuf parameters */
1766*c1d14583SBruce Richardson 		for (j = 0; j < nb_dd; j++) {
1767*c1d14583SBruce Richardson 			mb = rxep[j].mbuf;
1772*c1d14583SBruce Richardson 
1773*c1d14583SBruce Richardson 			if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1774*c1d14583SBruce Richardson 				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1775*c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1776*c1d14583SBruce Richardson 				mb->data_len = pkt_len;
1777*c1d14583SBruce Richardson 				mb->pkt_len = pkt_len;
1778*c1d14583SBruce Richardson 			} else {
1779*c1d14583SBruce Richardson 				mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs);
1780*c1d14583SBruce Richardson 				mb->next->next = NULL;
1781*c1d14583SBruce Richardson 				hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) &
1782*c1d14583SBruce Richardson 						ICE_RX_FLEX_DESC_HEADER_LEN_M;
1783*c1d14583SBruce Richardson 				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1784*c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1785*c1d14583SBruce Richardson 				mb->data_len = hdr_len;
1786*c1d14583SBruce Richardson 				mb->pkt_len = hdr_len + pkt_len;
1787*c1d14583SBruce Richardson 				mb->next->data_len = pkt_len;
1788*c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_RX
1789*c1d14583SBruce Richardson 				rte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb));
1790*c1d14583SBruce Richardson #endif
1791*c1d14583SBruce Richardson 			}
1792*c1d14583SBruce Richardson 
1793*c1d14583SBruce Richardson 			mb->ol_flags = 0;
1794*c1d14583SBruce Richardson 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1795*c1d14583SBruce Richardson 			pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1796*c1d14583SBruce Richardson 			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1797*c1d14583SBruce Richardson 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1798*c1d14583SBruce Richardson 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1799*c1d14583SBruce Richardson 			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1800*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1801*c1d14583SBruce Richardson 			if (ice_timestamp_dynflag > 0 &&
1802*c1d14583SBruce Richardson 			    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
1803*c1d14583SBruce Richardson 				rxq->time_high =
1804*c1d14583SBruce Richardson 				rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1805*c1d14583SBruce Richardson 				if (unlikely(is_tsinit)) {
1806*c1d14583SBruce Richardson 					ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
1807*c1d14583SBruce Richardson 									   rxq->time_high);
1808*c1d14583SBruce Richardson 					rxq->hw_time_low = (uint32_t)ts_ns;
1809*c1d14583SBruce Richardson 					rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
1810*c1d14583SBruce Richardson 					is_tsinit = false;
1811*c1d14583SBruce Richardson 				} else {
1812*c1d14583SBruce Richardson 					if (rxq->time_high < rxq->hw_time_low)
1813*c1d14583SBruce Richardson 						rxq->hw_time_high += 1;
1814*c1d14583SBruce Richardson 					ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
1815*c1d14583SBruce Richardson 					rxq->hw_time_low = rxq->time_high;
1816*c1d14583SBruce Richardson 				}
1817*c1d14583SBruce Richardson 				rxq->hw_time_update = rte_get_timer_cycles() /
1818*c1d14583SBruce Richardson 						     (rte_get_timer_hz() / 1000);
1819*c1d14583SBruce Richardson 				*RTE_MBUF_DYNFIELD(mb,
1820*c1d14583SBruce Richardson 						   ice_timestamp_dynfield_offset,
1821*c1d14583SBruce Richardson 						   rte_mbuf_timestamp_t *) = ts_ns;
1822*c1d14583SBruce Richardson 				pkt_flags |= ice_timestamp_dynflag;
1823*c1d14583SBruce Richardson 			}
1824*c1d14583SBruce Richardson 
1825*c1d14583SBruce Richardson 			if (ad->ptp_ena && ((mb->packet_type &
1826*c1d14583SBruce Richardson 			    RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1827*c1d14583SBruce Richardson 				rxq->time_high =
1828*c1d14583SBruce Richardson 				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1829*c1d14583SBruce Richardson 				mb->timesync = rxq->queue_id;
1830*c1d14583SBruce Richardson 				pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
1831*c1d14583SBruce Richardson 				if (rxdp[j].wb.time_stamp_low &
1832*c1d14583SBruce Richardson 				    ICE_PTP_TS_VALID)
1833*c1d14583SBruce Richardson 					pkt_flags |=
1834*c1d14583SBruce Richardson 						RTE_MBUF_F_RX_IEEE1588_TMST;
1835*c1d14583SBruce Richardson 			}
1836*c1d14583SBruce Richardson #endif
1837*c1d14583SBruce Richardson 			mb->ol_flags |= pkt_flags;
1838*c1d14583SBruce Richardson 		}
1839*c1d14583SBruce Richardson 
1840*c1d14583SBruce Richardson 		for (j = 0; j < ICE_LOOK_AHEAD; j++)
1841*c1d14583SBruce Richardson 			rxq->rx_stage[i + j] = rxep[j].mbuf;
1842*c1d14583SBruce Richardson 
1843*c1d14583SBruce Richardson 		if (nb_dd != ICE_LOOK_AHEAD)
1844*c1d14583SBruce Richardson 			break;
1845*c1d14583SBruce Richardson 	}
1846*c1d14583SBruce Richardson 
1847*c1d14583SBruce Richardson 	/* Clear software ring entries */
1848*c1d14583SBruce Richardson 	for (i = 0; i < nb_rx; i++)
1849*c1d14583SBruce Richardson 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1850*c1d14583SBruce Richardson 
1851*c1d14583SBruce Richardson 	PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1852*c1d14583SBruce Richardson 		   "port_id=%u, queue_id=%u, nb_rx=%d",
1853*c1d14583SBruce Richardson 		   rxq->port_id, rxq->queue_id, nb_rx);
1854*c1d14583SBruce Richardson 
1855*c1d14583SBruce Richardson 	return nb_rx;
1856*c1d14583SBruce Richardson }
1857*c1d14583SBruce Richardson 
1858*c1d14583SBruce Richardson static inline uint16_t
1859*c1d14583SBruce Richardson ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1860*c1d14583SBruce Richardson 		       struct rte_mbuf **rx_pkts,
1861*c1d14583SBruce Richardson 		       uint16_t nb_pkts)
1862*c1d14583SBruce Richardson {
1863*c1d14583SBruce Richardson 	uint16_t i;
1864*c1d14583SBruce Richardson 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1865*c1d14583SBruce Richardson 
1866*c1d14583SBruce Richardson 	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1867*c1d14583SBruce Richardson 
1868*c1d14583SBruce Richardson 	for (i = 0; i < nb_pkts; i++)
1869*c1d14583SBruce Richardson 		rx_pkts[i] = stage[i];
1870*c1d14583SBruce Richardson 
1871*c1d14583SBruce Richardson 	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1872*c1d14583SBruce Richardson 	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1873*c1d14583SBruce Richardson 
1874*c1d14583SBruce Richardson 	return nb_pkts;
1875*c1d14583SBruce Richardson }
1876*c1d14583SBruce Richardson 
1877*c1d14583SBruce Richardson static inline int
1878*c1d14583SBruce Richardson ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1879*c1d14583SBruce Richardson {
1880*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
1881*c1d14583SBruce Richardson 	struct ice_rx_entry *rxep;
1882*c1d14583SBruce Richardson 	struct rte_mbuf *mb;
1883*c1d14583SBruce Richardson 	uint16_t alloc_idx, i;
1884*c1d14583SBruce Richardson 	uint64_t dma_addr;
1885*c1d14583SBruce Richardson 	int diag, diag_pay;
1886*c1d14583SBruce Richardson 	uint64_t pay_addr;
1887*c1d14583SBruce Richardson 	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
1888*c1d14583SBruce Richardson 
1889*c1d14583SBruce Richardson 	/* Allocate buffers in bulk */
1890*c1d14583SBruce Richardson 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1891*c1d14583SBruce Richardson 			       (rxq->rx_free_thresh - 1));
1892*c1d14583SBruce Richardson 	rxep = &rxq->sw_ring[alloc_idx];
1893*c1d14583SBruce Richardson 	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1894*c1d14583SBruce Richardson 				    rxq->rx_free_thresh);
1895*c1d14583SBruce Richardson 	if (unlikely(diag != 0)) {
1896*c1d14583SBruce Richardson 		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1897*c1d14583SBruce Richardson 		return -ENOMEM;
1898*c1d14583SBruce Richardson 	}
1899*c1d14583SBruce Richardson 
1900*c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1901*c1d14583SBruce Richardson 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
1902*c1d14583SBruce Richardson 				(void *)mbufs_pay, rxq->rx_free_thresh);
1903*c1d14583SBruce Richardson 		if (unlikely(diag_pay != 0)) {
1904*c1d14583SBruce Richardson 			rte_mempool_put_bulk(rxq->mp, (void *)rxep,
1905*c1d14583SBruce Richardson 				    rxq->rx_free_thresh);
1906*c1d14583SBruce Richardson 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
1907*c1d14583SBruce Richardson 			return -ENOMEM;
1908*c1d14583SBruce Richardson 		}
1909*c1d14583SBruce Richardson 	}
1910*c1d14583SBruce Richardson 
1911*c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[alloc_idx];
1912*c1d14583SBruce Richardson 	for (i = 0; i < rxq->rx_free_thresh; i++) {
1913*c1d14583SBruce Richardson 		if (likely(i < (rxq->rx_free_thresh - 1)))
1914*c1d14583SBruce Richardson 			/* Prefetch next mbuf */
1915*c1d14583SBruce Richardson 			rte_prefetch0(rxep[i + 1].mbuf);
1916*c1d14583SBruce Richardson 
1917*c1d14583SBruce Richardson 		mb = rxep[i].mbuf;
1918*c1d14583SBruce Richardson 		rte_mbuf_refcnt_set(mb, 1);
1919*c1d14583SBruce Richardson 		mb->data_off = RTE_PKTMBUF_HEADROOM;
1920*c1d14583SBruce Richardson 		mb->nb_segs = 1;
1921*c1d14583SBruce Richardson 		mb->port = rxq->port_id;
1922*c1d14583SBruce Richardson 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1923*c1d14583SBruce Richardson 
1924*c1d14583SBruce Richardson 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1925*c1d14583SBruce Richardson 			mb->next = NULL;
1926*c1d14583SBruce Richardson 			rxdp[i].read.hdr_addr = 0;
1927*c1d14583SBruce Richardson 			rxdp[i].read.pkt_addr = dma_addr;
1928*c1d14583SBruce Richardson 		} else {
1929*c1d14583SBruce Richardson 			mb->next = mbufs_pay[i];
1930*c1d14583SBruce Richardson 			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
1931*c1d14583SBruce Richardson 			rxdp[i].read.hdr_addr = dma_addr;
1932*c1d14583SBruce Richardson 			rxdp[i].read.pkt_addr = pay_addr;
1933*c1d14583SBruce Richardson 		}
1934*c1d14583SBruce Richardson 	}
1935*c1d14583SBruce Richardson 
1936*c1d14583SBruce Richardson 	/* Update Rx tail register */
1937*c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1938*c1d14583SBruce Richardson 
1939*c1d14583SBruce Richardson 	rxq->rx_free_trigger =
1940*c1d14583SBruce Richardson 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1941*c1d14583SBruce Richardson 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1942*c1d14583SBruce Richardson 		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1943*c1d14583SBruce Richardson 
1944*c1d14583SBruce Richardson 	return 0;
1945*c1d14583SBruce Richardson }
1946*c1d14583SBruce Richardson 
1947*c1d14583SBruce Richardson static inline uint16_t
1948*c1d14583SBruce Richardson rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1949*c1d14583SBruce Richardson {
1950*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1951*c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
1952*c1d14583SBruce Richardson 
1953*c1d14583SBruce Richardson 	if (!nb_pkts)
1954*c1d14583SBruce Richardson 		return 0;
1955*c1d14583SBruce Richardson 
1956*c1d14583SBruce Richardson 	if (rxq->rx_nb_avail)
1957*c1d14583SBruce Richardson 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1958*c1d14583SBruce Richardson 
1959*c1d14583SBruce Richardson 	nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1960*c1d14583SBruce Richardson 	rxq->rx_next_avail = 0;
1961*c1d14583SBruce Richardson 	rxq->rx_nb_avail = nb_rx;
1962*c1d14583SBruce Richardson 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1963*c1d14583SBruce Richardson 
1964*c1d14583SBruce Richardson 	if (rxq->rx_tail > rxq->rx_free_trigger) {
1965*c1d14583SBruce Richardson 		if (ice_rx_alloc_bufs(rxq) != 0) {
1966*c1d14583SBruce Richardson 			uint16_t i, j;
1967*c1d14583SBruce Richardson 
1968*c1d14583SBruce Richardson 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1969*c1d14583SBruce Richardson 				rxq->rx_free_thresh;
1970*c1d14583SBruce Richardson 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1971*c1d14583SBruce Richardson 				   "port_id=%u, queue_id=%u",
1972*c1d14583SBruce Richardson 				   rxq->port_id, rxq->queue_id);
1973*c1d14583SBruce Richardson 			rxq->rx_nb_avail = 0;
1974*c1d14583SBruce Richardson 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1975*c1d14583SBruce Richardson 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1976*c1d14583SBruce Richardson 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1977*c1d14583SBruce Richardson 
1978*c1d14583SBruce Richardson 			return 0;
1979*c1d14583SBruce Richardson 		}
1980*c1d14583SBruce Richardson 	}
1981*c1d14583SBruce Richardson 
1982*c1d14583SBruce Richardson 	if (rxq->rx_tail >= rxq->nb_rx_desc)
1983*c1d14583SBruce Richardson 		rxq->rx_tail = 0;
1984*c1d14583SBruce Richardson 
1985*c1d14583SBruce Richardson 	if (rxq->rx_nb_avail)
1986*c1d14583SBruce Richardson 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1987*c1d14583SBruce Richardson 
1988*c1d14583SBruce Richardson 	return 0;
1989*c1d14583SBruce Richardson }
1990*c1d14583SBruce Richardson 
1991*c1d14583SBruce Richardson static uint16_t
1992*c1d14583SBruce Richardson ice_recv_pkts_bulk_alloc(void *rx_queue,
1993*c1d14583SBruce Richardson 			 struct rte_mbuf **rx_pkts,
1994*c1d14583SBruce Richardson 			 uint16_t nb_pkts)
1995*c1d14583SBruce Richardson {
1996*c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
1997*c1d14583SBruce Richardson 	uint16_t n;
1998*c1d14583SBruce Richardson 	uint16_t count;
1999*c1d14583SBruce Richardson 
2000*c1d14583SBruce Richardson 	if (unlikely(nb_pkts == 0))
2001*c1d14583SBruce Richardson 		return nb_rx;
2002*c1d14583SBruce Richardson 
2003*c1d14583SBruce Richardson 	if (likely(nb_pkts <= ICE_RX_MAX_BURST))
2004*c1d14583SBruce Richardson 		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2005*c1d14583SBruce Richardson 
2006*c1d14583SBruce Richardson 	while (nb_pkts) {
2007*c1d14583SBruce Richardson 		n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
2008*c1d14583SBruce Richardson 		count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2009*c1d14583SBruce Richardson 		nb_rx = (uint16_t)(nb_rx + count);
2010*c1d14583SBruce Richardson 		nb_pkts = (uint16_t)(nb_pkts - count);
2011*c1d14583SBruce Richardson 		if (count < n)
2012*c1d14583SBruce Richardson 			break;
2013*c1d14583SBruce Richardson 	}
2014*c1d14583SBruce Richardson 
2015*c1d14583SBruce Richardson 	return nb_rx;
2016*c1d14583SBruce Richardson }
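/*
 * Usage sketch (illustrative only): the bulk-alloc path is reached through
 * the normal burst API, and requests larger than ICE_RX_MAX_BURST are served
 * by the loop above in chunks of at most ICE_RX_MAX_BURST packets, stopping
 * early when a chunk returns fewer packets than requested.  Assuming a
 * hypothetical port/queue pair:
 *
 *	struct rte_mbuf *pkts[128];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 128);
 */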
2017*c1d14583SBruce Richardson 
2018*c1d14583SBruce Richardson static uint16_t
2019*c1d14583SBruce Richardson ice_recv_scattered_pkts(void *rx_queue,
2020*c1d14583SBruce Richardson 			struct rte_mbuf **rx_pkts,
2021*c1d14583SBruce Richardson 			uint16_t nb_pkts)
2022*c1d14583SBruce Richardson {
2023*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = rx_queue;
2024*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2025*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
2026*c1d14583SBruce Richardson 	union ice_rx_flex_desc rxd;
2027*c1d14583SBruce Richardson 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2028*c1d14583SBruce Richardson 	struct ice_rx_entry *rxe;
2029*c1d14583SBruce Richardson 	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
2030*c1d14583SBruce Richardson 	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
2031*c1d14583SBruce Richardson 	struct rte_mbuf *nmb; /* new allocated mbuf */
2032*c1d14583SBruce Richardson 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2033*c1d14583SBruce Richardson 	uint16_t rx_id = rxq->rx_tail;
2034*c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
2035*c1d14583SBruce Richardson 	uint16_t nb_hold = 0;
2036*c1d14583SBruce Richardson 	uint16_t rx_packet_len;
2037*c1d14583SBruce Richardson 	uint16_t rx_stat_err0;
2038*c1d14583SBruce Richardson 	uint64_t dma_addr;
2039*c1d14583SBruce Richardson 	uint64_t pkt_flags;
2040*c1d14583SBruce Richardson 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2041*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2042*c1d14583SBruce Richardson 	bool is_tsinit = false;
2043*c1d14583SBruce Richardson 	uint64_t ts_ns;
2044*c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
2045*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2046*c1d14583SBruce Richardson 	struct ice_adapter *ad = rxq->vsi->adapter;
2047*c1d14583SBruce Richardson 
2048*c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2049*c1d14583SBruce Richardson 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2050*c1d14583SBruce Richardson 
2051*c1d14583SBruce Richardson 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
2052*c1d14583SBruce Richardson 			is_tsinit = true;
2053*c1d14583SBruce Richardson 	}
2054*c1d14583SBruce Richardson #endif
2055*c1d14583SBruce Richardson 
2056*c1d14583SBruce Richardson 	while (nb_rx < nb_pkts) {
2057*c1d14583SBruce Richardson 		rxdp = &rx_ring[rx_id];
2058*c1d14583SBruce Richardson 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2059*c1d14583SBruce Richardson 
2060*c1d14583SBruce Richardson 		/* Check the DD bit first */
2061*c1d14583SBruce Richardson 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2062*c1d14583SBruce Richardson 			break;
2063*c1d14583SBruce Richardson 
2064*c1d14583SBruce Richardson 		/* allocate mbuf */
2065*c1d14583SBruce Richardson 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2066*c1d14583SBruce Richardson 		if (unlikely(!nmb)) {
2067*c1d14583SBruce Richardson 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2068*c1d14583SBruce Richardson 			break;
2069*c1d14583SBruce Richardson 		}
2070*c1d14583SBruce Richardson 		rxd = *rxdp; /* copy the ring descriptor to a temporary variable */
2071*c1d14583SBruce Richardson 
2072*c1d14583SBruce Richardson 		nb_hold++;
2073*c1d14583SBruce Richardson 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2074*c1d14583SBruce Richardson 		rx_id++;
2075*c1d14583SBruce Richardson 		if (unlikely(rx_id == rxq->nb_rx_desc))
2076*c1d14583SBruce Richardson 			rx_id = 0;
2077*c1d14583SBruce Richardson 
2078*c1d14583SBruce Richardson 		/* Prefetch next mbuf */
2079*c1d14583SBruce Richardson 		rte_prefetch0(sw_ring[rx_id].mbuf);
2080*c1d14583SBruce Richardson 
2081*c1d14583SBruce Richardson 		/**
2082*c1d14583SBruce Richardson 		 * When the next RX descriptor is on a cache line boundary,
2083*c1d14583SBruce Richardson 		 * prefetch the next 4 RX descriptors and the next 8 pointers
2084*c1d14583SBruce Richardson 		 * to mbufs.
2085*c1d14583SBruce Richardson 		 */
2086*c1d14583SBruce Richardson 		if ((rx_id & 0x3) == 0) {
2087*c1d14583SBruce Richardson 			rte_prefetch0(&rx_ring[rx_id]);
2088*c1d14583SBruce Richardson 			rte_prefetch0(&sw_ring[rx_id]);
2089*c1d14583SBruce Richardson 		}
2090*c1d14583SBruce Richardson 
2091*c1d14583SBruce Richardson 		rxm = rxe->mbuf;
2092*c1d14583SBruce Richardson 		rxe->mbuf = nmb;
2093*c1d14583SBruce Richardson 		dma_addr =
2094*c1d14583SBruce Richardson 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2095*c1d14583SBruce Richardson 
2096*c1d14583SBruce Richardson 		/* Set data buffer address and data length of the mbuf */
2097*c1d14583SBruce Richardson 		rxdp->read.hdr_addr = 0;
2098*c1d14583SBruce Richardson 		rxdp->read.pkt_addr = dma_addr;
2099*c1d14583SBruce Richardson 		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
2100*c1d14583SBruce Richardson 				ICE_RX_FLX_DESC_PKT_LEN_M;
2101*c1d14583SBruce Richardson 		rxm->data_len = rx_packet_len;
2102*c1d14583SBruce Richardson 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2103*c1d14583SBruce Richardson 
2104*c1d14583SBruce Richardson 		/**
2105*c1d14583SBruce Richardson 		 * If this is the first buffer of the received packet, set the
2106*c1d14583SBruce Richardson 		 * pointer to the first mbuf of the packet and initialize its
2107*c1d14583SBruce Richardson 		 * context. Otherwise, update the total length and the number
2108*c1d14583SBruce Richardson 		 * of segments of the current scattered packet, and update the
2109*c1d14583SBruce Richardson 		 * pointer to the last mbuf of the current packet.
2110*c1d14583SBruce Richardson 		 */
2111*c1d14583SBruce Richardson 		if (!first_seg) {
2112*c1d14583SBruce Richardson 			first_seg = rxm;
2113*c1d14583SBruce Richardson 			first_seg->nb_segs = 1;
2114*c1d14583SBruce Richardson 			first_seg->pkt_len = rx_packet_len;
2115*c1d14583SBruce Richardson 		} else {
2116*c1d14583SBruce Richardson 			first_seg->pkt_len =
2117*c1d14583SBruce Richardson 				(uint16_t)(first_seg->pkt_len +
2118*c1d14583SBruce Richardson 					   rx_packet_len);
2119*c1d14583SBruce Richardson 			first_seg->nb_segs++;
2120*c1d14583SBruce Richardson 			last_seg->next = rxm;
2121*c1d14583SBruce Richardson 		}
2122*c1d14583SBruce Richardson 
2123*c1d14583SBruce Richardson 		/**
2124*c1d14583SBruce Richardson 		 * If this is not the last buffer of the received packet,
2125*c1d14583SBruce Richardson 		 * update the pointer to the last mbuf of the current scattered
2126*c1d14583SBruce Richardson 		 * packet and continue to parse the RX ring.
2127*c1d14583SBruce Richardson 		 */
2128*c1d14583SBruce Richardson 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
2129*c1d14583SBruce Richardson 			last_seg = rxm;
2130*c1d14583SBruce Richardson 			continue;
2131*c1d14583SBruce Richardson 		}
2132*c1d14583SBruce Richardson 
2133*c1d14583SBruce Richardson 		/**
2134*c1d14583SBruce Richardson 		 * This is the last buffer of the received packet. If the CRC
2135*c1d14583SBruce Richardson 		 * is not stripped by the hardware:
2136*c1d14583SBruce Richardson 		 *  - Subtract the CRC length from the total packet length.
2137*c1d14583SBruce Richardson 		 *  - If the last buffer only contains the whole CRC or a part
2138*c1d14583SBruce Richardson 		 *  of it, free the mbuf associated with the last buffer. If part
2139*c1d14583SBruce Richardson 		 *  of the CRC is also contained in the previous mbuf, subtract
2140*c1d14583SBruce Richardson 		 *  the length of that CRC part from the data length of the
2141*c1d14583SBruce Richardson 		 *  previous mbuf.
2142*c1d14583SBruce Richardson 		 */
2143*c1d14583SBruce Richardson 		rxm->next = NULL;
2144*c1d14583SBruce Richardson 		if (unlikely(rxq->crc_len > 0)) {
2145*c1d14583SBruce Richardson 			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
2146*c1d14583SBruce Richardson 			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
2147*c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(rxm);
2148*c1d14583SBruce Richardson 				first_seg->nb_segs--;
2149*c1d14583SBruce Richardson 				last_seg->data_len =
2150*c1d14583SBruce Richardson 					(uint16_t)(last_seg->data_len -
2151*c1d14583SBruce Richardson 					(RTE_ETHER_CRC_LEN - rx_packet_len));
2152*c1d14583SBruce Richardson 				last_seg->next = NULL;
2153*c1d14583SBruce Richardson 			} else
2154*c1d14583SBruce Richardson 				rxm->data_len = (uint16_t)(rx_packet_len -
2155*c1d14583SBruce Richardson 							   RTE_ETHER_CRC_LEN);
2156*c1d14583SBruce Richardson 		} else if (rx_packet_len == 0) {
2157*c1d14583SBruce Richardson 			rte_pktmbuf_free_seg(rxm);
2158*c1d14583SBruce Richardson 			first_seg->nb_segs--;
2159*c1d14583SBruce Richardson 			last_seg->next = NULL;
2160*c1d14583SBruce Richardson 		}
2161*c1d14583SBruce Richardson 
2162*c1d14583SBruce Richardson 		first_seg->port = rxq->port_id;
2163*c1d14583SBruce Richardson 		first_seg->ol_flags = 0;
2164*c1d14583SBruce Richardson 		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2165*c1d14583SBruce Richardson 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2166*c1d14583SBruce Richardson 		ice_rxd_to_vlan_tci(first_seg, &rxd);
2167*c1d14583SBruce Richardson 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
2168*c1d14583SBruce Richardson 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2169*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2170*c1d14583SBruce Richardson 		if (ice_timestamp_dynflag > 0 &&
2171*c1d14583SBruce Richardson 		    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
2172*c1d14583SBruce Richardson 			rxq->time_high =
2173*c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2174*c1d14583SBruce Richardson 			if (unlikely(is_tsinit)) {
2175*c1d14583SBruce Richardson 				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2176*c1d14583SBruce Richardson 				rxq->hw_time_low = (uint32_t)ts_ns;
2177*c1d14583SBruce Richardson 				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
2178*c1d14583SBruce Richardson 				is_tsinit = false;
2179*c1d14583SBruce Richardson 			} else {
2180*c1d14583SBruce Richardson 				if (rxq->time_high < rxq->hw_time_low)
2181*c1d14583SBruce Richardson 					rxq->hw_time_high += 1;
2182*c1d14583SBruce Richardson 				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
2183*c1d14583SBruce Richardson 				rxq->hw_time_low = rxq->time_high;
2184*c1d14583SBruce Richardson 			}
2185*c1d14583SBruce Richardson 			rxq->hw_time_update = rte_get_timer_cycles() /
2186*c1d14583SBruce Richardson 					     (rte_get_timer_hz() / 1000);
2187*c1d14583SBruce Richardson 			*RTE_MBUF_DYNFIELD(first_seg,
2188*c1d14583SBruce Richardson 					   (ice_timestamp_dynfield_offset),
2189*c1d14583SBruce Richardson 					   rte_mbuf_timestamp_t *) = ts_ns;
2190*c1d14583SBruce Richardson 			pkt_flags |= ice_timestamp_dynflag;
2191*c1d14583SBruce Richardson 		}
2192*c1d14583SBruce Richardson 
2193*c1d14583SBruce Richardson 		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
2194*c1d14583SBruce Richardson 		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2195*c1d14583SBruce Richardson 			rxq->time_high =
2196*c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2197*c1d14583SBruce Richardson 			first_seg->timesync = rxq->queue_id;
2198*c1d14583SBruce Richardson 			pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2199*c1d14583SBruce Richardson 		}
2200*c1d14583SBruce Richardson #endif
2201*c1d14583SBruce Richardson 		first_seg->ol_flags |= pkt_flags;
2202*c1d14583SBruce Richardson 		/* Prefetch data of first segment, if configured to do so. */
2203*c1d14583SBruce Richardson 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
2204*c1d14583SBruce Richardson 					  first_seg->data_off));
2205*c1d14583SBruce Richardson 		rx_pkts[nb_rx++] = first_seg;
2206*c1d14583SBruce Richardson 		first_seg = NULL;
2207*c1d14583SBruce Richardson 	}
2208*c1d14583SBruce Richardson 
2209*c1d14583SBruce Richardson 	/* Record index of the next RX descriptor to probe. */
2210*c1d14583SBruce Richardson 	rxq->rx_tail = rx_id;
2211*c1d14583SBruce Richardson 	rxq->pkt_first_seg = first_seg;
2212*c1d14583SBruce Richardson 	rxq->pkt_last_seg = last_seg;
2213*c1d14583SBruce Richardson 
2214*c1d14583SBruce Richardson 	/**
2215*c1d14583SBruce Richardson 	 * If the number of free RX descriptors is greater than the RX free
2216*c1d14583SBruce Richardson 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2217*c1d14583SBruce Richardson 	 * register. Update the RDT with the value of the last processed RX
2218*c1d14583SBruce Richardson 	 * descriptor minus 1, to guarantee that the RDT register is never
2219*c1d14583SBruce Richardson 	 * equal to the RDH register, which creates a "full" ring situation
2220*c1d14583SBruce Richardson 	 * from the hardware point of view.
2221*c1d14583SBruce Richardson 	 */
2222*c1d14583SBruce Richardson 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2223*c1d14583SBruce Richardson 	if (nb_hold > rxq->rx_free_thresh) {
2224*c1d14583SBruce Richardson 		rx_id = (uint16_t)(rx_id == 0 ?
2225*c1d14583SBruce Richardson 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2226*c1d14583SBruce Richardson 		/* write TAIL register */
2227*c1d14583SBruce Richardson 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2228*c1d14583SBruce Richardson 		nb_hold = 0;
2229*c1d14583SBruce Richardson 	}
2230*c1d14583SBruce Richardson 	rxq->nb_rx_hold = nb_hold;
2231*c1d14583SBruce Richardson 
2232*c1d14583SBruce Richardson 	/* return received packet in the burst */
2233*c1d14583SBruce Richardson 	return nb_rx;
2234*c1d14583SBruce Richardson }
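/*
 * Illustrative note (not part of the upstream sources): packets returned by
 * the scattered path may span several chained mbuf segments.  An application
 * that needs the full length can either read rte_pktmbuf_pkt_len() or walk
 * the chain, e.g.:
 *
 *	uint32_t len = 0;
 *	struct rte_mbuf *seg;
 *
 *	for (seg = pkt; seg != NULL; seg = seg->next)
 *		len += seg->data_len;
 *	// len equals rte_pktmbuf_pkt_len(pkt) for a fully reassembled packet
 */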
2235*c1d14583SBruce Richardson 
2236*c1d14583SBruce Richardson const uint32_t *
2237*c1d14583SBruce Richardson ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
2238*c1d14583SBruce Richardson {
2239*c1d14583SBruce Richardson 	struct ice_adapter *ad =
2240*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2241*c1d14583SBruce Richardson 	const uint32_t *ptypes;
2242*c1d14583SBruce Richardson 
2243*c1d14583SBruce Richardson 	static const uint32_t ptypes_os[] = {
2244*c1d14583SBruce Richardson 		/* refers to ice_get_default_pkt_type() */
2245*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER,
2246*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_TIMESYNC,
2247*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_LLDP,
2248*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_ARP,
2249*c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2250*c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2251*c1d14583SBruce Richardson 		RTE_PTYPE_L4_FRAG,
2252*c1d14583SBruce Richardson 		RTE_PTYPE_L4_ICMP,
2253*c1d14583SBruce Richardson 		RTE_PTYPE_L4_NONFRAG,
2254*c1d14583SBruce Richardson 		RTE_PTYPE_L4_SCTP,
2255*c1d14583SBruce Richardson 		RTE_PTYPE_L4_TCP,
2256*c1d14583SBruce Richardson 		RTE_PTYPE_L4_UDP,
2257*c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GRENAT,
2258*c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_IP,
2259*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L2_ETHER,
2260*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2261*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2262*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_FRAG,
2263*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_ICMP,
2264*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_NONFRAG,
2265*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_SCTP,
2266*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_TCP,
2267*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_UDP,
2268*c1d14583SBruce Richardson 	};
2269*c1d14583SBruce Richardson 
2270*c1d14583SBruce Richardson 	static const uint32_t ptypes_comms[] = {
2271*c1d14583SBruce Richardson 		/* refers to ice_get_default_pkt_type() */
2272*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER,
2273*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_TIMESYNC,
2274*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_LLDP,
2275*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_ARP,
2276*c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2277*c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2278*c1d14583SBruce Richardson 		RTE_PTYPE_L4_FRAG,
2279*c1d14583SBruce Richardson 		RTE_PTYPE_L4_ICMP,
2280*c1d14583SBruce Richardson 		RTE_PTYPE_L4_NONFRAG,
2281*c1d14583SBruce Richardson 		RTE_PTYPE_L4_SCTP,
2282*c1d14583SBruce Richardson 		RTE_PTYPE_L4_TCP,
2283*c1d14583SBruce Richardson 		RTE_PTYPE_L4_UDP,
2284*c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GRENAT,
2285*c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_IP,
2286*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L2_ETHER,
2287*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2288*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2289*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_FRAG,
2290*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_ICMP,
2291*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_NONFRAG,
2292*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_SCTP,
2293*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_TCP,
2294*c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_UDP,
2295*c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GTPC,
2296*c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GTPU,
2297*c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_PPPOE,
2298*c1d14583SBruce Richardson 	};
2299*c1d14583SBruce Richardson 
2300*c1d14583SBruce Richardson 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) {
2301*c1d14583SBruce Richardson 		*no_of_elements = RTE_DIM(ptypes_comms);
2302*c1d14583SBruce Richardson 		ptypes = ptypes_comms;
2303*c1d14583SBruce Richardson 	} else {
2304*c1d14583SBruce Richardson 		*no_of_elements = RTE_DIM(ptypes_os);
2305*c1d14583SBruce Richardson 		ptypes = ptypes_os;
2306*c1d14583SBruce Richardson 	}
2307*c1d14583SBruce Richardson 
2308*c1d14583SBruce Richardson 	if (dev->rx_pkt_burst == ice_recv_pkts ||
2309*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2310*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts)
2311*c1d14583SBruce Richardson 		return ptypes;
2312*c1d14583SBruce Richardson 
2313*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
2314*c1d14583SBruce Richardson 	if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2315*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2316*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
2317*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2318*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2319*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2320*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2321*c1d14583SBruce Richardson #endif
2322*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2323*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2324*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2325*c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2326*c1d14583SBruce Richardson 		return ptypes;
2327*c1d14583SBruce Richardson #endif
2328*c1d14583SBruce Richardson 
2329*c1d14583SBruce Richardson 	return NULL;
2330*c1d14583SBruce Richardson }
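/*
 * Usage sketch (illustrative only): applications normally query this table
 * through the ethdev API rather than the driver callback, e.g.:
 *
 *	uint32_t ptypes[32];
 *	int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *						 ptypes, RTE_DIM(ptypes));
 *	// n < 0 on error; otherwise the first RTE_MIN(n, RTE_DIM(ptypes))
 *	// entries of ptypes[] are valid
 *
 * A NULL return from the callback above means the currently selected
 * rx_pkt_burst function does not report packet types.
 */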
2331*c1d14583SBruce Richardson 
2332*c1d14583SBruce Richardson int
2333*c1d14583SBruce Richardson ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2334*c1d14583SBruce Richardson {
2335*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
2336*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = rx_queue;
2337*c1d14583SBruce Richardson 	uint32_t desc;
2338*c1d14583SBruce Richardson 
2339*c1d14583SBruce Richardson 	if (unlikely(offset >= rxq->nb_rx_desc))
2340*c1d14583SBruce Richardson 		return -EINVAL;
2341*c1d14583SBruce Richardson 
2342*c1d14583SBruce Richardson 	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2343*c1d14583SBruce Richardson 		return RTE_ETH_RX_DESC_UNAVAIL;
2344*c1d14583SBruce Richardson 
2345*c1d14583SBruce Richardson 	desc = rxq->rx_tail + offset;
2346*c1d14583SBruce Richardson 	if (desc >= rxq->nb_rx_desc)
2347*c1d14583SBruce Richardson 		desc -= rxq->nb_rx_desc;
2348*c1d14583SBruce Richardson 
2349*c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[desc];
2350*c1d14583SBruce Richardson 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2351*c1d14583SBruce Richardson 	    (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2352*c1d14583SBruce Richardson 		return RTE_ETH_RX_DESC_DONE;
2353*c1d14583SBruce Richardson 
2354*c1d14583SBruce Richardson 	return RTE_ETH_RX_DESC_AVAIL;
2355*c1d14583SBruce Richardson }
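/*
 * Usage sketch (illustrative only): the offset is relative to the next
 * descriptor to be polled, so offset 0 asks whether the next packet is
 * already complete.  Applications reach this handler via the ethdev wrapper:
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, 0) ==
 *			RTE_ETH_RX_DESC_DONE)
 *		; // at least one packet is waiting in the queue
 */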
2356*c1d14583SBruce Richardson 
2357*c1d14583SBruce Richardson int
2358*c1d14583SBruce Richardson ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2359*c1d14583SBruce Richardson {
2360*c1d14583SBruce Richardson 	struct ice_tx_queue *txq = tx_queue;
2361*c1d14583SBruce Richardson 	volatile uint64_t *status;
2362*c1d14583SBruce Richardson 	uint64_t mask, expect;
2363*c1d14583SBruce Richardson 	uint32_t desc;
2364*c1d14583SBruce Richardson 
2365*c1d14583SBruce Richardson 	if (unlikely(offset >= txq->nb_tx_desc))
2366*c1d14583SBruce Richardson 		return -EINVAL;
2367*c1d14583SBruce Richardson 
2368*c1d14583SBruce Richardson 	desc = txq->tx_tail + offset;
2369*c1d14583SBruce Richardson 	/* go to next desc that has the RS bit */
2370*c1d14583SBruce Richardson 	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2371*c1d14583SBruce Richardson 		txq->tx_rs_thresh;
2372*c1d14583SBruce Richardson 	if (desc >= txq->nb_tx_desc) {
2373*c1d14583SBruce Richardson 		desc -= txq->nb_tx_desc;
2374*c1d14583SBruce Richardson 		if (desc >= txq->nb_tx_desc)
2375*c1d14583SBruce Richardson 			desc -= txq->nb_tx_desc;
2376*c1d14583SBruce Richardson 	}
2377*c1d14583SBruce Richardson 
2378*c1d14583SBruce Richardson 	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2379*c1d14583SBruce Richardson 	mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2380*c1d14583SBruce Richardson 	expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2381*c1d14583SBruce Richardson 				  ICE_TXD_QW1_DTYPE_S);
2382*c1d14583SBruce Richardson 	if ((*status & mask) == expect)
2383*c1d14583SBruce Richardson 		return RTE_ETH_TX_DESC_DONE;
2384*c1d14583SBruce Richardson 
2385*c1d14583SBruce Richardson 	return RTE_ETH_TX_DESC_FULL;
2386*c1d14583SBruce Richardson }
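/*
 * Illustrative note (not part of the upstream sources): completion is only
 * tracked at RS-bit granularity, so the requested offset is rounded up to
 * the next descriptor carrying the RS bit.  For example, with a hypothetical
 * tx_rs_thresh of 32 and tx_tail of 10, an offset of 5 gives
 * desc = ((15 + 31) / 32) * 32 = 32, i.e. the first RS descriptor at or
 * after the requested position (the double subtraction above guards the
 * wrap-around for small rings).
 */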
2387*c1d14583SBruce Richardson 
2388*c1d14583SBruce Richardson void
2389*c1d14583SBruce Richardson ice_free_queues(struct rte_eth_dev *dev)
2390*c1d14583SBruce Richardson {
2391*c1d14583SBruce Richardson 	uint16_t i;
2392*c1d14583SBruce Richardson 
2393*c1d14583SBruce Richardson 	PMD_INIT_FUNC_TRACE();
2394*c1d14583SBruce Richardson 
2395*c1d14583SBruce Richardson 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2396*c1d14583SBruce Richardson 		if (!dev->data->rx_queues[i])
2397*c1d14583SBruce Richardson 			continue;
2398*c1d14583SBruce Richardson 		ice_rx_queue_release(dev->data->rx_queues[i]);
2399*c1d14583SBruce Richardson 		dev->data->rx_queues[i] = NULL;
2400*c1d14583SBruce Richardson 	}
2401*c1d14583SBruce Richardson 	dev->data->nb_rx_queues = 0;
2402*c1d14583SBruce Richardson 
2403*c1d14583SBruce Richardson 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2404*c1d14583SBruce Richardson 		if (!dev->data->tx_queues[i])
2405*c1d14583SBruce Richardson 			continue;
2406*c1d14583SBruce Richardson 		ice_tx_queue_release(dev->data->tx_queues[i]);
2407*c1d14583SBruce Richardson 		dev->data->tx_queues[i] = NULL;
2408*c1d14583SBruce Richardson 	}
2409*c1d14583SBruce Richardson 	dev->data->nb_tx_queues = 0;
2410*c1d14583SBruce Richardson }
2411*c1d14583SBruce Richardson 
2412*c1d14583SBruce Richardson #define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
2413*c1d14583SBruce Richardson #define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
2414*c1d14583SBruce Richardson 
2415*c1d14583SBruce Richardson int
2416*c1d14583SBruce Richardson ice_fdir_setup_tx_resources(struct ice_pf *pf)
2417*c1d14583SBruce Richardson {
2418*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
2419*c1d14583SBruce Richardson 	const struct rte_memzone *tz = NULL;
2420*c1d14583SBruce Richardson 	uint32_t ring_size;
2421*c1d14583SBruce Richardson 	struct rte_eth_dev *dev;
2422*c1d14583SBruce Richardson 
2423*c1d14583SBruce Richardson 	if (!pf) {
2424*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "PF is not available");
2425*c1d14583SBruce Richardson 		return -EINVAL;
2426*c1d14583SBruce Richardson 	}
2427*c1d14583SBruce Richardson 
2428*c1d14583SBruce Richardson 	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2429*c1d14583SBruce Richardson 
2430*c1d14583SBruce Richardson 	/* Allocate the TX queue data structure. */
2431*c1d14583SBruce Richardson 	txq = rte_zmalloc_socket("ice fdir tx queue",
2432*c1d14583SBruce Richardson 				 sizeof(struct ice_tx_queue),
2433*c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
2434*c1d14583SBruce Richardson 				 SOCKET_ID_ANY);
2435*c1d14583SBruce Richardson 	if (!txq) {
2436*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2437*c1d14583SBruce Richardson 			    "tx queue structure.");
2438*c1d14583SBruce Richardson 		return -ENOMEM;
2439*c1d14583SBruce Richardson 	}
2440*c1d14583SBruce Richardson 
2441*c1d14583SBruce Richardson 	/* Allocate TX hardware ring descriptors. */
2442*c1d14583SBruce Richardson 	ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2443*c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2444*c1d14583SBruce Richardson 
2445*c1d14583SBruce Richardson 	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2446*c1d14583SBruce Richardson 				      ICE_FDIR_QUEUE_ID, ring_size,
2447*c1d14583SBruce Richardson 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2448*c1d14583SBruce Richardson 	if (!tz) {
2449*c1d14583SBruce Richardson 		ice_tx_queue_release(txq);
2450*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2451*c1d14583SBruce Richardson 		return -ENOMEM;
2452*c1d14583SBruce Richardson 	}
2453*c1d14583SBruce Richardson 
2454*c1d14583SBruce Richardson 	txq->mz = tz;
2455*c1d14583SBruce Richardson 	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2456*c1d14583SBruce Richardson 	txq->queue_id = ICE_FDIR_QUEUE_ID;
2457*c1d14583SBruce Richardson 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2458*c1d14583SBruce Richardson 	txq->vsi = pf->fdir.fdir_vsi;
2459*c1d14583SBruce Richardson 
2460*c1d14583SBruce Richardson 	txq->tx_ring_dma = tz->iova;
2461*c1d14583SBruce Richardson 	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2462*c1d14583SBruce Richardson 	/*
2463*c1d14583SBruce Richardson 	 * No need to allocate a software ring or reset it for the FDIR
2464*c1d14583SBruce Richardson 	 * program queue; just mark the queue as configured.
2465*c1d14583SBruce Richardson 	 */
2466*c1d14583SBruce Richardson 	txq->q_set = true;
2467*c1d14583SBruce Richardson 	pf->fdir.txq = txq;
2468*c1d14583SBruce Richardson 
2469*c1d14583SBruce Richardson 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2470*c1d14583SBruce Richardson 
2471*c1d14583SBruce Richardson 	return ICE_SUCCESS;
2472*c1d14583SBruce Richardson }
2473*c1d14583SBruce Richardson 
2474*c1d14583SBruce Richardson int
2475*c1d14583SBruce Richardson ice_fdir_setup_rx_resources(struct ice_pf *pf)
2476*c1d14583SBruce Richardson {
2477*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
2478*c1d14583SBruce Richardson 	const struct rte_memzone *rz = NULL;
2479*c1d14583SBruce Richardson 	uint32_t ring_size;
2480*c1d14583SBruce Richardson 	struct rte_eth_dev *dev;
2481*c1d14583SBruce Richardson 
2482*c1d14583SBruce Richardson 	if (!pf) {
2483*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "PF is not available");
2484*c1d14583SBruce Richardson 		return -EINVAL;
2485*c1d14583SBruce Richardson 	}
2486*c1d14583SBruce Richardson 
2487*c1d14583SBruce Richardson 	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2488*c1d14583SBruce Richardson 
2489*c1d14583SBruce Richardson 	/* Allocate the RX queue data structure. */
2490*c1d14583SBruce Richardson 	rxq = rte_zmalloc_socket("ice fdir rx queue",
2491*c1d14583SBruce Richardson 				 sizeof(struct ice_rx_queue),
2492*c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
2493*c1d14583SBruce Richardson 				 SOCKET_ID_ANY);
2494*c1d14583SBruce Richardson 	if (!rxq) {
2495*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2496*c1d14583SBruce Richardson 			    "rx queue structure.");
2497*c1d14583SBruce Richardson 		return -ENOMEM;
2498*c1d14583SBruce Richardson 	}
2499*c1d14583SBruce Richardson 
2500*c1d14583SBruce Richardson 	/* Allocate RX hardware ring descriptors. */
2501*c1d14583SBruce Richardson 	ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2502*c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2503*c1d14583SBruce Richardson 
2504*c1d14583SBruce Richardson 	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2505*c1d14583SBruce Richardson 				      ICE_FDIR_QUEUE_ID, ring_size,
2506*c1d14583SBruce Richardson 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2507*c1d14583SBruce Richardson 	if (!rz) {
2508*c1d14583SBruce Richardson 		ice_rx_queue_release(rxq);
2509*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2510*c1d14583SBruce Richardson 		return -ENOMEM;
2511*c1d14583SBruce Richardson 	}
2512*c1d14583SBruce Richardson 
2513*c1d14583SBruce Richardson 	rxq->mz = rz;
2514*c1d14583SBruce Richardson 	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2515*c1d14583SBruce Richardson 	rxq->queue_id = ICE_FDIR_QUEUE_ID;
2516*c1d14583SBruce Richardson 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2517*c1d14583SBruce Richardson 	rxq->vsi = pf->fdir.fdir_vsi;
2518*c1d14583SBruce Richardson 
2519*c1d14583SBruce Richardson 	rxq->rx_ring_dma = rz->iova;
2520*c1d14583SBruce Richardson 	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2521*c1d14583SBruce Richardson 	       sizeof(union ice_32byte_rx_desc));
2522*c1d14583SBruce Richardson 	rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2523*c1d14583SBruce Richardson 
2524*c1d14583SBruce Richardson 	/*
2525*c1d14583SBruce Richardson 	 * No need to allocate a software ring or reset it for the FDIR
2526*c1d14583SBruce Richardson 	 * RX queue; just mark the queue as configured.
2527*c1d14583SBruce Richardson 	 */
2528*c1d14583SBruce Richardson 	rxq->q_set = true;
2529*c1d14583SBruce Richardson 	pf->fdir.rxq = rxq;
2530*c1d14583SBruce Richardson 
2531*c1d14583SBruce Richardson 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2532*c1d14583SBruce Richardson 
2533*c1d14583SBruce Richardson 	return ICE_SUCCESS;
2534*c1d14583SBruce Richardson }
2535*c1d14583SBruce Richardson 
2536*c1d14583SBruce Richardson uint16_t
2537*c1d14583SBruce Richardson ice_recv_pkts(void *rx_queue,
2538*c1d14583SBruce Richardson 	      struct rte_mbuf **rx_pkts,
2539*c1d14583SBruce Richardson 	      uint16_t nb_pkts)
2540*c1d14583SBruce Richardson {
2541*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = rx_queue;
2542*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2543*c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
2544*c1d14583SBruce Richardson 	union ice_rx_flex_desc rxd;
2545*c1d14583SBruce Richardson 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2546*c1d14583SBruce Richardson 	struct ice_rx_entry *rxe;
2547*c1d14583SBruce Richardson 	struct rte_mbuf *nmb; /* new allocated mbuf */
2548*c1d14583SBruce Richardson 	struct rte_mbuf *nmb_pay; /* new allocated payload mbuf */
2549*c1d14583SBruce Richardson 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2550*c1d14583SBruce Richardson 	uint16_t rx_id = rxq->rx_tail;
2551*c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
2552*c1d14583SBruce Richardson 	uint16_t nb_hold = 0;
2553*c1d14583SBruce Richardson 	uint16_t rx_packet_len;
2554*c1d14583SBruce Richardson 	uint16_t rx_header_len;
2555*c1d14583SBruce Richardson 	uint16_t rx_stat_err0;
2556*c1d14583SBruce Richardson 	uint64_t dma_addr;
2557*c1d14583SBruce Richardson 	uint64_t pkt_flags;
2558*c1d14583SBruce Richardson 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2559*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2560*c1d14583SBruce Richardson 	bool is_tsinit = false;
2561*c1d14583SBruce Richardson 	uint64_t ts_ns;
2562*c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
2563*c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2564*c1d14583SBruce Richardson 	struct ice_adapter *ad = rxq->vsi->adapter;
2565*c1d14583SBruce Richardson 
2566*c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2567*c1d14583SBruce Richardson 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2568*c1d14583SBruce Richardson 
2569*c1d14583SBruce Richardson 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
2570*c1d14583SBruce Richardson 			is_tsinit = true;
2571*c1d14583SBruce Richardson 	}
2572*c1d14583SBruce Richardson #endif
2573*c1d14583SBruce Richardson 
2574*c1d14583SBruce Richardson 	while (nb_rx < nb_pkts) {
2575*c1d14583SBruce Richardson 		rxdp = &rx_ring[rx_id];
2576*c1d14583SBruce Richardson 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2577*c1d14583SBruce Richardson 
2578*c1d14583SBruce Richardson 		/* Check the DD bit first */
2579*c1d14583SBruce Richardson 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2580*c1d14583SBruce Richardson 			break;
2581*c1d14583SBruce Richardson 
2582*c1d14583SBruce Richardson 		/* allocate header mbuf */
2583*c1d14583SBruce Richardson 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2584*c1d14583SBruce Richardson 		if (unlikely(!nmb)) {
2585*c1d14583SBruce Richardson 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2586*c1d14583SBruce Richardson 			break;
2587*c1d14583SBruce Richardson 		}
2588*c1d14583SBruce Richardson 
2589*c1d14583SBruce Richardson 		rxd = *rxdp; /* copy the ring descriptor to a temporary variable */
2590*c1d14583SBruce Richardson 
2591*c1d14583SBruce Richardson 		nb_hold++;
2592*c1d14583SBruce Richardson 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2593*c1d14583SBruce Richardson 		rx_id++;
2594*c1d14583SBruce Richardson 		if (unlikely(rx_id == rxq->nb_rx_desc))
2595*c1d14583SBruce Richardson 			rx_id = 0;
2596*c1d14583SBruce Richardson 		rxm = rxe->mbuf;
2597*c1d14583SBruce Richardson 		rxe->mbuf = nmb;
2598*c1d14583SBruce Richardson 		dma_addr =
2599*c1d14583SBruce Richardson 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2600*c1d14583SBruce Richardson 
2601*c1d14583SBruce Richardson 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
2602*c1d14583SBruce Richardson 			/**
2603*c1d14583SBruce Richardson 			 * fill the read format of the descriptor with the physical
2604*c1d14583SBruce Richardson 			 * address of the newly allocated mbuf: nmb
2605*c1d14583SBruce Richardson 			 */
2606*c1d14583SBruce Richardson 			rxdp->read.hdr_addr = 0;
2607*c1d14583SBruce Richardson 			rxdp->read.pkt_addr = dma_addr;
2608*c1d14583SBruce Richardson 		} else {
2609*c1d14583SBruce Richardson 			/* allocate payload mbuf */
2610*c1d14583SBruce Richardson 			nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
2611*c1d14583SBruce Richardson 			if (unlikely(!nmb_pay)) {
2612*c1d14583SBruce Richardson 				rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2613*c1d14583SBruce Richardson 				rxe->mbuf = NULL;
2614*c1d14583SBruce Richardson 				nb_hold--;
2615*c1d14583SBruce Richardson 				if (unlikely(rx_id == 0))
2616*c1d14583SBruce Richardson 					rx_id = rxq->nb_rx_desc;
2617*c1d14583SBruce Richardson 
2618*c1d14583SBruce Richardson 				rx_id--;
2619*c1d14583SBruce Richardson 				rte_pktmbuf_free(nmb);
2620*c1d14583SBruce Richardson 				break;
2621*c1d14583SBruce Richardson 			}
2622*c1d14583SBruce Richardson 
2623*c1d14583SBruce Richardson 			nmb->next = nmb_pay;
2624*c1d14583SBruce Richardson 			nmb_pay->next = NULL;
2625*c1d14583SBruce Richardson 
2626*c1d14583SBruce Richardson 			/**
2627*c1d14583SBruce Richardson 			 * fill the read format of the descriptor with the physical
2628*c1d14583SBruce Richardson 			 * address of the newly allocated mbuf: nmb
2629*c1d14583SBruce Richardson 			 */
2630*c1d14583SBruce Richardson 			rxdp->read.hdr_addr = dma_addr;
2631*c1d14583SBruce Richardson 			rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay));
2632*c1d14583SBruce Richardson 		}
2633*c1d14583SBruce Richardson 
2634*c1d14583SBruce Richardson 		/* fill the old mbuf (rxm) using the received descriptor: rxd */
2635*c1d14583SBruce Richardson 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2636*c1d14583SBruce Richardson 		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2637*c1d14583SBruce Richardson 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
2638*c1d14583SBruce Richardson 			rxm->nb_segs = 1;
2639*c1d14583SBruce Richardson 			rxm->next = NULL;
2640*c1d14583SBruce Richardson 			/* calculate rx_packet_len of the received pkt */
2641*c1d14583SBruce Richardson 			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2642*c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2643*c1d14583SBruce Richardson 			rxm->data_len = rx_packet_len;
2644*c1d14583SBruce Richardson 			rxm->pkt_len = rx_packet_len;
2645*c1d14583SBruce Richardson 		} else {
2646*c1d14583SBruce Richardson 			rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs);
2647*c1d14583SBruce Richardson 			rxm->next->next = NULL;
2648*c1d14583SBruce Richardson 			/* calculate rx_header_len and rx_packet_len of the received pkt */
2649*c1d14583SBruce Richardson 			rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) &
2650*c1d14583SBruce Richardson 					ICE_RX_FLEX_DESC_HEADER_LEN_M;
2651*c1d14583SBruce Richardson 			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2652*c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2653*c1d14583SBruce Richardson 			rxm->data_len = rx_header_len;
2654*c1d14583SBruce Richardson 			rxm->pkt_len = rx_header_len + rx_packet_len;
2655*c1d14583SBruce Richardson 			rxm->next->data_len = rx_packet_len;
2656*c1d14583SBruce Richardson 
2657*c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_RX
2658*c1d14583SBruce Richardson 			rte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm));
2659*c1d14583SBruce Richardson #endif
2660*c1d14583SBruce Richardson 		}
2661*c1d14583SBruce Richardson 
2662*c1d14583SBruce Richardson 		rxm->port = rxq->port_id;
2663*c1d14583SBruce Richardson 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2664*c1d14583SBruce Richardson 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2665*c1d14583SBruce Richardson 		ice_rxd_to_vlan_tci(rxm, &rxd);
2666*c1d14583SBruce Richardson 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
2667*c1d14583SBruce Richardson 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2668*c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2669*c1d14583SBruce Richardson 		if (ice_timestamp_dynflag > 0 &&
2670*c1d14583SBruce Richardson 		    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
2671*c1d14583SBruce Richardson 			rxq->time_high =
2672*c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2673*c1d14583SBruce Richardson 			if (unlikely(is_tsinit)) {
2674*c1d14583SBruce Richardson 				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2675*c1d14583SBruce Richardson 				rxq->hw_time_low = (uint32_t)ts_ns;
2676*c1d14583SBruce Richardson 				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
2677*c1d14583SBruce Richardson 				is_tsinit = false;
2678*c1d14583SBruce Richardson 			} else {
2679*c1d14583SBruce Richardson 				if (rxq->time_high < rxq->hw_time_low)
2680*c1d14583SBruce Richardson 					rxq->hw_time_high += 1;
2681*c1d14583SBruce Richardson 				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
2682*c1d14583SBruce Richardson 				rxq->hw_time_low = rxq->time_high;
2683*c1d14583SBruce Richardson 			}
2684*c1d14583SBruce Richardson 			rxq->hw_time_update = rte_get_timer_cycles() /
2685*c1d14583SBruce Richardson 					     (rte_get_timer_hz() / 1000);
2686*c1d14583SBruce Richardson 			*RTE_MBUF_DYNFIELD(rxm,
2687*c1d14583SBruce Richardson 					   (ice_timestamp_dynfield_offset),
2688*c1d14583SBruce Richardson 					   rte_mbuf_timestamp_t *) = ts_ns;
2689*c1d14583SBruce Richardson 			pkt_flags |= ice_timestamp_dynflag;
2690*c1d14583SBruce Richardson 		}
2691*c1d14583SBruce Richardson 
2692*c1d14583SBruce Richardson 		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2693*c1d14583SBruce Richardson 		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2694*c1d14583SBruce Richardson 			rxq->time_high =
2695*c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2696*c1d14583SBruce Richardson 			rxm->timesync = rxq->queue_id;
2697*c1d14583SBruce Richardson 			pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2698*c1d14583SBruce Richardson 		}
2699*c1d14583SBruce Richardson #endif
2700*c1d14583SBruce Richardson 		rxm->ol_flags |= pkt_flags;
2701*c1d14583SBruce Richardson 		/* hand the old mbuf over to the application via rx_pkts */
2702*c1d14583SBruce Richardson 		rx_pkts[nb_rx++] = rxm;
2703*c1d14583SBruce Richardson 	}
2704*c1d14583SBruce Richardson 
2705*c1d14583SBruce Richardson 	rxq->rx_tail = rx_id;
2706*c1d14583SBruce Richardson 	/**
2707*c1d14583SBruce Richardson 	 * If the number of free RX descriptors is greater than the RX free
2708*c1d14583SBruce Richardson 	 * threshold of the queue, advance the receive tail register of the queue.
2709*c1d14583SBruce Richardson 	 * Update that register with the value of the last processed RX
2710*c1d14583SBruce Richardson 	 * descriptor minus 1.
2711*c1d14583SBruce Richardson 	 */
2712*c1d14583SBruce Richardson 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2713*c1d14583SBruce Richardson 	if (nb_hold > rxq->rx_free_thresh) {
2714*c1d14583SBruce Richardson 		rx_id = (uint16_t)(rx_id == 0 ?
2715*c1d14583SBruce Richardson 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2716*c1d14583SBruce Richardson 		/* write TAIL register */
2717*c1d14583SBruce Richardson 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2718*c1d14583SBruce Richardson 		nb_hold = 0;
2719*c1d14583SBruce Richardson 	}
2720*c1d14583SBruce Richardson 	rxq->nb_rx_hold = nb_hold;
2721*c1d14583SBruce Richardson 
2722*c1d14583SBruce Richardson 	/* return received packet in the burst */
2723*c1d14583SBruce Richardson 	return nb_rx;
2724*c1d14583SBruce Richardson }
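/*
 * Configuration sketch (illustrative only; pool names and the header length
 * below are hypothetical, field names follow the generic ethdev buffer-split
 * API): the header/payload split branch above expects the queue to have been
 * set up with two segments, roughly:
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool, .length = hdr_len } },
 *		{ .split = { .mp = pay_pool } },	// payload takes the rest
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
 *			       &rxconf, NULL);
 *
 * rxq->rxseg[1].mp above is then the payload pool used for the second
 * segment of every received packet.
 */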
2725*c1d14583SBruce Richardson 
2726*c1d14583SBruce Richardson static inline void
2727*c1d14583SBruce Richardson ice_parse_tunneling_params(uint64_t ol_flags,
2728*c1d14583SBruce Richardson 			    union ice_tx_offload tx_offload,
2729*c1d14583SBruce Richardson 			    uint32_t *cd_tunneling)
2730*c1d14583SBruce Richardson {
2731*c1d14583SBruce Richardson 	/* EIPT: External (outer) IP header type */
2732*c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
2733*c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2734*c1d14583SBruce Richardson 	else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
2735*c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2736*c1d14583SBruce Richardson 	else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
2737*c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2738*c1d14583SBruce Richardson 
2739*c1d14583SBruce Richardson 	/* EIPLEN: External (outer) IP header length, in DWords */
2740*c1d14583SBruce Richardson 	*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2741*c1d14583SBruce Richardson 		ICE_TXD_CTX_QW0_EIPLEN_S;
2742*c1d14583SBruce Richardson 
2743*c1d14583SBruce Richardson 	/* L4TUNT: L4 Tunneling Type */
2744*c1d14583SBruce Richardson 	switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2745*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_IPIP:
2746*c1d14583SBruce Richardson 		/* for non UDP / GRE tunneling, set to 00b */
2747*c1d14583SBruce Richardson 		break;
2748*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
2749*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
2750*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_GTP:
2751*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
2752*c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2753*c1d14583SBruce Richardson 		break;
2754*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_GRE:
2755*c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2756*c1d14583SBruce Richardson 		break;
2757*c1d14583SBruce Richardson 	default:
2758*c1d14583SBruce Richardson 		PMD_TX_LOG(ERR, "Tunnel type not supported");
2759*c1d14583SBruce Richardson 		return;
2760*c1d14583SBruce Richardson 	}
2761*c1d14583SBruce Richardson 
2762*c1d14583SBruce Richardson 	/* L4TUNLEN: L4 Tunneling Length, in Words
2763*c1d14583SBruce Richardson 	 *
2764*c1d14583SBruce Richardson 	 * We depend on the application to set rte_mbuf.l2_len correctly.
2765*c1d14583SBruce Richardson 	 * For IP in GRE it should be set to the length of the GRE
2766*c1d14583SBruce Richardson 	 * header;
2767*c1d14583SBruce Richardson 	 * For MAC in GRE or MAC in UDP it should be set to the length
2768*c1d14583SBruce Richardson 	 * of the GRE or UDP headers plus the inner MAC up to and
2769*c1d14583SBruce Richardson 	 * including its last Ethertype.
2770*c1d14583SBruce Richardson 	 * If MPLS labels exist, they should be included as well.
2771*c1d14583SBruce Richardson 	 */
2772*c1d14583SBruce Richardson 	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
2773*c1d14583SBruce Richardson 		ICE_TXD_CTX_QW0_NATLEN_S;
2774*c1d14583SBruce Richardson 
2775*c1d14583SBruce Richardson 	/**
2776*c1d14583SBruce Richardson 	 * Calculate the tunneling UDP checksum.
2777*c1d14583SBruce Richardson 	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2778*c1d14583SBruce Richardson 	 */
2779*c1d14583SBruce Richardson 	if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) &&
2780*c1d14583SBruce Richardson 			(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) &&
2781*c1d14583SBruce Richardson 			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
2782*c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2783*c1d14583SBruce Richardson }
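/*
 * Worked example (illustrative only): for a VXLAN packet with a 20-byte
 * outer IPv4 header and an l2_len covering outer UDP (8) + VXLAN (8) +
 * inner Ethernet (14) = 30 bytes, the function above programs
 * EIPLEN = 20 >> 2 = 5 DWords and L4TUNLEN = 30 >> 1 = 15 Words, with
 * L4TUNT set to the UDP tunneling encoding.
 */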
2784*c1d14583SBruce Richardson 
2785*c1d14583SBruce Richardson static inline void
2786*c1d14583SBruce Richardson ice_txd_enable_checksum(uint64_t ol_flags,
2787*c1d14583SBruce Richardson 			uint32_t *td_cmd,
2788*c1d14583SBruce Richardson 			uint32_t *td_offset,
2789*c1d14583SBruce Richardson 			union ice_tx_offload tx_offload)
2790*c1d14583SBruce Richardson {
2791*c1d14583SBruce Richardson 	/* Set MACLEN */
2792*c1d14583SBruce Richardson 	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
2793*c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l2_len >> 1)
2794*c1d14583SBruce Richardson 			<< ICE_TX_DESC_LEN_MACLEN_S;
2795*c1d14583SBruce Richardson 
2796*c1d14583SBruce Richardson 	/* Enable L3 checksum offloads */
2797*c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2798*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2799*c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l3_len >> 2) <<
2800*c1d14583SBruce Richardson 			ICE_TX_DESC_LEN_IPLEN_S;
2801*c1d14583SBruce Richardson 	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2802*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2803*c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l3_len >> 2) <<
2804*c1d14583SBruce Richardson 			ICE_TX_DESC_LEN_IPLEN_S;
2805*c1d14583SBruce Richardson 	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2806*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2807*c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l3_len >> 2) <<
2808*c1d14583SBruce Richardson 			ICE_TX_DESC_LEN_IPLEN_S;
2809*c1d14583SBruce Richardson 	}
2810*c1d14583SBruce Richardson 
2811*c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2812*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2813*c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l4_len >> 2) <<
2814*c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2815*c1d14583SBruce Richardson 		return;
2816*c1d14583SBruce Richardson 	}
2817*c1d14583SBruce Richardson 
2818*c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
2819*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2820*c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l4_len >> 2) <<
2821*c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2822*c1d14583SBruce Richardson 		return;
2823*c1d14583SBruce Richardson 	}
2824*c1d14583SBruce Richardson 
2825*c1d14583SBruce Richardson 	/* Enable L4 checksum offloads */
2826*c1d14583SBruce Richardson 	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2827*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TCP_CKSUM:
2828*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2829*c1d14583SBruce Richardson 		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2830*c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2831*c1d14583SBruce Richardson 		break;
2832*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_SCTP_CKSUM:
2833*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2834*c1d14583SBruce Richardson 		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2835*c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2836*c1d14583SBruce Richardson 		break;
2837*c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_UDP_CKSUM:
2838*c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2839*c1d14583SBruce Richardson 		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2840*c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2841*c1d14583SBruce Richardson 		break;
2842*c1d14583SBruce Richardson 	default:
2843*c1d14583SBruce Richardson 		break;
2844*c1d14583SBruce Richardson 	}
2845*c1d14583SBruce Richardson }
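/*
 * Usage sketch (illustrative only): for these offsets to be programmed the
 * application must describe the header layout in the mbuf and request the
 * offload before transmitting, e.g. for a plain IPv4/UDP packet:
 *
 *	m->l2_len = sizeof(struct rte_ether_hdr);
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_UDP_CKSUM;
 *
 * (the matching RTE_ETH_TX_OFFLOAD_* capabilities must also be enabled on
 * the port at configuration time).
 */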
2846*c1d14583SBruce Richardson 
2847*c1d14583SBruce Richardson static inline int
2848*c1d14583SBruce Richardson ice_xmit_cleanup(struct ice_tx_queue *txq)
2849*c1d14583SBruce Richardson {
2850*c1d14583SBruce Richardson 	struct ice_tx_entry *sw_ring = txq->sw_ring;
2851*c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txd = txq->tx_ring;
2852*c1d14583SBruce Richardson 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2853*c1d14583SBruce Richardson 	uint16_t nb_tx_desc = txq->nb_tx_desc;
2854*c1d14583SBruce Richardson 	uint16_t desc_to_clean_to;
2855*c1d14583SBruce Richardson 	uint16_t nb_tx_to_clean;
2856*c1d14583SBruce Richardson 
2857*c1d14583SBruce Richardson 	/* Determine the last descriptor needing to be cleaned */
2858*c1d14583SBruce Richardson 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2859*c1d14583SBruce Richardson 	if (desc_to_clean_to >= nb_tx_desc)
2860*c1d14583SBruce Richardson 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2861*c1d14583SBruce Richardson 
2862*c1d14583SBruce Richardson 	/* Check to make sure the last descriptor to clean is done */
2863*c1d14583SBruce Richardson 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2864*c1d14583SBruce Richardson 	if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2865*c1d14583SBruce Richardson 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2866*c1d14583SBruce Richardson 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2867*c1d14583SBruce Richardson 			   "(port=%d queue=%d) value=0x%"PRIx64,
2868*c1d14583SBruce Richardson 			   desc_to_clean_to,
2869*c1d14583SBruce Richardson 			   txq->port_id, txq->queue_id,
2870*c1d14583SBruce Richardson 			   txd[desc_to_clean_to].cmd_type_offset_bsz);
2871*c1d14583SBruce Richardson 		/* Failed to clean any descriptors */
2872*c1d14583SBruce Richardson 		return -1;
2873*c1d14583SBruce Richardson 	}
2874*c1d14583SBruce Richardson 
2875*c1d14583SBruce Richardson 	/* Figure out how many descriptors will be cleaned */
2876*c1d14583SBruce Richardson 	if (last_desc_cleaned > desc_to_clean_to)
2877*c1d14583SBruce Richardson 		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2878*c1d14583SBruce Richardson 					    desc_to_clean_to);
2879*c1d14583SBruce Richardson 	else
2880*c1d14583SBruce Richardson 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2881*c1d14583SBruce Richardson 					    last_desc_cleaned);
2882*c1d14583SBruce Richardson 
2883*c1d14583SBruce Richardson 	/* The last descriptor to clean is done, so that means all the
2884*c1d14583SBruce Richardson 	 * descriptors from the last descriptor that was cleaned
2885*c1d14583SBruce Richardson 	 * up to the last descriptor with the RS bit set
2886*c1d14583SBruce Richardson 	 * are done. Only reset the threshold descriptor.
2887*c1d14583SBruce Richardson 	 */
2888*c1d14583SBruce Richardson 	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2889*c1d14583SBruce Richardson 
2890*c1d14583SBruce Richardson 	/* Update the txq to reflect the last descriptor that was cleaned */
2891*c1d14583SBruce Richardson 	txq->last_desc_cleaned = desc_to_clean_to;
2892*c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2893*c1d14583SBruce Richardson 
2894*c1d14583SBruce Richardson 	return 0;
2895*c1d14583SBruce Richardson }
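
/*
 * Illustrative sketch (not part of the driver build): the wrap-around index
 * arithmetic used by ice_xmit_cleanup() above, on plain integers.  With a
 * 512-entry ring, last_desc_cleaned = 480 and tx_rs_thresh = 64, the next
 * clean target is (480 + 64) - 512 = 32 and 64 descriptors are freed.
 * The helper name is hypothetical.
 */
static __rte_unused uint16_t
ice_doc_next_clean_target(uint16_t last_cleaned, uint16_t rs_thresh,
			  uint16_t ring_size)
{
	uint16_t target = (uint16_t)(last_cleaned + rs_thresh);

	if (target >= ring_size)
		target = (uint16_t)(target - ring_size);

	return target;
}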
2896*c1d14583SBruce Richardson 
2897*c1d14583SBruce Richardson /* Construct the Tx data descriptor cmd/type/offset/size quadword */
2898*c1d14583SBruce Richardson static inline uint64_t
2899*c1d14583SBruce Richardson ice_build_ctob(uint32_t td_cmd,
2900*c1d14583SBruce Richardson 	       uint32_t td_offset,
2901*c1d14583SBruce Richardson 	       uint16_t size,
2902*c1d14583SBruce Richardson 	       uint32_t td_tag)
2903*c1d14583SBruce Richardson {
2904*c1d14583SBruce Richardson 	return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2905*c1d14583SBruce Richardson 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2906*c1d14583SBruce Richardson 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2907*c1d14583SBruce Richardson 				((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2908*c1d14583SBruce Richardson 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2909*c1d14583SBruce Richardson }
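
/*
 * Illustrative sketch: building the quadword for a 1500-byte data buffer
 * with the EOP and RS command bits set, no header offsets and no VLAN tag.
 * The packed layout follows the ICE_TXD_QW1_* shift definitions used above;
 * the wrapper name and the values are examples only.
 */
static __rte_unused uint64_t
ice_doc_build_ctob_example(void)
{
	return ice_build_ctob(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS,
			      0 /* td_offset */, 1500 /* size */,
			      0 /* td_tag */);
}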
2910*c1d14583SBruce Richardson 
2911*c1d14583SBruce Richardson /* Check whether a context descriptor is needed for the Tx offloads */
2912*c1d14583SBruce Richardson static inline uint16_t
2913*c1d14583SBruce Richardson ice_calc_context_desc(uint64_t flags)
2914*c1d14583SBruce Richardson {
2915*c1d14583SBruce Richardson 	static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
2916*c1d14583SBruce Richardson 		RTE_MBUF_F_TX_UDP_SEG |
2917*c1d14583SBruce Richardson 		RTE_MBUF_F_TX_QINQ |
2918*c1d14583SBruce Richardson 		RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2919*c1d14583SBruce Richardson 		RTE_MBUF_F_TX_TUNNEL_MASK |
2920*c1d14583SBruce Richardson 		RTE_MBUF_F_TX_IEEE1588_TMST;
2921*c1d14583SBruce Richardson 
2922*c1d14583SBruce Richardson 	return (flags & mask) ? 1 : 0;
2923*c1d14583SBruce Richardson }
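
/*
 * Illustrative sketch: a packet that only requests L3/L4 checksum offload
 * needs no context descriptor, while a TSO packet always needs one.  The
 * helper name is hypothetical; the flags are standard mbuf Tx offload bits.
 */
static __rte_unused void
ice_doc_ctx_desc_examples(void)
{
	/* checksum-only offload: ice_calc_context_desc() returns 0 */
	uint16_t no_ctx = ice_calc_context_desc(RTE_MBUF_F_TX_IP_CKSUM |
						RTE_MBUF_F_TX_TCP_CKSUM);
	/* TSO: ice_calc_context_desc() returns 1 */
	uint16_t one_ctx = ice_calc_context_desc(RTE_MBUF_F_TX_TCP_SEG);

	RTE_SET_USED(no_ctx);
	RTE_SET_USED(one_ctx);
}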
2924*c1d14583SBruce Richardson 
2925*c1d14583SBruce Richardson /* set ice TSO context descriptor */
2926*c1d14583SBruce Richardson static inline uint64_t
2927*c1d14583SBruce Richardson ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2928*c1d14583SBruce Richardson {
2929*c1d14583SBruce Richardson 	uint64_t ctx_desc = 0;
2930*c1d14583SBruce Richardson 	uint32_t cd_cmd, hdr_len, cd_tso_len;
2931*c1d14583SBruce Richardson 
2932*c1d14583SBruce Richardson 	if (!tx_offload.l4_len) {
2933*c1d14583SBruce Richardson 		PMD_TX_LOG(DEBUG, "L4 length set to 0");
2934*c1d14583SBruce Richardson 		return ctx_desc;
2935*c1d14583SBruce Richardson 	}
2936*c1d14583SBruce Richardson 
2937*c1d14583SBruce Richardson 	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2938*c1d14583SBruce Richardson 	hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
2939*c1d14583SBruce Richardson 		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2940*c1d14583SBruce Richardson 
2941*c1d14583SBruce Richardson 	cd_cmd = ICE_TX_CTX_DESC_TSO;
2942*c1d14583SBruce Richardson 	cd_tso_len = mbuf->pkt_len - hdr_len;
2943*c1d14583SBruce Richardson 	ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2944*c1d14583SBruce Richardson 		    ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2945*c1d14583SBruce Richardson 		    ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2946*c1d14583SBruce Richardson 
2947*c1d14583SBruce Richardson 	return ctx_desc;
2948*c1d14583SBruce Richardson }
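
/*
 * Illustrative sketch of the length arithmetic above: for a non-tunnelled
 * TSO packet with 14-byte L2, 20-byte L3 and 20-byte L4 headers and a
 * 9014-byte pkt_len, the context descriptor carries
 * cd_tso_len = 9014 - (14 + 20 + 20) = 8960 payload bytes.  The helper and
 * its input values are examples only.
 */
static __rte_unused uint32_t
ice_doc_tso_payload_len(void)
{
	const uint32_t pkt_len = 9014, l2_len = 14, l3_len = 20, l4_len = 20;

	return pkt_len - (l2_len + l3_len + l4_len);	/* 8960 */
}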
2949*c1d14583SBruce Richardson 
2950*c1d14583SBruce Richardson /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2951*c1d14583SBruce Richardson #define ICE_MAX_DATA_PER_TXD \
2952*c1d14583SBruce Richardson 	(ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2953*c1d14583SBruce Richardson /* Calculate the number of TX descriptors needed for each pkt */
2954*c1d14583SBruce Richardson static inline uint16_t
2955*c1d14583SBruce Richardson ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2956*c1d14583SBruce Richardson {
2957*c1d14583SBruce Richardson 	struct rte_mbuf *txd = tx_pkt;
2958*c1d14583SBruce Richardson 	uint16_t count = 0;
2959*c1d14583SBruce Richardson 
2960*c1d14583SBruce Richardson 	while (txd != NULL) {
2961*c1d14583SBruce Richardson 		count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2962*c1d14583SBruce Richardson 		txd = txd->next;
2963*c1d14583SBruce Richardson 	}
2964*c1d14583SBruce Richardson 
2965*c1d14583SBruce Richardson 	return count;
2966*c1d14583SBruce Richardson }
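
/*
 * Illustrative sketch: with ICE_MAX_DATA_PER_TXD = 16K - 1 = 16383 bytes, a
 * two-segment TSO chain with data_len values of 16000 and 20000 bytes needs
 * 1 + 2 = 3 data descriptors, which matches what ice_calc_pkt_desc()
 * returns for such a chain.  The helper name is hypothetical.
 */
static __rte_unused uint16_t
ice_doc_desc_count_example(void)
{
	return DIV_ROUND_UP(16000, ICE_MAX_DATA_PER_TXD) +
	       DIV_ROUND_UP(20000, ICE_MAX_DATA_PER_TXD);	/* 1 + 2 = 3 */
}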
2967*c1d14583SBruce Richardson 
2968*c1d14583SBruce Richardson uint16_t
2969*c1d14583SBruce Richardson ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2970*c1d14583SBruce Richardson {
2971*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
2972*c1d14583SBruce Richardson 	volatile struct ice_tx_desc *tx_ring;
2973*c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txd;
2974*c1d14583SBruce Richardson 	struct ice_tx_entry *sw_ring;
2975*c1d14583SBruce Richardson 	struct ice_tx_entry *txe, *txn;
2976*c1d14583SBruce Richardson 	struct rte_mbuf *tx_pkt;
2977*c1d14583SBruce Richardson 	struct rte_mbuf *m_seg;
2978*c1d14583SBruce Richardson 	uint32_t cd_tunneling_params;
2979*c1d14583SBruce Richardson 	uint16_t tx_id;
2980*c1d14583SBruce Richardson 	uint16_t nb_tx;
2981*c1d14583SBruce Richardson 	uint16_t nb_used;
2982*c1d14583SBruce Richardson 	uint16_t nb_ctx;
2983*c1d14583SBruce Richardson 	uint32_t td_cmd = 0;
2984*c1d14583SBruce Richardson 	uint32_t td_offset = 0;
2985*c1d14583SBruce Richardson 	uint32_t td_tag = 0;
2986*c1d14583SBruce Richardson 	uint16_t tx_last;
2987*c1d14583SBruce Richardson 	uint16_t slen;
2988*c1d14583SBruce Richardson 	uint64_t buf_dma_addr;
2989*c1d14583SBruce Richardson 	uint64_t ol_flags;
2990*c1d14583SBruce Richardson 	union ice_tx_offload tx_offload = {0};
2991*c1d14583SBruce Richardson 
2992*c1d14583SBruce Richardson 	txq = tx_queue;
2993*c1d14583SBruce Richardson 	sw_ring = txq->sw_ring;
2994*c1d14583SBruce Richardson 	tx_ring = txq->tx_ring;
2995*c1d14583SBruce Richardson 	tx_id = txq->tx_tail;
2996*c1d14583SBruce Richardson 	txe = &sw_ring[tx_id];
2997*c1d14583SBruce Richardson 
2998*c1d14583SBruce Richardson 	/* Check if the descriptor ring needs to be cleaned. */
2999*c1d14583SBruce Richardson 	if (txq->nb_tx_free < txq->tx_free_thresh)
3000*c1d14583SBruce Richardson 		(void)ice_xmit_cleanup(txq);
3001*c1d14583SBruce Richardson 
3002*c1d14583SBruce Richardson 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
3003*c1d14583SBruce Richardson 		tx_pkt = *tx_pkts++;
3004*c1d14583SBruce Richardson 
3005*c1d14583SBruce Richardson 		td_cmd = 0;
3006*c1d14583SBruce Richardson 		td_tag = 0;
3007*c1d14583SBruce Richardson 		td_offset = 0;
3008*c1d14583SBruce Richardson 		ol_flags = tx_pkt->ol_flags;
3009*c1d14583SBruce Richardson 		tx_offload.l2_len = tx_pkt->l2_len;
3010*c1d14583SBruce Richardson 		tx_offload.l3_len = tx_pkt->l3_len;
3011*c1d14583SBruce Richardson 		tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
3012*c1d14583SBruce Richardson 		tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
3013*c1d14583SBruce Richardson 		tx_offload.l4_len = tx_pkt->l4_len;
3014*c1d14583SBruce Richardson 		tx_offload.tso_segsz = tx_pkt->tso_segsz;
3015*c1d14583SBruce Richardson 		/* Calculate the number of context descriptors needed. */
3016*c1d14583SBruce Richardson 		nb_ctx = ice_calc_context_desc(ol_flags);
3017*c1d14583SBruce Richardson 
3018*c1d14583SBruce Richardson 		/* The number of descriptors that must be allocated for
3019*c1d14583SBruce Richardson 		 * a packet equals the number of segments of that packet
3020*c1d14583SBruce Richardson 		 * plus the number of context descriptors, if any.
3021*c1d14583SBruce Richardson 		 * When TSO is enabled, recalculate the needed Tx descriptors
3022*c1d14583SBruce Richardson 		 * in case the mbuf data size exceeds the maximum data size
3023*c1d14583SBruce Richardson 		 * that HW allows per Tx descriptor.
3024*c1d14583SBruce Richardson 		 */
3025*c1d14583SBruce Richardson 		if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
3026*c1d14583SBruce Richardson 			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
3027*c1d14583SBruce Richardson 					     nb_ctx);
3028*c1d14583SBruce Richardson 		else
3029*c1d14583SBruce Richardson 			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
3030*c1d14583SBruce Richardson 		tx_last = (uint16_t)(tx_id + nb_used - 1);
3031*c1d14583SBruce Richardson 
3032*c1d14583SBruce Richardson 		/* Circular ring */
3033*c1d14583SBruce Richardson 		if (tx_last >= txq->nb_tx_desc)
3034*c1d14583SBruce Richardson 			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
3035*c1d14583SBruce Richardson 
3036*c1d14583SBruce Richardson 		if (nb_used > txq->nb_tx_free) {
3037*c1d14583SBruce Richardson 			if (ice_xmit_cleanup(txq) != 0) {
3038*c1d14583SBruce Richardson 				if (nb_tx == 0)
3039*c1d14583SBruce Richardson 					return 0;
3040*c1d14583SBruce Richardson 				goto end_of_tx;
3041*c1d14583SBruce Richardson 			}
3042*c1d14583SBruce Richardson 			if (unlikely(nb_used > txq->tx_rs_thresh)) {
3043*c1d14583SBruce Richardson 				while (nb_used > txq->nb_tx_free) {
3044*c1d14583SBruce Richardson 					if (ice_xmit_cleanup(txq) != 0) {
3045*c1d14583SBruce Richardson 						if (nb_tx == 0)
3046*c1d14583SBruce Richardson 							return 0;
3047*c1d14583SBruce Richardson 						goto end_of_tx;
3048*c1d14583SBruce Richardson 					}
3049*c1d14583SBruce Richardson 				}
3050*c1d14583SBruce Richardson 			}
3051*c1d14583SBruce Richardson 		}
3052*c1d14583SBruce Richardson 
3053*c1d14583SBruce Richardson 		/* Descriptor based VLAN insertion */
3054*c1d14583SBruce Richardson 		if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
3055*c1d14583SBruce Richardson 			td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
3056*c1d14583SBruce Richardson 			td_tag = tx_pkt->vlan_tci;
3057*c1d14583SBruce Richardson 		}
3058*c1d14583SBruce Richardson 
3059*c1d14583SBruce Richardson 		/* Fill in tunneling parameters if necessary */
3060*c1d14583SBruce Richardson 		cd_tunneling_params = 0;
3061*c1d14583SBruce Richardson 		if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
3062*c1d14583SBruce Richardson 			td_offset |= (tx_offload.outer_l2_len >> 1)
3063*c1d14583SBruce Richardson 				<< ICE_TX_DESC_LEN_MACLEN_S;
3064*c1d14583SBruce Richardson 			ice_parse_tunneling_params(ol_flags, tx_offload,
3065*c1d14583SBruce Richardson 						   &cd_tunneling_params);
3066*c1d14583SBruce Richardson 		}
3067*c1d14583SBruce Richardson 
3068*c1d14583SBruce Richardson 		/* Enable checksum offloading */
3069*c1d14583SBruce Richardson 		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
3070*c1d14583SBruce Richardson 			ice_txd_enable_checksum(ol_flags, &td_cmd,
3071*c1d14583SBruce Richardson 						&td_offset, tx_offload);
3072*c1d14583SBruce Richardson 
3073*c1d14583SBruce Richardson 		if (nb_ctx) {
3074*c1d14583SBruce Richardson 			/* Setup TX context descriptor if required */
3075*c1d14583SBruce Richardson 			volatile struct ice_tx_ctx_desc *ctx_txd =
3076*c1d14583SBruce Richardson 				(volatile struct ice_tx_ctx_desc *)
3077*c1d14583SBruce Richardson 					&tx_ring[tx_id];
3078*c1d14583SBruce Richardson 			uint16_t cd_l2tag2 = 0;
3079*c1d14583SBruce Richardson 			uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
3080*c1d14583SBruce Richardson 
3081*c1d14583SBruce Richardson 			txn = &sw_ring[txe->next_id];
3082*c1d14583SBruce Richardson 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
3083*c1d14583SBruce Richardson 			if (txe->mbuf) {
3084*c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(txe->mbuf);
3085*c1d14583SBruce Richardson 				txe->mbuf = NULL;
3086*c1d14583SBruce Richardson 			}
3087*c1d14583SBruce Richardson 
3088*c1d14583SBruce Richardson 			if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
3089*c1d14583SBruce Richardson 				cd_type_cmd_tso_mss |=
3090*c1d14583SBruce Richardson 					ice_set_tso_ctx(tx_pkt, tx_offload);
3091*c1d14583SBruce Richardson 			else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
3092*c1d14583SBruce Richardson 				cd_type_cmd_tso_mss |=
3093*c1d14583SBruce Richardson 					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
3094*c1d14583SBruce Richardson 					ICE_TXD_CTX_QW1_CMD_S) |
3095*c1d14583SBruce Richardson 					 (((uint64_t)txq->vsi->adapter->ptp_tx_index <<
3096*c1d14583SBruce Richardson 					 ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
3097*c1d14583SBruce Richardson 
3098*c1d14583SBruce Richardson 			ctx_txd->tunneling_params =
3099*c1d14583SBruce Richardson 				rte_cpu_to_le_32(cd_tunneling_params);
3100*c1d14583SBruce Richardson 
3101*c1d14583SBruce Richardson 			/* TX context descriptor based double VLAN insert */
3102*c1d14583SBruce Richardson 			if (ol_flags & RTE_MBUF_F_TX_QINQ) {
3103*c1d14583SBruce Richardson 				cd_l2tag2 = tx_pkt->vlan_tci_outer;
3104*c1d14583SBruce Richardson 				cd_type_cmd_tso_mss |=
3105*c1d14583SBruce Richardson 					((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
3106*c1d14583SBruce Richardson 					 ICE_TXD_CTX_QW1_CMD_S);
3107*c1d14583SBruce Richardson 			}
3108*c1d14583SBruce Richardson 			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
3109*c1d14583SBruce Richardson 			ctx_txd->qw1 =
3110*c1d14583SBruce Richardson 				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
3111*c1d14583SBruce Richardson 
3112*c1d14583SBruce Richardson 			txe->last_id = tx_last;
3113*c1d14583SBruce Richardson 			tx_id = txe->next_id;
3114*c1d14583SBruce Richardson 			txe = txn;
3115*c1d14583SBruce Richardson 		}
3116*c1d14583SBruce Richardson 		m_seg = tx_pkt;
3117*c1d14583SBruce Richardson 
3118*c1d14583SBruce Richardson 		do {
3119*c1d14583SBruce Richardson 			txd = &tx_ring[tx_id];
3120*c1d14583SBruce Richardson 			txn = &sw_ring[txe->next_id];
3121*c1d14583SBruce Richardson 
3122*c1d14583SBruce Richardson 			if (txe->mbuf)
3123*c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(txe->mbuf);
3124*c1d14583SBruce Richardson 			txe->mbuf = m_seg;
3125*c1d14583SBruce Richardson 
3126*c1d14583SBruce Richardson 			/* Setup TX Descriptor */
3127*c1d14583SBruce Richardson 			slen = m_seg->data_len;
3128*c1d14583SBruce Richardson 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
3129*c1d14583SBruce Richardson 
3130*c1d14583SBruce Richardson 			while ((ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) &&
3131*c1d14583SBruce Richardson 				unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
3132*c1d14583SBruce Richardson 				txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
3133*c1d14583SBruce Richardson 				txd->cmd_type_offset_bsz =
3134*c1d14583SBruce Richardson 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
3135*c1d14583SBruce Richardson 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
3136*c1d14583SBruce Richardson 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
3137*c1d14583SBruce Richardson 				((uint64_t)ICE_MAX_DATA_PER_TXD <<
3138*c1d14583SBruce Richardson 				 ICE_TXD_QW1_TX_BUF_SZ_S) |
3139*c1d14583SBruce Richardson 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
3140*c1d14583SBruce Richardson 
3141*c1d14583SBruce Richardson 				buf_dma_addr += ICE_MAX_DATA_PER_TXD;
3142*c1d14583SBruce Richardson 				slen -= ICE_MAX_DATA_PER_TXD;
3143*c1d14583SBruce Richardson 
3144*c1d14583SBruce Richardson 				txe->last_id = tx_last;
3145*c1d14583SBruce Richardson 				tx_id = txe->next_id;
3146*c1d14583SBruce Richardson 				txe = txn;
3147*c1d14583SBruce Richardson 				txd = &tx_ring[tx_id];
3148*c1d14583SBruce Richardson 				txn = &sw_ring[txe->next_id];
3149*c1d14583SBruce Richardson 			}
3150*c1d14583SBruce Richardson 
3151*c1d14583SBruce Richardson 			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
3152*c1d14583SBruce Richardson 			txd->cmd_type_offset_bsz =
3153*c1d14583SBruce Richardson 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
3154*c1d14583SBruce Richardson 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
3155*c1d14583SBruce Richardson 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
3156*c1d14583SBruce Richardson 				((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
3157*c1d14583SBruce Richardson 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
3158*c1d14583SBruce Richardson 
3159*c1d14583SBruce Richardson 			txe->last_id = tx_last;
3160*c1d14583SBruce Richardson 			tx_id = txe->next_id;
3161*c1d14583SBruce Richardson 			txe = txn;
3162*c1d14583SBruce Richardson 			m_seg = m_seg->next;
3163*c1d14583SBruce Richardson 		} while (m_seg);
3164*c1d14583SBruce Richardson 
3165*c1d14583SBruce Richardson 		/* fill the last descriptor with End of Packet (EOP) bit */
3166*c1d14583SBruce Richardson 		td_cmd |= ICE_TX_DESC_CMD_EOP;
3167*c1d14583SBruce Richardson 		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
3168*c1d14583SBruce Richardson 		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
3169*c1d14583SBruce Richardson 
3170*c1d14583SBruce Richardson 		/* set RS bit on the last descriptor of one packet */
3171*c1d14583SBruce Richardson 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
3172*c1d14583SBruce Richardson 			PMD_TX_LOG(DEBUG,
3173*c1d14583SBruce Richardson 				   "Setting RS bit on TXD id="
3174*c1d14583SBruce Richardson 				   "%4u (port=%d queue=%d)",
3175*c1d14583SBruce Richardson 				   tx_last, txq->port_id, txq->queue_id);
3176*c1d14583SBruce Richardson 
3177*c1d14583SBruce Richardson 			td_cmd |= ICE_TX_DESC_CMD_RS;
3178*c1d14583SBruce Richardson 
3179*c1d14583SBruce Richardson 			/* Update txq RS bit counters */
3180*c1d14583SBruce Richardson 			txq->nb_tx_used = 0;
3181*c1d14583SBruce Richardson 		}
3182*c1d14583SBruce Richardson 		txd->cmd_type_offset_bsz |=
3183*c1d14583SBruce Richardson 			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
3184*c1d14583SBruce Richardson 					 ICE_TXD_QW1_CMD_S);
3185*c1d14583SBruce Richardson 	}
3186*c1d14583SBruce Richardson end_of_tx:
3187*c1d14583SBruce Richardson 	/* update Tail register */
3188*c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
3189*c1d14583SBruce Richardson 	txq->tx_tail = tx_id;
3190*c1d14583SBruce Richardson 
3191*c1d14583SBruce Richardson 	return nb_tx;
3192*c1d14583SBruce Richardson }
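
/*
 * Illustrative sketch of the caller side: applications transmit through
 * rte_eth_tx_burst(), which ethdev dispatches to ice_xmit_pkts() (or one of
 * the simple/vector paths chosen in ice_set_tx_function()).  Packets not
 * accepted in one call are retried here in a busy loop; the port and queue
 * ids are examples.
 */
static __rte_unused void
ice_doc_tx_burst_usage(struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t sent = 0;

	while (sent < nb)
		sent += rte_eth_tx_burst(0 /* port */, 0 /* queue */,
					 &pkts[sent], nb - sent);
}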
3193*c1d14583SBruce Richardson 
3194*c1d14583SBruce Richardson static __rte_always_inline int
3195*c1d14583SBruce Richardson ice_tx_free_bufs(struct ice_tx_queue *txq)
3196*c1d14583SBruce Richardson {
3197*c1d14583SBruce Richardson 	struct ice_tx_entry *txep;
3198*c1d14583SBruce Richardson 	uint16_t i;
3199*c1d14583SBruce Richardson 
3200*c1d14583SBruce Richardson 	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
3201*c1d14583SBruce Richardson 	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
3202*c1d14583SBruce Richardson 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3203*c1d14583SBruce Richardson 		return 0;
3204*c1d14583SBruce Richardson 
3205*c1d14583SBruce Richardson 	txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
3206*c1d14583SBruce Richardson 
3207*c1d14583SBruce Richardson 	for (i = 0; i < txq->tx_rs_thresh; i++)
3208*c1d14583SBruce Richardson 		rte_prefetch0((txep + i)->mbuf);
3209*c1d14583SBruce Richardson 
3210*c1d14583SBruce Richardson 	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
3211*c1d14583SBruce Richardson 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
3212*c1d14583SBruce Richardson 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
3213*c1d14583SBruce Richardson 			txep->mbuf = NULL;
3214*c1d14583SBruce Richardson 		}
3215*c1d14583SBruce Richardson 	} else {
3216*c1d14583SBruce Richardson 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
3217*c1d14583SBruce Richardson 			rte_pktmbuf_free_seg(txep->mbuf);
3218*c1d14583SBruce Richardson 			txep->mbuf = NULL;
3219*c1d14583SBruce Richardson 		}
3220*c1d14583SBruce Richardson 	}
3221*c1d14583SBruce Richardson 
3222*c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
3223*c1d14583SBruce Richardson 	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
3224*c1d14583SBruce Richardson 	if (txq->tx_next_dd >= txq->nb_tx_desc)
3225*c1d14583SBruce Richardson 		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
3226*c1d14583SBruce Richardson 
3227*c1d14583SBruce Richardson 	return txq->tx_rs_thresh;
3228*c1d14583SBruce Richardson }
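
/*
 * Illustrative sketch: the rte_mempool_put() fast path above is only safe
 * when every mbuf in the queue comes from a single mempool, is non-segmented
 * and has a reference count of 1; these are the guarantees an application
 * gives by enabling RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE.  A per-mbuf check of
 * those assumptions could look like this (helper name is hypothetical):
 */
static __rte_unused int
ice_doc_mbuf_fast_free_ok(const struct rte_mbuf *m, struct rte_mempool *pool)
{
	return m->pool == pool && m->nb_segs == 1 &&
	       rte_mbuf_refcnt_read(m) == 1;
}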
3229*c1d14583SBruce Richardson 
3230*c1d14583SBruce Richardson static int
3231*c1d14583SBruce Richardson ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
3232*c1d14583SBruce Richardson 			uint32_t free_cnt)
3233*c1d14583SBruce Richardson {
3234*c1d14583SBruce Richardson 	struct ice_tx_entry *swr_ring = txq->sw_ring;
3235*c1d14583SBruce Richardson 	uint16_t i, tx_last, tx_id;
3236*c1d14583SBruce Richardson 	uint16_t nb_tx_free_last;
3237*c1d14583SBruce Richardson 	uint16_t nb_tx_to_clean;
3238*c1d14583SBruce Richardson 	uint32_t pkt_cnt;
3239*c1d14583SBruce Richardson 
3240*c1d14583SBruce Richardson 	/* Start freeing mbufs from the entry following tx_tail */
3241*c1d14583SBruce Richardson 	tx_last = txq->tx_tail;
3242*c1d14583SBruce Richardson 	tx_id  = swr_ring[tx_last].next_id;
3243*c1d14583SBruce Richardson 
3244*c1d14583SBruce Richardson 	if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
3245*c1d14583SBruce Richardson 		return 0;
3246*c1d14583SBruce Richardson 
3247*c1d14583SBruce Richardson 	nb_tx_to_clean = txq->nb_tx_free;
3248*c1d14583SBruce Richardson 	nb_tx_free_last = txq->nb_tx_free;
3249*c1d14583SBruce Richardson 	if (!free_cnt)
3250*c1d14583SBruce Richardson 		free_cnt = txq->nb_tx_desc;
3251*c1d14583SBruce Richardson 
3252*c1d14583SBruce Richardson 	/* Loop through swr_ring to count the number of
3253*c1d14583SBruce Richardson 	 * freeable mbufs and packets.
3254*c1d14583SBruce Richardson 	 */
3255*c1d14583SBruce Richardson 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3256*c1d14583SBruce Richardson 		for (i = 0; i < nb_tx_to_clean &&
3257*c1d14583SBruce Richardson 			pkt_cnt < free_cnt &&
3258*c1d14583SBruce Richardson 			tx_id != tx_last; i++) {
3259*c1d14583SBruce Richardson 			if (swr_ring[tx_id].mbuf != NULL) {
3260*c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3261*c1d14583SBruce Richardson 				swr_ring[tx_id].mbuf = NULL;
3262*c1d14583SBruce Richardson 
3263*c1d14583SBruce Richardson 				/*
3264*c1d14583SBruce Richardson 				 * last segment in the packet,
3265*c1d14583SBruce Richardson 				 * increment packet count
3266*c1d14583SBruce Richardson 				 */
3267*c1d14583SBruce Richardson 				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3268*c1d14583SBruce Richardson 			}
3269*c1d14583SBruce Richardson 
3270*c1d14583SBruce Richardson 			tx_id = swr_ring[tx_id].next_id;
3271*c1d14583SBruce Richardson 		}
3272*c1d14583SBruce Richardson 
3273*c1d14583SBruce Richardson 		if (txq->tx_rs_thresh > txq->nb_tx_desc -
3274*c1d14583SBruce Richardson 			txq->nb_tx_free || tx_id == tx_last)
3275*c1d14583SBruce Richardson 			break;
3276*c1d14583SBruce Richardson 
3277*c1d14583SBruce Richardson 		if (pkt_cnt < free_cnt) {
3278*c1d14583SBruce Richardson 			if (ice_xmit_cleanup(txq))
3279*c1d14583SBruce Richardson 				break;
3280*c1d14583SBruce Richardson 
3281*c1d14583SBruce Richardson 			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
3282*c1d14583SBruce Richardson 			nb_tx_free_last = txq->nb_tx_free;
3283*c1d14583SBruce Richardson 		}
3284*c1d14583SBruce Richardson 	}
3285*c1d14583SBruce Richardson 
3286*c1d14583SBruce Richardson 	return (int)pkt_cnt;
3287*c1d14583SBruce Richardson }
3288*c1d14583SBruce Richardson 
3289*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3290*c1d14583SBruce Richardson static int
3291*c1d14583SBruce Richardson ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
3292*c1d14583SBruce Richardson 			uint32_t free_cnt __rte_unused)
3293*c1d14583SBruce Richardson {
3294*c1d14583SBruce Richardson 	return -ENOTSUP;
3295*c1d14583SBruce Richardson }
3296*c1d14583SBruce Richardson #endif
3297*c1d14583SBruce Richardson 
3298*c1d14583SBruce Richardson static int
3299*c1d14583SBruce Richardson ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
3300*c1d14583SBruce Richardson 			uint32_t free_cnt)
3301*c1d14583SBruce Richardson {
3302*c1d14583SBruce Richardson 	int i, n, cnt;
3303*c1d14583SBruce Richardson 
3304*c1d14583SBruce Richardson 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
3305*c1d14583SBruce Richardson 		free_cnt = txq->nb_tx_desc;
3306*c1d14583SBruce Richardson 
3307*c1d14583SBruce Richardson 	cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
3308*c1d14583SBruce Richardson 
3309*c1d14583SBruce Richardson 	for (i = 0; i < cnt; i += n) {
3310*c1d14583SBruce Richardson 		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
3311*c1d14583SBruce Richardson 			break;
3312*c1d14583SBruce Richardson 
3313*c1d14583SBruce Richardson 		n = ice_tx_free_bufs(txq);
3314*c1d14583SBruce Richardson 
3315*c1d14583SBruce Richardson 		if (n == 0)
3316*c1d14583SBruce Richardson 			break;
3317*c1d14583SBruce Richardson 	}
3318*c1d14583SBruce Richardson 
3319*c1d14583SBruce Richardson 	return i;
3320*c1d14583SBruce Richardson }
3321*c1d14583SBruce Richardson 
3322*c1d14583SBruce Richardson int
3323*c1d14583SBruce Richardson ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
3324*c1d14583SBruce Richardson {
3325*c1d14583SBruce Richardson 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
3326*c1d14583SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
3327*c1d14583SBruce Richardson 	struct ice_adapter *ad =
3328*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3329*c1d14583SBruce Richardson 
3330*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3331*c1d14583SBruce Richardson 	if (ad->tx_vec_allowed)
3332*c1d14583SBruce Richardson 		return ice_tx_done_cleanup_vec(q, free_cnt);
3333*c1d14583SBruce Richardson #endif
3334*c1d14583SBruce Richardson 	if (ad->tx_simple_allowed)
3335*c1d14583SBruce Richardson 		return ice_tx_done_cleanup_simple(q, free_cnt);
3336*c1d14583SBruce Richardson 	else
3337*c1d14583SBruce Richardson 		return ice_tx_done_cleanup_full(q, free_cnt);
3338*c1d14583SBruce Richardson }
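
/*
 * Illustrative sketch: applications reach the cleanup handlers above through
 * rte_eth_tx_done_cleanup().  A free_cnt of 0 requests that as many used
 * descriptors as possible be cleaned; the port and queue ids are examples.
 */
static __rte_unused void
ice_doc_tx_done_cleanup_usage(void)
{
	int nb_freed = rte_eth_tx_done_cleanup(0 /* port */, 0 /* queue */, 0);

	if (nb_freed < 0)
		PMD_TX_LOG(DEBUG, "tx_done_cleanup failed: %d", nb_freed);
}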
3339*c1d14583SBruce Richardson 
3340*c1d14583SBruce Richardson /* Populate 4 descriptors with data from 4 mbufs */
3341*c1d14583SBruce Richardson static inline void
3342*c1d14583SBruce Richardson tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3343*c1d14583SBruce Richardson {
3344*c1d14583SBruce Richardson 	uint64_t dma_addr;
3345*c1d14583SBruce Richardson 	uint32_t i;
3346*c1d14583SBruce Richardson 
3347*c1d14583SBruce Richardson 	for (i = 0; i < 4; i++, txdp++, pkts++) {
3348*c1d14583SBruce Richardson 		dma_addr = rte_mbuf_data_iova(*pkts);
3349*c1d14583SBruce Richardson 		txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3350*c1d14583SBruce Richardson 		txdp->cmd_type_offset_bsz =
3351*c1d14583SBruce Richardson 			ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3352*c1d14583SBruce Richardson 				       (*pkts)->data_len, 0);
3353*c1d14583SBruce Richardson 	}
3354*c1d14583SBruce Richardson }
3355*c1d14583SBruce Richardson 
3356*c1d14583SBruce Richardson /* Populate 1 descriptor with data from 1 mbuf */
3357*c1d14583SBruce Richardson static inline void
3358*c1d14583SBruce Richardson tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3359*c1d14583SBruce Richardson {
3360*c1d14583SBruce Richardson 	uint64_t dma_addr;
3361*c1d14583SBruce Richardson 
3362*c1d14583SBruce Richardson 	dma_addr = rte_mbuf_data_iova(*pkts);
3363*c1d14583SBruce Richardson 	txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3364*c1d14583SBruce Richardson 	txdp->cmd_type_offset_bsz =
3365*c1d14583SBruce Richardson 		ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3366*c1d14583SBruce Richardson 			       (*pkts)->data_len, 0);
3367*c1d14583SBruce Richardson }
3368*c1d14583SBruce Richardson 
3369*c1d14583SBruce Richardson static inline void
3370*c1d14583SBruce Richardson ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3371*c1d14583SBruce Richardson 		    uint16_t nb_pkts)
3372*c1d14583SBruce Richardson {
3373*c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3374*c1d14583SBruce Richardson 	struct ice_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3375*c1d14583SBruce Richardson 	const int N_PER_LOOP = 4;
3376*c1d14583SBruce Richardson 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3377*c1d14583SBruce Richardson 	int mainpart, leftover;
3378*c1d14583SBruce Richardson 	int i, j;
3379*c1d14583SBruce Richardson 
3380*c1d14583SBruce Richardson 	/**
3381*c1d14583SBruce Richardson 	 * Process most of the packets in chunks of N pkts.  Any
3382*c1d14583SBruce Richardson 	 * leftover packets will get processed one at a time.
3383*c1d14583SBruce Richardson 	 */
3384*c1d14583SBruce Richardson 	mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3385*c1d14583SBruce Richardson 	leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
3386*c1d14583SBruce Richardson 	for (i = 0; i < mainpart; i += N_PER_LOOP) {
3387*c1d14583SBruce Richardson 		/* Copy N mbuf pointers to the S/W ring */
3388*c1d14583SBruce Richardson 		for (j = 0; j < N_PER_LOOP; ++j)
3389*c1d14583SBruce Richardson 			(txep + i + j)->mbuf = *(pkts + i + j);
3390*c1d14583SBruce Richardson 		tx4(txdp + i, pkts + i);
3391*c1d14583SBruce Richardson 	}
3392*c1d14583SBruce Richardson 
3393*c1d14583SBruce Richardson 	if (unlikely(leftover > 0)) {
3394*c1d14583SBruce Richardson 		for (i = 0; i < leftover; ++i) {
3395*c1d14583SBruce Richardson 			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3396*c1d14583SBruce Richardson 			tx1(txdp + mainpart + i, pkts + mainpart + i);
3397*c1d14583SBruce Richardson 		}
3398*c1d14583SBruce Richardson 	}
3399*c1d14583SBruce Richardson }
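
/*
 * Illustrative sketch of the split above: for nb_pkts = 11,
 * mainpart = 11 & ~3 = 8 packets are written by two tx4() calls and
 * leftover = 11 & 3 = 3 packets by tx1().  The helper name is hypothetical.
 */
static __rte_unused void
ice_doc_fill_split_example(void)
{
	const uint16_t nb_pkts = 11;
	uint16_t mainpart = nb_pkts & ~(4 - 1);	/* 8 */
	uint16_t leftover = nb_pkts & (4 - 1);	/* 3 */

	RTE_SET_USED(mainpart);
	RTE_SET_USED(leftover);
}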
3400*c1d14583SBruce Richardson 
3401*c1d14583SBruce Richardson static inline uint16_t
3402*c1d14583SBruce Richardson tx_xmit_pkts(struct ice_tx_queue *txq,
3403*c1d14583SBruce Richardson 	     struct rte_mbuf **tx_pkts,
3404*c1d14583SBruce Richardson 	     uint16_t nb_pkts)
3405*c1d14583SBruce Richardson {
3406*c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txr = txq->tx_ring;
3407*c1d14583SBruce Richardson 	uint16_t n = 0;
3408*c1d14583SBruce Richardson 
3409*c1d14583SBruce Richardson 	/**
3410*c1d14583SBruce Richardson 	 * Begin scanning the H/W ring for done descriptors when the number
3411*c1d14583SBruce Richardson 	 * of available descriptors drops below tx_free_thresh. For each done
3412*c1d14583SBruce Richardson 	 * descriptor, free the associated buffer.
3413*c1d14583SBruce Richardson 	 */
3414*c1d14583SBruce Richardson 	if (txq->nb_tx_free < txq->tx_free_thresh)
3415*c1d14583SBruce Richardson 		ice_tx_free_bufs(txq);
3416*c1d14583SBruce Richardson 
3417*c1d14583SBruce Richardson 	/* Use available descriptors only */
3418*c1d14583SBruce Richardson 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3419*c1d14583SBruce Richardson 	if (unlikely(!nb_pkts))
3420*c1d14583SBruce Richardson 		return 0;
3421*c1d14583SBruce Richardson 
3422*c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3423*c1d14583SBruce Richardson 	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3424*c1d14583SBruce Richardson 		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3425*c1d14583SBruce Richardson 		ice_tx_fill_hw_ring(txq, tx_pkts, n);
3426*c1d14583SBruce Richardson 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3427*c1d14583SBruce Richardson 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3428*c1d14583SBruce Richardson 					 ICE_TXD_QW1_CMD_S);
3429*c1d14583SBruce Richardson 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3430*c1d14583SBruce Richardson 		txq->tx_tail = 0;
3431*c1d14583SBruce Richardson 	}
3432*c1d14583SBruce Richardson 
3433*c1d14583SBruce Richardson 	/* Fill hardware descriptor ring with mbuf data */
3434*c1d14583SBruce Richardson 	ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3435*c1d14583SBruce Richardson 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3436*c1d14583SBruce Richardson 
3437*c1d14583SBruce Richardson 	/* Determine if RS bit needs to be set */
3438*c1d14583SBruce Richardson 	if (txq->tx_tail > txq->tx_next_rs) {
3439*c1d14583SBruce Richardson 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3440*c1d14583SBruce Richardson 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3441*c1d14583SBruce Richardson 					 ICE_TXD_QW1_CMD_S);
3442*c1d14583SBruce Richardson 		txq->tx_next_rs =
3443*c1d14583SBruce Richardson 			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3444*c1d14583SBruce Richardson 		if (txq->tx_next_rs >= txq->nb_tx_desc)
3445*c1d14583SBruce Richardson 			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3446*c1d14583SBruce Richardson 	}
3447*c1d14583SBruce Richardson 
3448*c1d14583SBruce Richardson 	if (txq->tx_tail >= txq->nb_tx_desc)
3449*c1d14583SBruce Richardson 		txq->tx_tail = 0;
3450*c1d14583SBruce Richardson 
3451*c1d14583SBruce Richardson 	/* Update the tx tail register */
3452*c1d14583SBruce Richardson 	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3453*c1d14583SBruce Richardson 
3454*c1d14583SBruce Richardson 	return nb_pkts;
3455*c1d14583SBruce Richardson }
3456*c1d14583SBruce Richardson 
3457*c1d14583SBruce Richardson static uint16_t
3458*c1d14583SBruce Richardson ice_xmit_pkts_simple(void *tx_queue,
3459*c1d14583SBruce Richardson 		     struct rte_mbuf **tx_pkts,
3460*c1d14583SBruce Richardson 		     uint16_t nb_pkts)
3461*c1d14583SBruce Richardson {
3462*c1d14583SBruce Richardson 	uint16_t nb_tx = 0;
3463*c1d14583SBruce Richardson 
3464*c1d14583SBruce Richardson 	if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3465*c1d14583SBruce Richardson 		return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3466*c1d14583SBruce Richardson 				    tx_pkts, nb_pkts);
3467*c1d14583SBruce Richardson 
3468*c1d14583SBruce Richardson 	while (nb_pkts) {
3469*c1d14583SBruce Richardson 		uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3470*c1d14583SBruce Richardson 						      ICE_TX_MAX_BURST);
3471*c1d14583SBruce Richardson 
3472*c1d14583SBruce Richardson 		ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3473*c1d14583SBruce Richardson 				   &tx_pkts[nb_tx], num);
3474*c1d14583SBruce Richardson 		nb_tx = (uint16_t)(nb_tx + ret);
3475*c1d14583SBruce Richardson 		nb_pkts = (uint16_t)(nb_pkts - ret);
3476*c1d14583SBruce Richardson 		if (ret < num)
3477*c1d14583SBruce Richardson 			break;
3478*c1d14583SBruce Richardson 	}
3479*c1d14583SBruce Richardson 
3480*c1d14583SBruce Richardson 	return nb_tx;
3481*c1d14583SBruce Richardson }
3482*c1d14583SBruce Richardson 
3483*c1d14583SBruce Richardson void __rte_cold
3484*c1d14583SBruce Richardson ice_set_rx_function(struct rte_eth_dev *dev)
3485*c1d14583SBruce Richardson {
3486*c1d14583SBruce Richardson 	PMD_INIT_FUNC_TRACE();
3487*c1d14583SBruce Richardson 	struct ice_adapter *ad =
3488*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3489*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3490*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
3491*c1d14583SBruce Richardson 	int i;
3492*c1d14583SBruce Richardson 	int rx_check_ret = -1;
3493*c1d14583SBruce Richardson 
3494*c1d14583SBruce Richardson 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3495*c1d14583SBruce Richardson 		ad->rx_use_avx512 = false;
3496*c1d14583SBruce Richardson 		ad->rx_use_avx2 = false;
3497*c1d14583SBruce Richardson 		rx_check_ret = ice_rx_vec_dev_check(dev);
3498*c1d14583SBruce Richardson 		if (ad->ptp_ena)
3499*c1d14583SBruce Richardson 			rx_check_ret = -1;
3500*c1d14583SBruce Richardson 		ad->rx_vec_offload_support =
3501*c1d14583SBruce Richardson 				(rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
3502*c1d14583SBruce Richardson 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3503*c1d14583SBruce Richardson 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3504*c1d14583SBruce Richardson 			ad->rx_vec_allowed = true;
3505*c1d14583SBruce Richardson 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
3506*c1d14583SBruce Richardson 				rxq = dev->data->rx_queues[i];
3507*c1d14583SBruce Richardson 				if (rxq && ice_rxq_vec_setup(rxq)) {
3508*c1d14583SBruce Richardson 					ad->rx_vec_allowed = false;
3509*c1d14583SBruce Richardson 					break;
3510*c1d14583SBruce Richardson 				}
3511*c1d14583SBruce Richardson 			}
3512*c1d14583SBruce Richardson 
3513*c1d14583SBruce Richardson 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3514*c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3515*c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3516*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3517*c1d14583SBruce Richardson 				ad->rx_use_avx512 = true;
3518*c1d14583SBruce Richardson #else
3519*c1d14583SBruce Richardson 			PMD_DRV_LOG(NOTICE,
3520*c1d14583SBruce Richardson 				"AVX512 is not supported in build env");
3521*c1d14583SBruce Richardson #endif
3522*c1d14583SBruce Richardson 			if (!ad->rx_use_avx512 &&
3523*c1d14583SBruce Richardson 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3524*c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3525*c1d14583SBruce Richardson 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3526*c1d14583SBruce Richardson 				ad->rx_use_avx2 = true;
3527*c1d14583SBruce Richardson 
3528*c1d14583SBruce Richardson 		} else {
3529*c1d14583SBruce Richardson 			ad->rx_vec_allowed = false;
3530*c1d14583SBruce Richardson 		}
3531*c1d14583SBruce Richardson 	}
3532*c1d14583SBruce Richardson 
3533*c1d14583SBruce Richardson 	if (ad->rx_vec_allowed) {
3534*c1d14583SBruce Richardson 		if (dev->data->scattered_rx) {
3535*c1d14583SBruce Richardson 			if (ad->rx_use_avx512) {
3536*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3537*c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3538*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3539*c1d14583SBruce Richardson 						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3540*c1d14583SBruce Richardson 						dev->data->port_id);
3541*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3542*c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx512_offload;
3543*c1d14583SBruce Richardson 				} else {
3544*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3545*c1d14583SBruce Richardson 						"Using AVX512 Vector Scattered Rx (port %d).",
3546*c1d14583SBruce Richardson 						dev->data->port_id);
3547*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3548*c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx512;
3549*c1d14583SBruce Richardson 				}
3550*c1d14583SBruce Richardson #endif
3551*c1d14583SBruce Richardson 			} else if (ad->rx_use_avx2) {
3552*c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3553*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3554*c1d14583SBruce Richardson 						    "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3555*c1d14583SBruce Richardson 						    dev->data->port_id);
3556*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3557*c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx2_offload;
3558*c1d14583SBruce Richardson 				} else {
3559*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3560*c1d14583SBruce Richardson 						    "Using AVX2 Vector Scattered Rx (port %d).",
3561*c1d14583SBruce Richardson 						    dev->data->port_id);
3562*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3563*c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx2;
3564*c1d14583SBruce Richardson 				}
3565*c1d14583SBruce Richardson 			} else {
3566*c1d14583SBruce Richardson 				PMD_DRV_LOG(DEBUG,
3567*c1d14583SBruce Richardson 					"Using Vector Scattered Rx (port %d).",
3568*c1d14583SBruce Richardson 					dev->data->port_id);
3569*c1d14583SBruce Richardson 				dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3570*c1d14583SBruce Richardson 			}
3571*c1d14583SBruce Richardson 		} else {
3572*c1d14583SBruce Richardson 			if (ad->rx_use_avx512) {
3573*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3574*c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3575*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3576*c1d14583SBruce Richardson 						"Using AVX512 OFFLOAD Vector Rx (port %d).",
3577*c1d14583SBruce Richardson 						dev->data->port_id);
3578*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3579*c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx512_offload;
3580*c1d14583SBruce Richardson 				} else {
3581*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3582*c1d14583SBruce Richardson 						"Using AVX512 Vector Rx (port %d).",
3583*c1d14583SBruce Richardson 						dev->data->port_id);
3584*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3585*c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx512;
3586*c1d14583SBruce Richardson 				}
3587*c1d14583SBruce Richardson #endif
3588*c1d14583SBruce Richardson 			} else if (ad->rx_use_avx2) {
3589*c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3590*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3591*c1d14583SBruce Richardson 						    "Using AVX2 OFFLOAD Vector Rx (port %d).",
3592*c1d14583SBruce Richardson 						    dev->data->port_id);
3593*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3594*c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx2_offload;
3595*c1d14583SBruce Richardson 				} else {
3596*c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3597*c1d14583SBruce Richardson 						    "Using AVX2 Vector Rx (port %d).",
3598*c1d14583SBruce Richardson 						    dev->data->port_id);
3599*c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3600*c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx2;
3601*c1d14583SBruce Richardson 				}
3602*c1d14583SBruce Richardson 			} else {
3603*c1d14583SBruce Richardson 				PMD_DRV_LOG(DEBUG,
3604*c1d14583SBruce Richardson 					"Using Vector Rx (port %d).",
3605*c1d14583SBruce Richardson 					dev->data->port_id);
3606*c1d14583SBruce Richardson 				dev->rx_pkt_burst = ice_recv_pkts_vec;
3607*c1d14583SBruce Richardson 			}
3608*c1d14583SBruce Richardson 		}
3609*c1d14583SBruce Richardson 		return;
3610*c1d14583SBruce Richardson 	}
3611*c1d14583SBruce Richardson 
3612*c1d14583SBruce Richardson #endif
3613*c1d14583SBruce Richardson 
3614*c1d14583SBruce Richardson 	if (dev->data->scattered_rx) {
3615*c1d14583SBruce Richardson 		/* Set the non-LRO scattered function */
3616*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3617*c1d14583SBruce Richardson 			     "Using a Scattered function on port %d.",
3618*c1d14583SBruce Richardson 			     dev->data->port_id);
3619*c1d14583SBruce Richardson 		dev->rx_pkt_burst = ice_recv_scattered_pkts;
3620*c1d14583SBruce Richardson 	} else if (ad->rx_bulk_alloc_allowed) {
3621*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3622*c1d14583SBruce Richardson 			     "Rx Burst Bulk Alloc Preconditions are "
3623*c1d14583SBruce Richardson 			     "satisfied. Rx Burst Bulk Alloc function "
3624*c1d14583SBruce Richardson 			     "will be used on port %d.",
3625*c1d14583SBruce Richardson 			     dev->data->port_id);
3626*c1d14583SBruce Richardson 		dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3627*c1d14583SBruce Richardson 	} else {
3628*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3629*c1d14583SBruce Richardson 			     "Rx Burst Bulk Alloc Preconditions are not "
3630*c1d14583SBruce Richardson 			     "satisfied, Normal Rx will be used on port %d.",
3631*c1d14583SBruce Richardson 			     dev->data->port_id);
3632*c1d14583SBruce Richardson 		dev->rx_pkt_burst = ice_recv_pkts;
3633*c1d14583SBruce Richardson 	}
3634*c1d14583SBruce Richardson }
3635*c1d14583SBruce Richardson 
3636*c1d14583SBruce Richardson static const struct {
3637*c1d14583SBruce Richardson 	eth_rx_burst_t pkt_burst;
3638*c1d14583SBruce Richardson 	const char *info;
3639*c1d14583SBruce Richardson } ice_rx_burst_infos[] = {
3640*c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts,          "Scalar Scattered" },
3641*c1d14583SBruce Richardson 	{ ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
3642*c1d14583SBruce Richardson 	{ ice_recv_pkts,                    "Scalar" },
3643*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3644*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3645*c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3646*c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3647*c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
3648*c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
3649*c1d14583SBruce Richardson #endif
3650*c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3651*c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3652*c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
3653*c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx2_offload,   "Offload Vector AVX2" },
3654*c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
3655*c1d14583SBruce Richardson 	{ ice_recv_pkts_vec,                "Vector SSE" },
3656*c1d14583SBruce Richardson #endif
3657*c1d14583SBruce Richardson };
3658*c1d14583SBruce Richardson 
3659*c1d14583SBruce Richardson int
3660*c1d14583SBruce Richardson ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3661*c1d14583SBruce Richardson 		      struct rte_eth_burst_mode *mode)
3662*c1d14583SBruce Richardson {
3663*c1d14583SBruce Richardson 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3664*c1d14583SBruce Richardson 	int ret = -EINVAL;
3665*c1d14583SBruce Richardson 	unsigned int i;
3666*c1d14583SBruce Richardson 
3667*c1d14583SBruce Richardson 	for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3668*c1d14583SBruce Richardson 		if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3669*c1d14583SBruce Richardson 			snprintf(mode->info, sizeof(mode->info), "%s",
3670*c1d14583SBruce Richardson 				 ice_rx_burst_infos[i].info);
3671*c1d14583SBruce Richardson 			ret = 0;
3672*c1d14583SBruce Richardson 			break;
3673*c1d14583SBruce Richardson 		}
3674*c1d14583SBruce Richardson 	}
3675*c1d14583SBruce Richardson 
3676*c1d14583SBruce Richardson 	return ret;
3677*c1d14583SBruce Richardson }
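
/*
 * Illustrative sketch: an application can query which of the Rx paths listed
 * in ice_rx_burst_infos[] was selected by calling
 * rte_eth_rx_burst_mode_get(); the port and queue ids are examples.
 */
static __rte_unused void
ice_doc_rx_burst_mode_usage(void)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_rx_burst_mode_get(0 /* port */, 0 /* queue */, &mode) == 0)
		PMD_DRV_LOG(INFO, "Rx burst mode: %s", mode.info);
}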
3678*c1d14583SBruce Richardson 
3679*c1d14583SBruce Richardson void __rte_cold
3680*c1d14583SBruce Richardson ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3681*c1d14583SBruce Richardson {
3682*c1d14583SBruce Richardson 	struct ice_adapter *ad =
3683*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3684*c1d14583SBruce Richardson 
3685*c1d14583SBruce Richardson 	/* Use a simple Tx queue if possible (only fast free is allowed) */
3686*c1d14583SBruce Richardson 	ad->tx_simple_allowed =
3687*c1d14583SBruce Richardson 		(txq->offloads ==
3688*c1d14583SBruce Richardson 		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
3689*c1d14583SBruce Richardson 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3690*c1d14583SBruce Richardson 
3691*c1d14583SBruce Richardson 	if (ad->tx_simple_allowed)
3692*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3693*c1d14583SBruce Richardson 			     txq->queue_id);
3694*c1d14583SBruce Richardson 	else
3695*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3696*c1d14583SBruce Richardson 			     "Simple Tx can NOT be enabled on Tx queue %u.",
3697*c1d14583SBruce Richardson 			     txq->queue_id);
3698*c1d14583SBruce Richardson }
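
/*
 * Illustrative sketch of the subset test above: a queue qualifies for the
 * simple Tx path only if its offload bits are limited to
 * RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE (and tx_rs_thresh is large enough).
 * For example, FAST_FREE alone passes, while FAST_FREE | TCP_CKSUM fails
 * because masking removes the checksum bit.  The helper name is hypothetical.
 */
static __rte_unused int
ice_doc_offloads_allow_simple_tx(uint64_t offloads)
{
	return offloads == (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
}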
3699*c1d14583SBruce Richardson 
3700*c1d14583SBruce Richardson /*********************************************************************
3701*c1d14583SBruce Richardson  *
3702*c1d14583SBruce Richardson  *  TX prep functions
3703*c1d14583SBruce Richardson  *
3704*c1d14583SBruce Richardson  **********************************************************************/
3705*c1d14583SBruce Richardson /* The supported range of TSO MSS and the maximum TSO frame size */
3706*c1d14583SBruce Richardson #define ICE_MIN_TSO_MSS            64
3707*c1d14583SBruce Richardson #define ICE_MAX_TSO_MSS            9728
3708*c1d14583SBruce Richardson #define ICE_MAX_TSO_FRAME_SIZE     262144
3709*c1d14583SBruce Richardson 
3710*c1d14583SBruce Richardson /* Check for zero-length mbuf segments */
3711*c1d14583SBruce Richardson static inline uint16_t
3712*c1d14583SBruce Richardson ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
3713*c1d14583SBruce Richardson {
3714*c1d14583SBruce Richardson 	struct rte_mbuf *txd = tx_pkt;
3715*c1d14583SBruce Richardson 
3716*c1d14583SBruce Richardson 	while (txd != NULL) {
3717*c1d14583SBruce Richardson 		if (txd->data_len == 0)
3718*c1d14583SBruce Richardson 			return -1;
3719*c1d14583SBruce Richardson 		txd = txd->next;
3720*c1d14583SBruce Richardson 	}
3721*c1d14583SBruce Richardson 
3722*c1d14583SBruce Richardson 	return 0;
3723*c1d14583SBruce Richardson }
3724*c1d14583SBruce Richardson 
3725*c1d14583SBruce Richardson /* Tx mbuf check */
3726*c1d14583SBruce Richardson static uint16_t
3727*c1d14583SBruce Richardson ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3728*c1d14583SBruce Richardson {
3729*c1d14583SBruce Richardson 	struct ice_tx_queue *txq = tx_queue;
3730*c1d14583SBruce Richardson 	uint16_t idx;
3731*c1d14583SBruce Richardson 	struct rte_mbuf *mb;
3732*c1d14583SBruce Richardson 	bool pkt_error = false;
3733*c1d14583SBruce Richardson 	uint16_t good_pkts = nb_pkts;
3734*c1d14583SBruce Richardson 	const char *reason = NULL;
3735*c1d14583SBruce Richardson 	struct ice_adapter *adapter = txq->vsi->adapter;
3736*c1d14583SBruce Richardson 	uint64_t ol_flags;
3737*c1d14583SBruce Richardson 
3738*c1d14583SBruce Richardson 	for (idx = 0; idx < nb_pkts; idx++) {
3739*c1d14583SBruce Richardson 		mb = tx_pkts[idx];
3740*c1d14583SBruce Richardson 		ol_flags = mb->ol_flags;
3741*c1d14583SBruce Richardson 
3742*c1d14583SBruce Richardson 		if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_MBUF) &&
3743*c1d14583SBruce Richardson 		    (rte_mbuf_check(mb, 1, &reason) != 0)) {
3744*c1d14583SBruce Richardson 			PMD_TX_LOG(ERR, "INVALID mbuf: %s", reason);
3745*c1d14583SBruce Richardson 			pkt_error = true;
3746*c1d14583SBruce Richardson 			break;
3747*c1d14583SBruce Richardson 		}
3748*c1d14583SBruce Richardson 
3749*c1d14583SBruce Richardson 		if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SIZE) &&
3750*c1d14583SBruce Richardson 		    (mb->data_len > mb->pkt_len ||
3751*c1d14583SBruce Richardson 		     mb->data_len < ICE_TX_MIN_PKT_LEN ||
3752*c1d14583SBruce Richardson 		     mb->data_len > ICE_FRAME_SIZE_MAX)) {
3753*c1d14583SBruce Richardson 			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %d)",
3754*c1d14583SBruce Richardson 				mb->data_len, ICE_TX_MIN_PKT_LEN, ICE_FRAME_SIZE_MAX);
3755*c1d14583SBruce Richardson 			pkt_error = true;
3756*c1d14583SBruce Richardson 			break;
3757*c1d14583SBruce Richardson 		}
3758*c1d14583SBruce Richardson 
3759*c1d14583SBruce Richardson 		if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SEGMENT) {
3760*c1d14583SBruce Richardson 			if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3761*c1d14583SBruce Richardson 				/**
3762*c1d14583SBruce Richardson 				 * No TSO case: nb_segs and pkt_len must not
3763*c1d14583SBruce Richardson 				 * exceed the limits.
3764*c1d14583SBruce Richardson 				 */
3765*c1d14583SBruce Richardson 				if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) {
3766*c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds HW limit, maximum allowed value is %d",
3767*c1d14583SBruce Richardson 						mb->nb_segs, ICE_TX_MTU_SEG_MAX);
3768*c1d14583SBruce Richardson 					pkt_error = true;
3769*c1d14583SBruce Richardson 					break;
3770*c1d14583SBruce Richardson 				}
3771*c1d14583SBruce Richardson 				if (mb->pkt_len > ICE_FRAME_SIZE_MAX) {
3772*c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds HW limit, maximum allowed value is %d",
3773*c1d14583SBruce Richardson 						mb->pkt_len, ICE_FRAME_SIZE_MAX);
3774*c1d14583SBruce Richardson 					pkt_error = true;
3775*c1d14583SBruce Richardson 					break;
3776*c1d14583SBruce Richardson 				}
3777*c1d14583SBruce Richardson 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
3778*c1d14583SBruce Richardson 				/** TSO case: tso_segsz, nb_segs and pkt_len must
3779*c1d14583SBruce Richardson 				 * not exceed the limits.
3780*c1d14583SBruce Richardson 				 */
3781*c1d14583SBruce Richardson 				if (mb->tso_segsz < ICE_MIN_TSO_MSS ||
3782*c1d14583SBruce Richardson 				    mb->tso_segsz > ICE_MAX_TSO_MSS) {
3783*c1d14583SBruce Richardson 					/**
3784*c1d14583SBruce Richardson 					 * An MSS outside this range is considered malicious
3785*c1d14583SBruce Richardson 					 */
3786*c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out of range, reasonable range (%d - %u)",
3787*c1d14583SBruce Richardson 						mb->tso_segsz, ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS);
3788*c1d14583SBruce Richardson 					pkt_error = true;
3789*c1d14583SBruce Richardson 					break;
3790*c1d14583SBruce Richardson 				}
3791*c1d14583SBruce Richardson 				if (mb->nb_segs > txq->nb_tx_desc) {
3792*c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length");
3793*c1d14583SBruce Richardson 					pkt_error = true;
3794*c1d14583SBruce Richardson 					break;
3795*c1d14583SBruce Richardson 				}
3796*c1d14583SBruce Richardson 			}
3797*c1d14583SBruce Richardson 		}
3798*c1d14583SBruce Richardson 
3799*c1d14583SBruce Richardson 		if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_OFFLOAD) {
3800*c1d14583SBruce Richardson 			if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) {
3801*c1d14583SBruce Richardson 				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload is not supported");
3802*c1d14583SBruce Richardson 				pkt_error = true;
3803*c1d14583SBruce Richardson 				break;
3804*c1d14583SBruce Richardson 			}
3805*c1d14583SBruce Richardson 
3806*c1d14583SBruce Richardson 			if (!rte_validate_tx_offload(mb)) {
3807*c1d14583SBruce Richardson 				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload setup error");
3808*c1d14583SBruce Richardson 				pkt_error = true;
3809*c1d14583SBruce Richardson 				break;
3810*c1d14583SBruce Richardson 			}
3811*c1d14583SBruce Richardson 		}
3812*c1d14583SBruce Richardson 	}
3813*c1d14583SBruce Richardson 
3814*c1d14583SBruce Richardson 	if (pkt_error) {
3815*c1d14583SBruce Richardson 		txq->mbuf_errors++;
3816*c1d14583SBruce Richardson 		good_pkts = idx;
3817*c1d14583SBruce Richardson 		if (good_pkts == 0)
3818*c1d14583SBruce Richardson 			return 0;
3819*c1d14583SBruce Richardson 	}
3820*c1d14583SBruce Richardson 
3821*c1d14583SBruce Richardson 	return adapter->tx_pkt_burst(tx_queue, tx_pkts, good_pkts);
3822*c1d14583SBruce Richardson }
3823*c1d14583SBruce Richardson 
3824*c1d14583SBruce Richardson uint16_t
3825*c1d14583SBruce Richardson ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3826*c1d14583SBruce Richardson 	      uint16_t nb_pkts)
3827*c1d14583SBruce Richardson {
3828*c1d14583SBruce Richardson 	int i, ret;
3829*c1d14583SBruce Richardson 	uint64_t ol_flags;
3830*c1d14583SBruce Richardson 	struct rte_mbuf *m;
3831*c1d14583SBruce Richardson 
3832*c1d14583SBruce Richardson 	for (i = 0; i < nb_pkts; i++) {
3833*c1d14583SBruce Richardson 		m = tx_pkts[i];
3834*c1d14583SBruce Richardson 		ol_flags = m->ol_flags;
3835*c1d14583SBruce Richardson 
3836*c1d14583SBruce Richardson 		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
3837*c1d14583SBruce Richardson 		    /**
3838*c1d14583SBruce Richardson 		     * No TSO case: nb_segs and pkt_len must not exceed
3839*c1d14583SBruce Richardson 		     * the limits.
3840*c1d14583SBruce Richardson 		     */
3841*c1d14583SBruce Richardson 		    (m->nb_segs > ICE_TX_MTU_SEG_MAX ||
3842*c1d14583SBruce Richardson 		     m->pkt_len > ICE_FRAME_SIZE_MAX)) {
3843*c1d14583SBruce Richardson 			rte_errno = EINVAL;
3844*c1d14583SBruce Richardson 			return i;
3845*c1d14583SBruce Richardson 		} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3846*c1d14583SBruce Richardson 		    /** TSO case: tso_segsz, nb_segs and pkt_len must not exceed
3847*c1d14583SBruce Richardson 		     * the limits.
3848*c1d14583SBruce Richardson 		     */
3849*c1d14583SBruce Richardson 		    (m->tso_segsz < ICE_MIN_TSO_MSS ||
3850*c1d14583SBruce Richardson 		     m->tso_segsz > ICE_MAX_TSO_MSS ||
3851*c1d14583SBruce Richardson 		     m->nb_segs >
3852*c1d14583SBruce Richardson 			((struct ice_tx_queue *)tx_queue)->nb_tx_desc ||
3853*c1d14583SBruce Richardson 		     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3854*c1d14583SBruce Richardson 			/**
3855*c1d14583SBruce Richardson 			 * An MSS outside this range is considered malicious
3856*c1d14583SBruce Richardson 			 */
3857*c1d14583SBruce Richardson 			rte_errno = EINVAL;
3858*c1d14583SBruce Richardson 			return i;
3859*c1d14583SBruce Richardson 		}
3860*c1d14583SBruce Richardson 
3861*c1d14583SBruce Richardson 		if (m->pkt_len < ICE_TX_MIN_PKT_LEN) {
3862*c1d14583SBruce Richardson 			rte_errno = EINVAL;
3863*c1d14583SBruce Richardson 			return i;
3864*c1d14583SBruce Richardson 		}
3865*c1d14583SBruce Richardson 
3866*c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_TX
3867*c1d14583SBruce Richardson 		ret = rte_validate_tx_offload(m);
3868*c1d14583SBruce Richardson 		if (ret != 0) {
3869*c1d14583SBruce Richardson 			rte_errno = -ret;
3870*c1d14583SBruce Richardson 			return i;
3871*c1d14583SBruce Richardson 		}
3872*c1d14583SBruce Richardson #endif
3873*c1d14583SBruce Richardson 		ret = rte_net_intel_cksum_prepare(m);
3874*c1d14583SBruce Richardson 		if (ret != 0) {
3875*c1d14583SBruce Richardson 			rte_errno = -ret;
3876*c1d14583SBruce Richardson 			return i;
3877*c1d14583SBruce Richardson 		}
3878*c1d14583SBruce Richardson 
3879*c1d14583SBruce Richardson 		if (ice_check_empty_mbuf(m) != 0) {
3880*c1d14583SBruce Richardson 			rte_errno = EINVAL;
3881*c1d14583SBruce Richardson 			return i;
3882*c1d14583SBruce Richardson 		}
3883*c1d14583SBruce Richardson 	}
3884*c1d14583SBruce Richardson 	return i;
3885*c1d14583SBruce Richardson }
3886*c1d14583SBruce Richardson 
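/* Select the Tx burst and prepare callbacks for the port based on the
 * configured offloads, the available SIMD instruction sets and devargs.
 */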
3887*c1d14583SBruce Richardson void __rte_cold
3888*c1d14583SBruce Richardson ice_set_tx_function(struct rte_eth_dev *dev)
3889*c1d14583SBruce Richardson {
3890*c1d14583SBruce Richardson 	struct ice_adapter *ad =
3891*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3892*c1d14583SBruce Richardson 	int mbuf_check = ad->devargs.mbuf_check;
3893*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3894*c1d14583SBruce Richardson 	struct ice_tx_queue *txq;
3895*c1d14583SBruce Richardson 	int i;
3896*c1d14583SBruce Richardson 	int tx_check_ret = -1;
3897*c1d14583SBruce Richardson 
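	/* The vector-path capability flags are only (re)computed in the
	 * primary process; secondary processes reuse the values already
	 * stored in the shared adapter data.
	 */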
3898*c1d14583SBruce Richardson 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3899*c1d14583SBruce Richardson 		ad->tx_use_avx2 = false;
3900*c1d14583SBruce Richardson 		ad->tx_use_avx512 = false;
3901*c1d14583SBruce Richardson 		tx_check_ret = ice_tx_vec_dev_check(dev);
3902*c1d14583SBruce Richardson 		if (tx_check_ret >= 0 &&
3903*c1d14583SBruce Richardson 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3904*c1d14583SBruce Richardson 			ad->tx_vec_allowed = true;
3905*c1d14583SBruce Richardson 
3906*c1d14583SBruce Richardson 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3907*c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3908*c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3909*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3910*c1d14583SBruce Richardson 				ad->tx_use_avx512 = true;
3911*c1d14583SBruce Richardson #else
3912*c1d14583SBruce Richardson 			PMD_DRV_LOG(NOTICE,
3913*c1d14583SBruce Richardson 				"AVX512 is not supported in build env");
3914*c1d14583SBruce Richardson #endif
3915*c1d14583SBruce Richardson 			if (!ad->tx_use_avx512 &&
3916*c1d14583SBruce Richardson 				(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3917*c1d14583SBruce Richardson 				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3918*c1d14583SBruce Richardson 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3919*c1d14583SBruce Richardson 				ad->tx_use_avx2 = true;
3920*c1d14583SBruce Richardson 
3921*c1d14583SBruce Richardson 			if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3922*c1d14583SBruce Richardson 				tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3923*c1d14583SBruce Richardson 				ad->tx_vec_allowed = false;
3924*c1d14583SBruce Richardson 
3925*c1d14583SBruce Richardson 			if (ad->tx_vec_allowed) {
3926*c1d14583SBruce Richardson 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
3927*c1d14583SBruce Richardson 					txq = dev->data->tx_queues[i];
3928*c1d14583SBruce Richardson 					if (txq && ice_txq_vec_setup(txq)) {
3929*c1d14583SBruce Richardson 						ad->tx_vec_allowed = false;
3930*c1d14583SBruce Richardson 						break;
3931*c1d14583SBruce Richardson 					}
3932*c1d14583SBruce Richardson 				}
3933*c1d14583SBruce Richardson 			}
3934*c1d14583SBruce Richardson 		} else {
3935*c1d14583SBruce Richardson 			ad->tx_vec_allowed = false;
3936*c1d14583SBruce Richardson 		}
3937*c1d14583SBruce Richardson 	}
3938*c1d14583SBruce Richardson 
3939*c1d14583SBruce Richardson 	if (ad->tx_vec_allowed) {
3940*c1d14583SBruce Richardson 		dev->tx_pkt_prepare = NULL;
3941*c1d14583SBruce Richardson 		if (ad->tx_use_avx512) {
3942*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3943*c1d14583SBruce Richardson 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3944*c1d14583SBruce Richardson 				PMD_DRV_LOG(NOTICE,
3945*c1d14583SBruce Richardson 					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
3946*c1d14583SBruce Richardson 					    dev->data->port_id);
3947*c1d14583SBruce Richardson 				dev->tx_pkt_burst =
3948*c1d14583SBruce Richardson 					ice_xmit_pkts_vec_avx512_offload;
3949*c1d14583SBruce Richardson 				dev->tx_pkt_prepare = ice_prep_pkts;
3950*c1d14583SBruce Richardson 			} else {
3951*c1d14583SBruce Richardson 				PMD_DRV_LOG(NOTICE,
3952*c1d14583SBruce Richardson 					    "Using AVX512 Vector Tx (port %d).",
3953*c1d14583SBruce Richardson 					    dev->data->port_id);
3954*c1d14583SBruce Richardson 				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3955*c1d14583SBruce Richardson 			}
3956*c1d14583SBruce Richardson #endif
3957*c1d14583SBruce Richardson 		} else {
3958*c1d14583SBruce Richardson 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3959*c1d14583SBruce Richardson 				PMD_DRV_LOG(NOTICE,
3960*c1d14583SBruce Richardson 					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
3961*c1d14583SBruce Richardson 					    dev->data->port_id);
3962*c1d14583SBruce Richardson 				dev->tx_pkt_burst =
3963*c1d14583SBruce Richardson 					ice_xmit_pkts_vec_avx2_offload;
3964*c1d14583SBruce Richardson 				dev->tx_pkt_prepare = ice_prep_pkts;
3965*c1d14583SBruce Richardson 			} else {
3966*c1d14583SBruce Richardson 				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3967*c1d14583SBruce Richardson 					    ad->tx_use_avx2 ? "avx2 " : "",
3968*c1d14583SBruce Richardson 					    dev->data->port_id);
3969*c1d14583SBruce Richardson 				dev->tx_pkt_burst = ad->tx_use_avx2 ?
3970*c1d14583SBruce Richardson 						    ice_xmit_pkts_vec_avx2 :
3971*c1d14583SBruce Richardson 						    ice_xmit_pkts_vec;
3972*c1d14583SBruce Richardson 			}
3973*c1d14583SBruce Richardson 		}
3974*c1d14583SBruce Richardson 
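		/* When mbuf checking is enabled via devargs, wrap the selected
		 * burst function with the diagnostic checker.
		 */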
3975*c1d14583SBruce Richardson 		if (mbuf_check) {
3976*c1d14583SBruce Richardson 			ad->tx_pkt_burst = dev->tx_pkt_burst;
3977*c1d14583SBruce Richardson 			dev->tx_pkt_burst = ice_xmit_pkts_check;
3978*c1d14583SBruce Richardson 		}
3979*c1d14583SBruce Richardson 		return;
3980*c1d14583SBruce Richardson 	}
3981*c1d14583SBruce Richardson #endif
3982*c1d14583SBruce Richardson 
3983*c1d14583SBruce Richardson 	if (ad->tx_simple_allowed) {
3984*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3985*c1d14583SBruce Richardson 		dev->tx_pkt_burst = ice_xmit_pkts_simple;
3986*c1d14583SBruce Richardson 		dev->tx_pkt_prepare = NULL;
3987*c1d14583SBruce Richardson 	} else {
3988*c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3989*c1d14583SBruce Richardson 		dev->tx_pkt_burst = ice_xmit_pkts;
3990*c1d14583SBruce Richardson 		dev->tx_pkt_prepare = ice_prep_pkts;
3991*c1d14583SBruce Richardson 	}
3992*c1d14583SBruce Richardson 
3993*c1d14583SBruce Richardson 	if (mbuf_check) {
3994*c1d14583SBruce Richardson 		ad->tx_pkt_burst = dev->tx_pkt_burst;
3995*c1d14583SBruce Richardson 		dev->tx_pkt_burst = ice_xmit_pkts_check;
3996*c1d14583SBruce Richardson 	}
3997*c1d14583SBruce Richardson }
3998*c1d14583SBruce Richardson 
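/* Human-readable names of the Tx burst functions, reported by
 * ice_tx_burst_mode_get().
 */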
3999*c1d14583SBruce Richardson static const struct {
4000*c1d14583SBruce Richardson 	eth_tx_burst_t pkt_burst;
4001*c1d14583SBruce Richardson 	const char *info;
4002*c1d14583SBruce Richardson } ice_tx_burst_infos[] = {
4003*c1d14583SBruce Richardson 	{ ice_xmit_pkts_simple,   "Scalar Simple" },
4004*c1d14583SBruce Richardson 	{ ice_xmit_pkts,          "Scalar" },
4005*c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
4006*c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
4007*c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
4008*c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
4009*c1d14583SBruce Richardson #endif
4010*c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx2,         "Vector AVX2" },
4011*c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
4012*c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec,              "Vector SSE" },
4013*c1d14583SBruce Richardson #endif
4014*c1d14583SBruce Richardson };
4015*c1d14583SBruce Richardson 
4016*c1d14583SBruce Richardson int
4017*c1d14583SBruce Richardson ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4018*c1d14583SBruce Richardson 		      struct rte_eth_burst_mode *mode)
4019*c1d14583SBruce Richardson {
4020*c1d14583SBruce Richardson 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4021*c1d14583SBruce Richardson 	int ret = -EINVAL;
4022*c1d14583SBruce Richardson 	unsigned int i;
4023*c1d14583SBruce Richardson 
4024*c1d14583SBruce Richardson 	for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
4025*c1d14583SBruce Richardson 		if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
4026*c1d14583SBruce Richardson 			snprintf(mode->info, sizeof(mode->info), "%s",
4027*c1d14583SBruce Richardson 				 ice_tx_burst_infos[i].info);
4028*c1d14583SBruce Richardson 			ret = 0;
4029*c1d14583SBruce Richardson 			break;
4030*c1d14583SBruce Richardson 		}
4031*c1d14583SBruce Richardson 	}
4032*c1d14583SBruce Richardson 
4033*c1d14583SBruce Richardson 	return ret;
4034*c1d14583SBruce Richardson }
4035*c1d14583SBruce Richardson 
4036*c1d14583SBruce Richardson /* For the meaning of each value, refer to the hardware datasheet.
4037*c1d14583SBruce Richardson  *
4038*c1d14583SBruce Richardson  * @note: fix ice_dev_supported_ptypes_get() if anything changes here.
4039*c1d14583SBruce Richardson  */
4040*c1d14583SBruce Richardson static inline uint32_t
4041*c1d14583SBruce Richardson ice_get_default_pkt_type(uint16_t ptype)
4042*c1d14583SBruce Richardson {
4043*c1d14583SBruce Richardson 	static const alignas(RTE_CACHE_LINE_SIZE) uint32_t type_table[ICE_MAX_PKT_TYPE] = {
4044*c1d14583SBruce Richardson 		/* L2 types */
4045*c1d14583SBruce Richardson 		/* [0] reserved */
4046*c1d14583SBruce Richardson 		[1] = RTE_PTYPE_L2_ETHER,
4047*c1d14583SBruce Richardson 		[2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
4048*c1d14583SBruce Richardson 		/* [3] - [5] reserved */
4049*c1d14583SBruce Richardson 		[6] = RTE_PTYPE_L2_ETHER_LLDP,
4050*c1d14583SBruce Richardson 		/* [7] - [10] reserved */
4051*c1d14583SBruce Richardson 		[11] = RTE_PTYPE_L2_ETHER_ARP,
4052*c1d14583SBruce Richardson 		/* [12] - [21] reserved */
4053*c1d14583SBruce Richardson 
4054*c1d14583SBruce Richardson 		/* Non tunneled IPv4 */
4055*c1d14583SBruce Richardson 		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4056*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_FRAG,
4057*c1d14583SBruce Richardson 		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4058*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_NONFRAG,
4059*c1d14583SBruce Richardson 		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4060*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_UDP,
4061*c1d14583SBruce Richardson 		/* [25] reserved */
4062*c1d14583SBruce Richardson 		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4063*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_TCP,
4064*c1d14583SBruce Richardson 		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4065*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_SCTP,
4066*c1d14583SBruce Richardson 		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4067*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_ICMP,
4068*c1d14583SBruce Richardson 
4069*c1d14583SBruce Richardson 		/* IPv4 --> IPv4 */
4070*c1d14583SBruce Richardson 		[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4071*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4072*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4073*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4074*c1d14583SBruce Richardson 		[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4075*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4076*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4077*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4078*c1d14583SBruce Richardson 		[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4079*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4080*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4081*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4082*c1d14583SBruce Richardson 		/* [32] reserved */
4083*c1d14583SBruce Richardson 		[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4084*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4085*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4086*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4087*c1d14583SBruce Richardson 		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4088*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4089*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4090*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4091*c1d14583SBruce Richardson 		[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4092*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4093*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4094*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4095*c1d14583SBruce Richardson 
4096*c1d14583SBruce Richardson 		/* IPv4 --> IPv6 */
4097*c1d14583SBruce Richardson 		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4098*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4099*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4100*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4101*c1d14583SBruce Richardson 		[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4102*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4103*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4104*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4105*c1d14583SBruce Richardson 		[38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4106*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4107*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4108*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4109*c1d14583SBruce Richardson 		/* [39] reserved */
4110*c1d14583SBruce Richardson 		[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4111*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4112*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4113*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4114*c1d14583SBruce Richardson 		[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4115*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4116*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4117*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4118*c1d14583SBruce Richardson 		[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4119*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4120*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4121*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4122*c1d14583SBruce Richardson 
4123*c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN */
4124*c1d14583SBruce Richardson 		[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4125*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT,
4126*c1d14583SBruce Richardson 
4127*c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
4128*c1d14583SBruce Richardson 		[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4129*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4130*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4131*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4132*c1d14583SBruce Richardson 		[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4133*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4134*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4135*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4136*c1d14583SBruce Richardson 		[46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4137*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4138*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4139*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4140*c1d14583SBruce Richardson 		/* [47] reserved */
4141*c1d14583SBruce Richardson 		[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4142*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4143*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4144*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4145*c1d14583SBruce Richardson 		[49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4146*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4147*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4148*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4149*c1d14583SBruce Richardson 		[50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4150*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4151*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4152*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4153*c1d14583SBruce Richardson 
4154*c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
4155*c1d14583SBruce Richardson 		[51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4156*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4157*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4158*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4159*c1d14583SBruce Richardson 		[52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4160*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4161*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4162*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4163*c1d14583SBruce Richardson 		[53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4164*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4165*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4166*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4167*c1d14583SBruce Richardson 		/* [54] reserved */
4168*c1d14583SBruce Richardson 		[55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4169*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4170*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4171*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4172*c1d14583SBruce Richardson 		[56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4173*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4174*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4175*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4176*c1d14583SBruce Richardson 		[57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4177*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4178*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4179*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4180*c1d14583SBruce Richardson 
4181*c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
4182*c1d14583SBruce Richardson 		[58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4183*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
4184*c1d14583SBruce Richardson 
4185*c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
4186*c1d14583SBruce Richardson 		[59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4187*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4188*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4189*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4190*c1d14583SBruce Richardson 		[60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4191*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4192*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4193*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4194*c1d14583SBruce Richardson 		[61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4195*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4196*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4197*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4198*c1d14583SBruce Richardson 		/* [62] reserved */
4199*c1d14583SBruce Richardson 		[63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4200*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4201*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4202*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4203*c1d14583SBruce Richardson 		[64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4204*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4205*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4206*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4207*c1d14583SBruce Richardson 		[65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4208*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4209*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4210*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4211*c1d14583SBruce Richardson 
4212*c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
4213*c1d14583SBruce Richardson 		[66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4214*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4215*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4216*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4217*c1d14583SBruce Richardson 		[67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4218*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4219*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4220*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4221*c1d14583SBruce Richardson 		[68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4222*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4223*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4224*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4225*c1d14583SBruce Richardson 		/* [69] reserved */
4226*c1d14583SBruce Richardson 		[70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4227*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4228*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4229*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4230*c1d14583SBruce Richardson 		[71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4231*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4232*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4233*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4234*c1d14583SBruce Richardson 		[72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4235*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4236*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4237*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4238*c1d14583SBruce Richardson 		/* [73] - [87] reserved */
4239*c1d14583SBruce Richardson 
4240*c1d14583SBruce Richardson 		/* Non tunneled IPv6 */
4241*c1d14583SBruce Richardson 		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4242*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_FRAG,
4243*c1d14583SBruce Richardson 		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4244*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_NONFRAG,
4245*c1d14583SBruce Richardson 		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4246*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_UDP,
4247*c1d14583SBruce Richardson 		/* [91] reserved */
4248*c1d14583SBruce Richardson 		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4249*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_TCP,
4250*c1d14583SBruce Richardson 		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4251*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_SCTP,
4252*c1d14583SBruce Richardson 		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4253*c1d14583SBruce Richardson 		       RTE_PTYPE_L4_ICMP,
4254*c1d14583SBruce Richardson 
4255*c1d14583SBruce Richardson 		/* IPv6 --> IPv4 */
4256*c1d14583SBruce Richardson 		[95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4257*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4258*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4259*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4260*c1d14583SBruce Richardson 		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4261*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4262*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4263*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4264*c1d14583SBruce Richardson 		[97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4265*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4266*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4267*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4268*c1d14583SBruce Richardson 		/* [98] reserved */
4269*c1d14583SBruce Richardson 		[99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4270*c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4271*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4272*c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4273*c1d14583SBruce Richardson 		[100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4274*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4275*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4276*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4277*c1d14583SBruce Richardson 		[101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4278*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4279*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4280*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4281*c1d14583SBruce Richardson 
4282*c1d14583SBruce Richardson 		/* IPv6 --> IPv6 */
4283*c1d14583SBruce Richardson 		[102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4284*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4285*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4286*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4287*c1d14583SBruce Richardson 		[103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4288*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4289*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4290*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4291*c1d14583SBruce Richardson 		[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4292*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4293*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4294*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4295*c1d14583SBruce Richardson 		/* [105] reserved */
4296*c1d14583SBruce Richardson 		[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4297*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4298*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4299*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4300*c1d14583SBruce Richardson 		[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4301*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4302*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4303*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4304*c1d14583SBruce Richardson 		[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4305*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4306*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4307*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4308*c1d14583SBruce Richardson 
4309*c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN */
4310*c1d14583SBruce Richardson 		[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4311*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT,
4312*c1d14583SBruce Richardson 
4313*c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
4314*c1d14583SBruce Richardson 		[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4315*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4316*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4317*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4318*c1d14583SBruce Richardson 		[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4319*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4320*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4321*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4322*c1d14583SBruce Richardson 		[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4323*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4324*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4325*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4326*c1d14583SBruce Richardson 		/* [113] reserved */
4327*c1d14583SBruce Richardson 		[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4328*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4329*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4330*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4331*c1d14583SBruce Richardson 		[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4332*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4333*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4334*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4335*c1d14583SBruce Richardson 		[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4336*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4337*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4338*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4339*c1d14583SBruce Richardson 
4340*c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
4341*c1d14583SBruce Richardson 		[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4342*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4343*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4344*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4345*c1d14583SBruce Richardson 		[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4346*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4347*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4348*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4349*c1d14583SBruce Richardson 		[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4350*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4351*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4352*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4353*c1d14583SBruce Richardson 		/* [120] reserved */
4354*c1d14583SBruce Richardson 		[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4355*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4356*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4357*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4358*c1d14583SBruce Richardson 		[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4359*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4360*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4361*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4362*c1d14583SBruce Richardson 		[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4363*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4364*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4365*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4366*c1d14583SBruce Richardson 
4367*c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
4368*c1d14583SBruce Richardson 		[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4369*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
4370*c1d14583SBruce Richardson 
4371*c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
4372*c1d14583SBruce Richardson 		[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4373*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4374*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4375*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4376*c1d14583SBruce Richardson 		[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4377*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4378*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4379*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4380*c1d14583SBruce Richardson 		[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4381*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4382*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4383*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4384*c1d14583SBruce Richardson 		/* [128] reserved */
4385*c1d14583SBruce Richardson 		[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4386*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4387*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4388*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4389*c1d14583SBruce Richardson 		[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4390*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4391*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4392*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4393*c1d14583SBruce Richardson 		[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4394*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4395*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4396*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4397*c1d14583SBruce Richardson 
4398*c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
4399*c1d14583SBruce Richardson 		[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4400*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4401*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4402*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4403*c1d14583SBruce Richardson 		[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4404*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4405*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4406*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4407*c1d14583SBruce Richardson 		[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4408*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4409*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4410*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4411*c1d14583SBruce Richardson 		/* [135] reserved */
4412*c1d14583SBruce Richardson 		[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4413*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4414*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4415*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4416*c1d14583SBruce Richardson 		[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4417*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4418*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4419*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4420*c1d14583SBruce Richardson 		[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4421*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4422*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4423*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4424*c1d14583SBruce Richardson 		/* [139] - [299] reserved */
4425*c1d14583SBruce Richardson 
4426*c1d14583SBruce Richardson 		/* PPPoE */
4427*c1d14583SBruce Richardson 		[300] = RTE_PTYPE_L2_ETHER_PPPOE,
4428*c1d14583SBruce Richardson 		[301] = RTE_PTYPE_L2_ETHER_PPPOE,
4429*c1d14583SBruce Richardson 
4430*c1d14583SBruce Richardson 		/* PPPoE --> IPv4 */
4431*c1d14583SBruce Richardson 		[302] = RTE_PTYPE_L2_ETHER_PPPOE |
4432*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4433*c1d14583SBruce Richardson 			RTE_PTYPE_L4_FRAG,
4434*c1d14583SBruce Richardson 		[303] = RTE_PTYPE_L2_ETHER_PPPOE |
4435*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4436*c1d14583SBruce Richardson 			RTE_PTYPE_L4_NONFRAG,
4437*c1d14583SBruce Richardson 		[304] = RTE_PTYPE_L2_ETHER_PPPOE |
4438*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4439*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4440*c1d14583SBruce Richardson 		[305] = RTE_PTYPE_L2_ETHER_PPPOE |
4441*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4442*c1d14583SBruce Richardson 			RTE_PTYPE_L4_TCP,
4443*c1d14583SBruce Richardson 		[306] = RTE_PTYPE_L2_ETHER_PPPOE |
4444*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4445*c1d14583SBruce Richardson 			RTE_PTYPE_L4_SCTP,
4446*c1d14583SBruce Richardson 		[307] = RTE_PTYPE_L2_ETHER_PPPOE |
4447*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4448*c1d14583SBruce Richardson 			RTE_PTYPE_L4_ICMP,
4449*c1d14583SBruce Richardson 
4450*c1d14583SBruce Richardson 		/* PPPoE --> IPv6 */
4451*c1d14583SBruce Richardson 		[308] = RTE_PTYPE_L2_ETHER_PPPOE |
4452*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4453*c1d14583SBruce Richardson 			RTE_PTYPE_L4_FRAG,
4454*c1d14583SBruce Richardson 		[309] = RTE_PTYPE_L2_ETHER_PPPOE |
4455*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4456*c1d14583SBruce Richardson 			RTE_PTYPE_L4_NONFRAG,
4457*c1d14583SBruce Richardson 		[310] = RTE_PTYPE_L2_ETHER_PPPOE |
4458*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4459*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4460*c1d14583SBruce Richardson 		[311] = RTE_PTYPE_L2_ETHER_PPPOE |
4461*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4462*c1d14583SBruce Richardson 			RTE_PTYPE_L4_TCP,
4463*c1d14583SBruce Richardson 		[312] = RTE_PTYPE_L2_ETHER_PPPOE |
4464*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4465*c1d14583SBruce Richardson 			RTE_PTYPE_L4_SCTP,
4466*c1d14583SBruce Richardson 		[313] = RTE_PTYPE_L2_ETHER_PPPOE |
4467*c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4468*c1d14583SBruce Richardson 			RTE_PTYPE_L4_ICMP,
4469*c1d14583SBruce Richardson 		/* [314] - [324] reserved */
4470*c1d14583SBruce Richardson 
4471*c1d14583SBruce Richardson 		/* IPv4/IPv6 --> GTPC/GTPU */
4472*c1d14583SBruce Richardson 		[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4473*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4474*c1d14583SBruce Richardson 		[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4475*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4476*c1d14583SBruce Richardson 		[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4477*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4478*c1d14583SBruce Richardson 		[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4479*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4480*c1d14583SBruce Richardson 		[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4481*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU,
4482*c1d14583SBruce Richardson 		[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4483*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU,
4484*c1d14583SBruce Richardson 
4485*c1d14583SBruce Richardson 		/* IPv4 --> GTPU --> IPv4 */
4486*c1d14583SBruce Richardson 		[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4487*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4488*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4489*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4490*c1d14583SBruce Richardson 		[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4491*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4492*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4493*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4494*c1d14583SBruce Richardson 		[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4495*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4496*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4497*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4498*c1d14583SBruce Richardson 		[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4499*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4500*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4501*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4502*c1d14583SBruce Richardson 		[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4503*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4504*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4505*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4506*c1d14583SBruce Richardson 
4507*c1d14583SBruce Richardson 		/* IPv6 --> GTPU --> IPv4 */
4508*c1d14583SBruce Richardson 		[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4509*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4510*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4511*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4512*c1d14583SBruce Richardson 		[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4513*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4514*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4515*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4516*c1d14583SBruce Richardson 		[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4517*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4518*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4519*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4520*c1d14583SBruce Richardson 		[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4521*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4522*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4523*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4524*c1d14583SBruce Richardson 		[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4525*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4526*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4527*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4528*c1d14583SBruce Richardson 
4529*c1d14583SBruce Richardson 		/* IPv4 --> GTPU --> IPv6 */
4530*c1d14583SBruce Richardson 		[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4531*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4532*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4533*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4534*c1d14583SBruce Richardson 		[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4535*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4536*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4537*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4538*c1d14583SBruce Richardson 		[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4539*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4540*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4541*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4542*c1d14583SBruce Richardson 		[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4543*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4544*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4545*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4546*c1d14583SBruce Richardson 		[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4547*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4548*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4549*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4550*c1d14583SBruce Richardson 
4551*c1d14583SBruce Richardson 		/* IPv6 --> GTPU --> IPv6 */
4552*c1d14583SBruce Richardson 		[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4553*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4554*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4555*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4556*c1d14583SBruce Richardson 		[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4557*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4558*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4559*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4560*c1d14583SBruce Richardson 		[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4561*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4562*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4563*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4564*c1d14583SBruce Richardson 		[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4565*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4566*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4567*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4568*c1d14583SBruce Richardson 		[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4569*c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4570*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4571*c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4572*c1d14583SBruce Richardson 
4573*c1d14583SBruce Richardson 		/* IPv4 --> UDP ECPRI */
4574*c1d14583SBruce Richardson 		[372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4575*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4576*c1d14583SBruce Richardson 		[373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4577*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4578*c1d14583SBruce Richardson 		[374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4579*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4580*c1d14583SBruce Richardson 		[375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4581*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4582*c1d14583SBruce Richardson 		[376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4583*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4584*c1d14583SBruce Richardson 		[377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4585*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4586*c1d14583SBruce Richardson 		[378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4587*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4588*c1d14583SBruce Richardson 		[379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4589*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4590*c1d14583SBruce Richardson 		[380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4591*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4592*c1d14583SBruce Richardson 		[381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4593*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4594*c1d14583SBruce Richardson 
4595*c1d14583SBruce Richardson 		/* IPV6 --> UDP ECPRI */
4596*c1d14583SBruce Richardson 		[382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4597*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4598*c1d14583SBruce Richardson 		[383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4599*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4600*c1d14583SBruce Richardson 		[384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4601*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4602*c1d14583SBruce Richardson 		[385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4603*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4604*c1d14583SBruce Richardson 		[386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4605*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4606*c1d14583SBruce Richardson 		[387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4607*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4608*c1d14583SBruce Richardson 		[388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4609*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4610*c1d14583SBruce Richardson 		[389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4611*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4612*c1d14583SBruce Richardson 		[390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4613*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4614*c1d14583SBruce Richardson 		[391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4615*c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4616*c1d14583SBruce Richardson 		/* All others reserved */
4617*c1d14583SBruce Richardson 	};
4618*c1d14583SBruce Richardson 
4619*c1d14583SBruce Richardson 	return type_table[ptype];
4620*c1d14583SBruce Richardson }
4621*c1d14583SBruce Richardson 
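/* Initialise the per-adapter ptype translation table with the default
 * hardware ptype to mbuf packet type mapping.
 */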
4622*c1d14583SBruce Richardson void __rte_cold
4623*c1d14583SBruce Richardson ice_set_default_ptype_table(struct rte_eth_dev *dev)
4624*c1d14583SBruce Richardson {
4625*c1d14583SBruce Richardson 	struct ice_adapter *ad =
4626*c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4627*c1d14583SBruce Richardson 	int i;
4628*c1d14583SBruce Richardson 
4629*c1d14583SBruce Richardson 	for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4630*c1d14583SBruce Richardson 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4631*c1d14583SBruce Richardson }
4632*c1d14583SBruce Richardson 
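/* Fields of quad-word 1 of the Flow Director programming status
 * descriptor write-back.
 */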
4633*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S	1
4634*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M	\
4635*c1d14583SBruce Richardson 			(0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4636*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4637*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4638*c1d14583SBruce Richardson 
4639*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S	4
4640*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M	\
4641*c1d14583SBruce Richardson 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4642*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S	5
4643*c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M	\
4644*c1d14583SBruce Richardson 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4645*c1d14583SBruce Richardson 
4646*c1d14583SBruce Richardson /*
4647*c1d14583SBruce Richardson  * Check the Flow Director programming status descriptor in the Rx queue.
4648*c1d14583SBruce Richardson  * This is done after a Flow Director filter has been programmed on the
4649*c1d14583SBruce Richardson  * Tx queue.
4650*c1d14583SBruce Richardson  */
4651*c1d14583SBruce Richardson static inline int
4652*c1d14583SBruce Richardson ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4653*c1d14583SBruce Richardson {
4654*c1d14583SBruce Richardson 	volatile union ice_32byte_rx_desc *rxdp;
4655*c1d14583SBruce Richardson 	uint64_t qword1;
4656*c1d14583SBruce Richardson 	uint32_t rx_status;
4657*c1d14583SBruce Richardson 	uint32_t error;
4658*c1d14583SBruce Richardson 	uint32_t id;
4659*c1d14583SBruce Richardson 	int ret = -EAGAIN;
4660*c1d14583SBruce Richardson 
4661*c1d14583SBruce Richardson 	rxdp = (volatile union ice_32byte_rx_desc *)
4662*c1d14583SBruce Richardson 		(&rxq->rx_ring[rxq->rx_tail]);
4663*c1d14583SBruce Richardson 	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4664*c1d14583SBruce Richardson 	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4665*c1d14583SBruce Richardson 			>> ICE_RXD_QW1_STATUS_S;
4666*c1d14583SBruce Richardson 
4667*c1d14583SBruce Richardson 	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4668*c1d14583SBruce Richardson 		ret = 0;
4669*c1d14583SBruce Richardson 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4670*c1d14583SBruce Richardson 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4671*c1d14583SBruce Richardson 		id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4672*c1d14583SBruce Richardson 			ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4673*c1d14583SBruce Richardson 		if (error) {
4674*c1d14583SBruce Richardson 			if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4675*c1d14583SBruce Richardson 				PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4676*c1d14583SBruce Richardson 			else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4677*c1d14583SBruce Richardson 				PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4678*c1d14583SBruce Richardson 			ret = -EINVAL;
4679*c1d14583SBruce Richardson 			goto err;
4680*c1d14583SBruce Richardson 		}
4681*c1d14583SBruce Richardson 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4682*c1d14583SBruce Richardson 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4683*c1d14583SBruce Richardson 		if (error) {
4684*c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4685*c1d14583SBruce Richardson 			ret = -EINVAL;
4686*c1d14583SBruce Richardson 		}
4687*c1d14583SBruce Richardson err:
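		/* Consume the status descriptor: clear it, advance the software
		 * tail and leave the hardware tail register one entry behind it.
		 */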
4688*c1d14583SBruce Richardson 		rxdp->wb.qword1.status_error_len = 0;
4689*c1d14583SBruce Richardson 		rxq->rx_tail++;
4690*c1d14583SBruce Richardson 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4691*c1d14583SBruce Richardson 			rxq->rx_tail = 0;
4692*c1d14583SBruce Richardson 		if (rxq->rx_tail == 0)
4693*c1d14583SBruce Richardson 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4694*c1d14583SBruce Richardson 		else
4695*c1d14583SBruce Richardson 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4696*c1d14583SBruce Richardson 	}
4697*c1d14583SBruce Richardson 
4698*c1d14583SBruce Richardson 	return ret;
4699*c1d14583SBruce Richardson }
4700*c1d14583SBruce Richardson 
4701*c1d14583SBruce Richardson #define ICE_FDIR_MAX_WAIT_US 10000
4702*c1d14583SBruce Richardson 
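/* Program a Flow Director filter: post the filter descriptor plus a dummy
 * data descriptor on the FDIR Tx queue, wait (up to ICE_FDIR_MAX_WAIT_US
 * microseconds) for the descriptor to be marked done, then poll the FDIR
 * Rx queue for the resulting programming status.
 */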
4703*c1d14583SBruce Richardson int
4704*c1d14583SBruce Richardson ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4705*c1d14583SBruce Richardson {
4706*c1d14583SBruce Richardson 	struct ice_tx_queue *txq = pf->fdir.txq;
4707*c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = pf->fdir.rxq;
4708*c1d14583SBruce Richardson 	volatile struct ice_fltr_desc *fdirdp;
4709*c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txdp;
4710*c1d14583SBruce Richardson 	uint32_t td_cmd;
4711*c1d14583SBruce Richardson 	uint16_t i;
4712*c1d14583SBruce Richardson 
4713*c1d14583SBruce Richardson 	fdirdp = (volatile struct ice_fltr_desc *)
4714*c1d14583SBruce Richardson 		(&txq->tx_ring[txq->tx_tail]);
4715*c1d14583SBruce Richardson 	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4716*c1d14583SBruce Richardson 	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4717*c1d14583SBruce Richardson 
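	/* The companion data descriptor points at the pre-built dummy FDIR
	 * packet; the DUMMY flag marks it as used for filter programming
	 * rather than normal transmission.
	 */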
4718*c1d14583SBruce Richardson 	txdp = &txq->tx_ring[txq->tx_tail + 1];
4719*c1d14583SBruce Richardson 	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4720*c1d14583SBruce Richardson 	td_cmd = ICE_TX_DESC_CMD_EOP |
4721*c1d14583SBruce Richardson 		ICE_TX_DESC_CMD_RS  |
4722*c1d14583SBruce Richardson 		ICE_TX_DESC_CMD_DUMMY;
4723*c1d14583SBruce Richardson 
4724*c1d14583SBruce Richardson 	txdp->cmd_type_offset_bsz =
4725*c1d14583SBruce Richardson 		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4726*c1d14583SBruce Richardson 
4727*c1d14583SBruce Richardson 	txq->tx_tail += 2;
4728*c1d14583SBruce Richardson 	if (txq->tx_tail >= txq->nb_tx_desc)
4729*c1d14583SBruce Richardson 		txq->tx_tail = 0;
4730*c1d14583SBruce Richardson 	/* Update the tx tail register */
4731*c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4732*c1d14583SBruce Richardson 	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4733*c1d14583SBruce Richardson 		if ((txdp->cmd_type_offset_bsz &
4734*c1d14583SBruce Richardson 		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4735*c1d14583SBruce Richardson 		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4736*c1d14583SBruce Richardson 			break;
4737*c1d14583SBruce Richardson 		rte_delay_us(1);
4738*c1d14583SBruce Richardson 	}
4739*c1d14583SBruce Richardson 	if (i >= ICE_FDIR_MAX_WAIT_US) {
4740*c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR,
4741*c1d14583SBruce Richardson 			    "Failed to program FDIR filter: timed out waiting for DD on tx queue.");
4742*c1d14583SBruce Richardson 		return -ETIMEDOUT;
4743*c1d14583SBruce Richardson 	}
4744*c1d14583SBruce Richardson 
4745*c1d14583SBruce Richardson 	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4746*c1d14583SBruce Richardson 		int ret;
4747*c1d14583SBruce Richardson 
4748*c1d14583SBruce Richardson 		ret = ice_check_fdir_programming_status(rxq);
4749*c1d14583SBruce Richardson 		if (ret == -EAGAIN)
4750*c1d14583SBruce Richardson 			rte_delay_us(1);
4751*c1d14583SBruce Richardson 		else
4752*c1d14583SBruce Richardson 			return ret;
4753*c1d14583SBruce Richardson 	}
4754*c1d14583SBruce Richardson 
4755*c1d14583SBruce Richardson 	PMD_DRV_LOG(ERR,
4756*c1d14583SBruce Richardson 		    "Failed to program FDIR filter: timed out waiting for programming status on rx queue.");
4757*c1d14583SBruce Richardson 	return -ETIMEDOUT;
4760*c1d14583SBruce Richardson }