/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <ethdev_driver.h>
#include <rte_net.h>
#include <rte_vect.h>

#include "ice_rxtx.h"
#include "ice_rxtx_vec_common.h"

#define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |		 \
		RTE_MBUF_F_TX_L4_MASK |		 \
		RTE_MBUF_F_TX_TCP_SEG |		 \
		RTE_MBUF_F_TX_UDP_SEG |		 \
		RTE_MBUF_F_TX_OUTER_IP_CKSUM)

/**
 * The mbuf dynamic field pointer for protocol extraction metadata.
 */
#define ICE_DYNF_PROTO_XTR_METADATA(m, n) \
	RTE_MBUF_DYNFIELD((m), (n), uint32_t *)

static int
ice_monitor_callback(const uint64_t value,
		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
{
	const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
	/*
	 * we expect the DD bit to be set to 1 if this descriptor was already
	 * written to.
	 */
	return (value & m) == m ? -1 : 0;
}

int
ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	volatile union ice_rx_flex_desc *rxdp;
	struct ice_rx_queue *rxq = rx_queue;
	uint16_t desc;

	desc = rxq->rx_tail;
	rxdp = &rxq->rx_ring[desc];
	/* watch for changes in status bit */
	pmc->addr = &rxdp->wb.status_error0;

	/* comparison callback */
	pmc->fn = ice_monitor_callback;

	/* register is 16-bit */
	pmc->size = sizeof(uint16_t);

	return 0;
}
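
/*
 * Usage sketch (not part of the driver): an application normally reaches
 * ice_get_monitor_addr() through the ethdev power-monitoring API rather
 * than calling it directly. A hedged example, assuming a configured and
 * started port/queue:
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
 *		rte_power_monitor(&pmc, rte_get_tsc_cycles() + timeout_cycles);
 *
 * The callback above makes the sleep abort early when the DD bit of the
 * watched descriptor is already set, i.e. a packet has already arrived.
 */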


static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
{
	static uint8_t rxdid_map[] = {
		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
	};

	return xtr_type < RTE_DIM(rxdid_map) ?
				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
}
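
/*
 * The xtr_type values in the map above are selected per queue through the
 * PMD's "proto_xtr" devargs. A hedged invocation example (the exact syntax
 * is documented in the ice PMD guide and may differ between releases):
 *
 *	dpdk-testpmd -a 18:00.0,proto_xtr='[(1,2-3):tcp,4:vlan]' -- -i
 */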

static inline void
ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
				       struct rte_mbuf *mb,
				       volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms *desc =
			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);

	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}
#endif
}

static inline void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
				   struct rte_mbuf *mb,
				   volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	uint16_t stat_err;
#endif

	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}
#endif
}

static inline void
ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
				      struct rte_mbuf *mb,
				      volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms *desc =
			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

	if (rxq->xtr_ol_flag) {
		uint32_t metadata = 0;

		stat_err = rte_le_to_cpu_16(desc->status_error1);

		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);

		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
			metadata |=
				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;

		if (metadata) {
			mb->ol_flags |= rxq->xtr_ol_flag;

			*ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata;
		}
	}
#else
	RTE_SET_USED(rxq);
#endif
}

static inline void
ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
				      struct rte_mbuf *mb,
				      volatile union ice_rx_flex_desc *rxdp)
{
	volatile struct ice_32b_rx_flex_desc_comms *desc =
			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
	uint16_t stat_err;

	stat_err = rte_le_to_cpu_16(desc->status_error0);
	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
	}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	if (desc->flow_id != 0xFFFFFFFF) {
		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
	}

	if (rxq->xtr_ol_flag) {
		uint32_t metadata = 0;

		if (desc->flex_ts.flex.aux0 != 0xFFFF)
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);

		if (metadata) {
			mb->ol_flags |= rxq->xtr_ol_flag;

			*ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata;
		}
	}
#else
	RTE_SET_USED(rxq);
#endif
}
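
/*
 * On the application side, the metadata stored by the two aux handlers
 * above can be read back through the helpers exported in rte_pmd_ice.h.
 * A hedged sketch, assuming the dynamic field was registered (i.e. the
 * proto_xtr devargs were given):
 *
 *	#include <rte_pmd_ice.h>
 *
 *	if (rte_net_ice_dynf_proto_xtr_metadata_avail())
 *		printf("proto_xtr metadata: %#x\n",
 *		       rte_net_ice_dynf_proto_xtr_metadata_get(mb));
 */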

static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
	[ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
	[ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
	[ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
	[ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
};

void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
	rxq->rxdid = rxdid;

	switch (rxdid) {
	case ICE_RXDID_COMMS_AUX_VLAN:
	case ICE_RXDID_COMMS_AUX_IPV4:
	case ICE_RXDID_COMMS_AUX_IPV6:
	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
	case ICE_RXDID_COMMS_AUX_TCP:
	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
		break;
	case ICE_RXDID_COMMS_GENERIC:
		/* fallthrough */
	case ICE_RXDID_COMMS_OVS:
		break;

	default:
		/* update this according to the RXDID for PROTO_XTR_NONE */
		rxq->rxdid = ICE_RXDID_COMMS_OVS;
		break;
	}

	if (rxq->xtr_field_offs == -1)
		rxq->xtr_ol_flag = 0;
}

static int
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
{
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
	struct ice_rlan_ctx rx_ctx;
	uint16_t buf_size;
	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
	uint32_t regval;
	struct ice_adapter *ad = rxq->vsi->adapter;
	uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
	int err;

	/* Set the buffer size, as header split is disabled. */
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
			      RTE_PKTMBUF_HEADROOM);
	rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE);
	rxq->max_pkt_len =
		RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
			frame_size);

	if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
	    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
		PMD_DRV_LOG(ERR, "maximum packet length must "
			    "be larger than %u and smaller than %u",
			    (uint32_t)RTE_ETHER_MIN_LEN,
			    (uint32_t)ICE_FRAME_SIZE_MAX);
		return -EINVAL;
	}

	if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
		/* Register mbuf field and flag for Rx timestamp */
		err = rte_mbuf_dyn_rx_timestamp_register(
				&ice_timestamp_dynfield_offset,
				&ice_timestamp_dynflag);
		if (err) {
			PMD_DRV_LOG(ERR,
				"Cannot register mbuf field/flag for timestamp");
			return -EINVAL;
		}
		rxq->ts_enable = true;
	}

	memset(&rx_ctx, 0, sizeof(rx_ctx));

	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
		uint32_t proto_hdr;
		proto_hdr = rxq->rxseg[0].proto_hdr;

		if (proto_hdr == RTE_PTYPE_UNKNOWN) {
			PMD_DRV_LOG(ERR, "Buffer split protocol must be configured");
			return -EINVAL;
		}

		switch (proto_hdr & RTE_PTYPE_L4_MASK) {
		case RTE_PTYPE_L4_TCP:
		case RTE_PTYPE_L4_UDP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
			goto set_hsplit_finish;
		case RTE_PTYPE_L4_SCTP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_L3_MASK) {
		case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
		case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_L2_MASK) {
		case RTE_PTYPE_L2_ETHER:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) {
		case RTE_PTYPE_INNER_L4_TCP:
		case RTE_PTYPE_INNER_L4_UDP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
			goto set_hsplit_finish;
		case RTE_PTYPE_INNER_L4_SCTP:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) {
		case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
		case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) {
		case RTE_PTYPE_INNER_L2_ETHER:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
			goto set_hsplit_finish;
		}

		switch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) {
		case RTE_PTYPE_TUNNEL_GRENAT:
			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS;
			goto set_hsplit_finish;
		}

		PMD_DRV_LOG(ERR, "Buffer split protocol is not supported");
		return -EINVAL;

set_hsplit_finish:
		rxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE;
	} else {
		rxq->rx_hdr_len = 0;
		rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */
	}

	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
	rx_ctx.dsize = 1; /* 32B descriptors */
#endif
	rx_ctx.rxmax = rxq->max_pkt_len;
	/* TPH: Transaction Layer Packet (TLP) processing hints */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
	 * When the number of free descriptors goes below the lrxqthresh,
	 * an immediate interrupt is triggered.
	 */
	rx_ctx.lrxqthresh = 2;
	/* Default to 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;

	rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);

	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
		    rxq->port_id, rxq->queue_id, rxdid);

	if (!(pf->supported_rxdid & BIT(rxdid))) {
		PMD_DRV_LOG(ERR, "currently the DDP package doesn't support RXDID (%u)",
			    rxdid);
		return -EINVAL;
	}

	rxq->rxdid = rxdid;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		QRXFLXP_CNTXT_RXDID_IDX_M;

	/* increasing context priority to pick up profile ID;
	 * default is 0x01; setting to 0x03 to ensure the profile
	 * is programmed even if the previous context has the same priority
	 */
	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		regval |= QRXFLXP_CNTXT_TS_M;

	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);

	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
			    rxq->queue_id);
		return -EINVAL;
	}
	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
			    rxq->queue_id);
		return -EINVAL;
	}

	/* Check if scattered RX needs to be used. */
	if (frame_size > buf_size)
		dev_data->scattered_rx = 1;

	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);

	/* Init the Rx tail register */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return 0;
}
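
/*
 * Worked example of the sizing logic in ice_program_hw_rx_queue(), assuming
 * a typical mempool with a 2176B data room and a 128B RTE_PKTMBUF_HEADROOM:
 *
 *	buf_size    = 2176 - 128 = 2048
 *	rx_buf_len  = RTE_ALIGN_FLOOR(2048, 1 << ICE_RLAN_CTX_DBUF_S) = 2048
 *	max_pkt_len = min(ICE_SUPPORT_CHAIN_NUM * 2048, mtu + ICE_ETH_OVERHEAD)
 *
 * With a 1500B MTU the whole frame fits in one buffer; with a 9000B MTU
 * frame_size exceeds buf_size, so dev_data->scattered_rx is set and the
 * scattered Rx path chains mbufs instead.
 */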

/* Allocate mbufs for all descriptors in rx queue */
static int
ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
{
	struct ice_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	uint16_t i;

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union ice_rx_flex_desc *rxd;
		rxd = &rxq->rx_ring[i];
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);

		if (unlikely(!mbuf)) {
			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));

		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
			rte_mbuf_refcnt_set(mbuf, 1);
			mbuf->next = NULL;
			rxd->read.hdr_addr = 0;
			rxd->read.pkt_addr = dma_addr;
		} else {
			struct rte_mbuf *mbuf_pay;
			mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
			if (unlikely(!mbuf_pay)) {
				rte_pktmbuf_free(mbuf);
				PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX");
				return -ENOMEM;
			}

			mbuf_pay->next = NULL;
			mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf_pay->nb_segs = 1;
			mbuf_pay->port = rxq->port_id;
			mbuf->next = mbuf_pay;

			rxd->read.hdr_addr = dma_addr;
			/* The LS bit should be set to zero regardless of
			 * buffer split enablement.
			 */
			rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
		}

#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
		rxd->read.rsvd1 = 0;
		rxd->read.rsvd2 = 0;
#endif
		rxe[i].mbuf = mbuf;
	}

	return 0;
}
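
/*
 * The two-mempool allocation above corresponds to a protocol-based buffer
 * split setup done by the application at queue-setup time. A hedged sketch
 * (pool names hypothetical, assuming the port advertises
 * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT):
 *
 *	union rte_eth_rxseg rxseg[2] = {
 *		{ .split = { .mp = hdr_pool, .proto_hdr = RTE_PTYPE_L4_UDP } },
 *		{ .split = { .mp = pay_pool } },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_seg = rxseg,
 *		.rx_nseg = 2,
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
 *	};
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
 *			       &rxconf, NULL);
 */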

/* Free all mbufs for descriptors in rx queue */
static void
_ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
{
	uint16_t i;

	if (!rxq || !rxq->sw_ring) {
		PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
		return;
	}

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		if (rxq->sw_ring[i].mbuf) {
			rte_pktmbuf_free(rxq->sw_ring[i].mbuf);
			rxq->sw_ring[i].mbuf = NULL;
		}
	}
	if (rxq->rx_nb_avail == 0)
		return;
	for (i = 0; i < rxq->rx_nb_avail; i++)
		rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);

	rxq->rx_nb_avail = 0;
}

/* Turn an Rx queue on or off.
 * @q_idx: queue index in pf scope
 * @on: turn on or off the queue
 */
static int
ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
{
	uint32_t reg;
	uint16_t j;

	/* QRX_CTRL = QRX_ENA */
	reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));

	if (on) {
		if (reg & QRX_CTRL_QENA_STAT_M)
			return 0; /* Already on, skip */
		reg |= QRX_CTRL_QENA_REQ_M;
	} else {
		if (!(reg & QRX_CTRL_QENA_STAT_M))
			return 0; /* Already off, skip */
		reg &= ~QRX_CTRL_QENA_REQ_M;
	}

	/* Write the register */
	ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
	/* Check the result. QENA_STAT is expected to follow QENA_REQ
	 * within no more than 10 us.
	 * TODO: need to change the wait counter later
	 */
	for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
		rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
		reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
		if (on) {
			if ((reg & QRX_CTRL_QENA_REQ_M) &&
			    (reg & QRX_CTRL_QENA_STAT_M))
				break;
		} else {
			if (!(reg & QRX_CTRL_QENA_REQ_M) &&
			    !(reg & QRX_CTRL_QENA_STAT_M))
				break;
		}
	}

	/* Check if it is timeout */
	if (j >= ICE_CHK_Q_ENA_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
			    (on ? "enable" : "disable"), q_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static inline int
ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
{
	int ret = 0;

	if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "ICE_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, ICE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "rxq->nb_rx_desc=%d",
			     rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->nb_rx_desc=%d, "
			     "rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}
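
/*
 * Example of a setup passing all three checks above (ICE_RX_MAX_BURST is
 * 32 at the time of writing): nb_rx_desc = 1024 with rx_free_thresh = 32
 * satisfies 32 >= 32, 32 < 1024 and 1024 % 32 == 0, so the queue remains
 * eligible for the bulk-allocation Rx path.
 */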

/* reset fields in ice_rx_queue back to default */
static void
ice_reset_rx_queue(struct ice_rx_queue *rxq)
{
	unsigned int i;
	uint16_t len;

	if (!rxq) {
		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
		return;
	}

	len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);

	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
		((volatile char *)rxq->rx_ring)[i] = 0;

	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = 0; i < ICE_RX_MAX_BURST; ++i)
		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);

	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;

	rxq->rxrearm_start = 0;
	rxq->rxrearm_nb = 0;
}

int
ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_rx_queue *rxq;
	int err;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
			    rx_queue_id, dev->data->nb_rx_queues);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	if (!rxq || !rxq->q_set) {
		PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
			    rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
		rxq->ts_enable = true;
	err = ice_program_hw_rx_queue(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to program RX queue %u",
			    rx_queue_id);
		return -EIO;
	}

	err = ice_alloc_rx_queue_mbufs(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
		return -ENOMEM;
	}

	/* Init the RX tail register. */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
			    rx_queue_id);

		rxq->rx_rel_mbufs(rxq);
		ice_reset_rx_queue(rxq);
		return -EINVAL;
	}

	dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
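
/*
 * Applications do not call ice_rx_queue_start() directly; it is wired up
 * as the rx_queue_start dev op and reached through the ethdev API. A
 * minimal sketch:
 *
 *	if (rte_eth_dev_rx_queue_start(port_id, queue_id) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot start Rx queue\n");
 */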

int
ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_rx_queue *rxq;
	int err;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		if (dev->data->rx_queue_state[rx_queue_id] ==
			RTE_ETH_QUEUE_STATE_STOPPED)
			return 0;

		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
		if (err) {
			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
				    rx_queue_id);
			return -EINVAL;
		}
		rxq->rx_rel_mbufs(rxq);
		ice_reset_rx_queue(rxq);
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

int
ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ci_tx_queue *txq;
	int err;
	struct ice_vsi *vsi;
	struct ice_hw *hw;
	struct ice_pf *pf;
	struct ice_aqc_add_tx_qgrp *txq_elem;
	struct ice_tlan_ctx tx_ctx;
	int buf_len;

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
			    tx_queue_id, dev->data->nb_tx_queues);
		return -EINVAL;
	}

	txq = dev->data->tx_queues[tx_queue_id];
	if (!txq || !txq->q_set) {
		PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
			    tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] ==
		RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	buf_len = ice_struct_size(txq_elem, txqs, 1);
	txq_elem = ice_malloc(hw, buf_len);
	if (!txq_elem)
		return -ENOMEM;

	vsi = txq->ice_vsi;
	hw = ICE_VSI_TO_HW(vsi);
	pf = ICE_VSI_TO_PF(vsi);

	memset(&tx_ctx, 0, sizeof(tx_ctx));
	txq_elem->num_txqs = 1;
	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);

	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->nb_tx_desc;
	tx_ctx.pf_num = hw->pf_id;
	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
	tx_ctx.src_vsi = vsi->vsi_id;
	tx_ctx.port_num = hw->port_info->lport;
	tx_ctx.tso_ena = 1; /* tso enable */
	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
	tx_ctx.tsyn_ena = 1;

	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);

	/* Init the Tx tail register */
	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);

	/* Fix me, we assume TC always 0 here */
	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
			txq_elem, buf_len, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to add lan txq");
		rte_free(txq_elem);
		return -EIO;
	}
	/* store the schedule node id */
	txq->q_teid = txq_elem->txqs[0].q_teid;

	/* move the queue to correct position in hierarchy, if explicit hierarchy configured */
	if (pf->tm_conf.committed)
		if (ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) {
			PMD_DRV_LOG(ERR, "Failed to set up txq traffic management node");
			rte_free(txq_elem);
			return -EIO;
		}

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	rte_free(txq_elem);
	return 0;
}

static int
ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
{
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint32_t rxdid = ICE_RXDID_LEGACY_1;
	struct ice_rlan_ctx rx_ctx;
	uint32_t regval;
	int err;

	rxq->rx_hdr_len = 0;
	rxq->rx_buf_len = 1024;

	memset(&rx_ctx, 0, sizeof(rx_ctx));

	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
	rx_ctx.dtype = 0; /* No Buffer Split mode */
	rx_ctx.dsize = 1; /* 32B descriptors */
	rx_ctx.rxmax = ICE_ETH_MAX_LEN;
	/* TPH: Transaction Layer Packet (TLP) processing hints */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
	 * When the number of free descriptors goes below the lrxqthresh,
	 * an immediate interrupt is triggered.
	 */
	rx_ctx.lrxqthresh = 2;
	/* Default to 32-byte descriptors, VLAN tag extracted to L2TAG2 (1st) */
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
		QRXFLXP_CNTXT_RXDID_IDX_M;

	/* increasing context priority to pick up profile ID;
	 * default is 0x01; setting to 0x03 to ensure the profile
	 * is programmed even if the previous context has the same priority
	 */
	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);

	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
			    rxq->queue_id);
		return -EINVAL;
	}
	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
			    rxq->queue_id);
		return -EINVAL;
	}

	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);

	/* Init the Rx tail register */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return 0;
}

int
ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct ice_rx_queue *rxq;
	int err;
	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	rxq = pf->fdir.rxq;
	if (!rxq || !rxq->q_set) {
		PMD_DRV_LOG(ERR, "FDIR RX queue %u is not available or not set up",
			    rx_queue_id);
		return -EINVAL;
	}

	err = ice_fdir_program_hw_rx_queue(rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR RX queue %u",
			    rx_queue_id);
		return -EIO;
	}

	/* Init the RX tail register. */
	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
			    rx_queue_id);

		ice_reset_rx_queue(rxq);
		return -EINVAL;
	}

	return 0;
}

int
ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct ci_tx_queue *txq;
	int err;
	struct ice_vsi *vsi;
	struct ice_hw *hw;
	struct ice_aqc_add_tx_qgrp *txq_elem;
	struct ice_tlan_ctx tx_ctx;
	int buf_len;

	PMD_INIT_FUNC_TRACE();

	txq = pf->fdir.txq;
	if (!txq || !txq->q_set) {
		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or not set up",
			    tx_queue_id);
		return -EINVAL;
	}

	buf_len = ice_struct_size(txq_elem, txqs, 1);
	txq_elem = ice_malloc(hw, buf_len);
	if (!txq_elem)
		return -ENOMEM;

	vsi = txq->ice_vsi;
	hw = ICE_VSI_TO_HW(vsi);

	memset(&tx_ctx, 0, sizeof(tx_ctx));
	txq_elem->num_txqs = 1;
	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);

	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->nb_tx_desc;
	tx_ctx.pf_num = hw->pf_id;
	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
	tx_ctx.src_vsi = vsi->vsi_id;
	tx_ctx.port_num = hw->port_info->lport;
	tx_ctx.tso_ena = 1; /* tso enable */
	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */

	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);

	/* Init the Tx tail register */
	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);

	/* Fix me, we assume TC always 0 here */
	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
			      txq_elem, buf_len, NULL);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
		rte_free(txq_elem);
		return -EIO;
	}
	/* store the schedule node id */
	txq->q_teid = txq_elem->txqs[0].q_teid;

	rte_free(txq_elem);
	return 0;
}
1009c1d14583SBruce Richardson 
1010c1d14583SBruce Richardson /* Free all mbufs for descriptors in tx queue */
1011c1d14583SBruce Richardson static void
1012c038157aSBruce Richardson _ice_tx_queue_release_mbufs(struct ci_tx_queue *txq)
1013c1d14583SBruce Richardson {
1014c1d14583SBruce Richardson 	uint16_t i;
1015c1d14583SBruce Richardson 
1016c1d14583SBruce Richardson 	if (!txq || !txq->sw_ring) {
1017c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
1018c1d14583SBruce Richardson 		return;
1019c1d14583SBruce Richardson 	}
1020c1d14583SBruce Richardson 
1021c1d14583SBruce Richardson 	for (i = 0; i < txq->nb_tx_desc; i++) {
1022c1d14583SBruce Richardson 		if (txq->sw_ring[i].mbuf) {
1023c1d14583SBruce Richardson 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1024c1d14583SBruce Richardson 			txq->sw_ring[i].mbuf = NULL;
1025c1d14583SBruce Richardson 		}
1026c1d14583SBruce Richardson 	}
1027c1d14583SBruce Richardson }
1028c1d14583SBruce Richardson 
1029c1d14583SBruce Richardson static void
1030c038157aSBruce Richardson ice_reset_tx_queue(struct ci_tx_queue *txq)
1031c1d14583SBruce Richardson {
10325cc9919fSBruce Richardson 	struct ci_tx_entry *txe;
1033c1d14583SBruce Richardson 	uint16_t i, prev, size;
1034c1d14583SBruce Richardson 
1035c1d14583SBruce Richardson 	if (!txq) {
1036c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
1037c1d14583SBruce Richardson 		return;
1038c1d14583SBruce Richardson 	}
1039c1d14583SBruce Richardson 
1040c1d14583SBruce Richardson 	txe = txq->sw_ring;
1041c1d14583SBruce Richardson 	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
1042c1d14583SBruce Richardson 	for (i = 0; i < size; i++)
10434d0f54d9SBruce Richardson 		((volatile char *)txq->ice_tx_ring)[i] = 0;
1044c1d14583SBruce Richardson 
1045c1d14583SBruce Richardson 	prev = (uint16_t)(txq->nb_tx_desc - 1);
1046c1d14583SBruce Richardson 	for (i = 0; i < txq->nb_tx_desc; i++) {
10474d0f54d9SBruce Richardson 		volatile struct ice_tx_desc *txd = &txq->ice_tx_ring[i];
1048c1d14583SBruce Richardson 
1049c1d14583SBruce Richardson 		txd->cmd_type_offset_bsz =
1050c1d14583SBruce Richardson 			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
		txe[i].mbuf = NULL;
1052c1d14583SBruce Richardson 		txe[i].last_id = i;
1053c1d14583SBruce Richardson 		txe[prev].next_id = i;
1054c1d14583SBruce Richardson 		prev = i;
1055c1d14583SBruce Richardson 	}
1056c1d14583SBruce Richardson 
1057c1d14583SBruce Richardson 	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1058c1d14583SBruce Richardson 	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1059c1d14583SBruce Richardson 
1060c1d14583SBruce Richardson 	txq->tx_tail = 0;
1061c1d14583SBruce Richardson 	txq->nb_tx_used = 0;
1062c1d14583SBruce Richardson 
1063c1d14583SBruce Richardson 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1064c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1065c1d14583SBruce Richardson }
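/*
 * Worked example (illustrative only, using a hypothetical 4-entry ring far
 * below ICE_MIN_RING_DESC just to keep it short): after ice_reset_tx_queue()
 * the software entries form the circular list
 *
 *   txe[0].next_id = 1, txe[1].next_id = 2,
 *   txe[2].next_id = 3, txe[3].next_id = 0,
 *
 * every descriptor reports DESC_DONE, and nb_tx_free is 3 because one
 * descriptor is reserved as the ring sentinel.
 */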
1066c1d14583SBruce Richardson 
1067c1d14583SBruce Richardson int
1068c1d14583SBruce Richardson ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1069c1d14583SBruce Richardson {
1070c038157aSBruce Richardson 	struct ci_tx_queue *txq;
1071c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1072c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1073c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1074c1d14583SBruce Richardson 	uint16_t q_ids[1];
1075c1d14583SBruce Richardson 	uint32_t q_teids[1];
1076c1d14583SBruce Richardson 	uint16_t q_handle = tx_queue_id;
1077c1d14583SBruce Richardson 	int status;
1078c1d14583SBruce Richardson 
1079c1d14583SBruce Richardson 	if (tx_queue_id >= dev->data->nb_tx_queues) {
1080c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
1081c1d14583SBruce Richardson 			    tx_queue_id, dev->data->nb_tx_queues);
1082c1d14583SBruce Richardson 		return -EINVAL;
1083c1d14583SBruce Richardson 	}
1084c1d14583SBruce Richardson 
1085c1d14583SBruce Richardson 	txq = dev->data->tx_queues[tx_queue_id];
1086c1d14583SBruce Richardson 	if (!txq) {
1087c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
1088c1d14583SBruce Richardson 			    tx_queue_id);
1089c1d14583SBruce Richardson 		return -EINVAL;
1090c1d14583SBruce Richardson 	}
1091c1d14583SBruce Richardson 
1092c1d14583SBruce Richardson 	if (dev->data->tx_queue_state[tx_queue_id] ==
1093c1d14583SBruce Richardson 		RTE_ETH_QUEUE_STATE_STOPPED)
1094c1d14583SBruce Richardson 		return 0;
1095c1d14583SBruce Richardson 
1096c1d14583SBruce Richardson 	q_ids[0] = txq->reg_idx;
1097c1d14583SBruce Richardson 	q_teids[0] = txq->q_teid;
1098c1d14583SBruce Richardson 
	/* FIXME: we assume TC is always 0 here */
1100c1d14583SBruce Richardson 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1101c1d14583SBruce Richardson 				q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1102c1d14583SBruce Richardson 	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
1104c1d14583SBruce Richardson 		return -EINVAL;
1105c1d14583SBruce Richardson 	}
1106c1d14583SBruce Richardson 
1107c1d14583SBruce Richardson 	txq->tx_rel_mbufs(txq);
1108c1d14583SBruce Richardson 	ice_reset_tx_queue(txq);
1109c1d14583SBruce Richardson 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1110c1d14583SBruce Richardson 
1111c1d14583SBruce Richardson 	return 0;
1112c1d14583SBruce Richardson }
1113c1d14583SBruce Richardson 
1114c1d14583SBruce Richardson int
1115c1d14583SBruce Richardson ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1116c1d14583SBruce Richardson {
1117c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1118c1d14583SBruce Richardson 	int err;
1119c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1120c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1121c1d14583SBruce Richardson 
1122c1d14583SBruce Richardson 	rxq = pf->fdir.rxq;
1123c1d14583SBruce Richardson 
1124c1d14583SBruce Richardson 	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1125c1d14583SBruce Richardson 	if (err) {
1126c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1127c1d14583SBruce Richardson 			    rx_queue_id);
1128c1d14583SBruce Richardson 		return -EINVAL;
1129c1d14583SBruce Richardson 	}
1130c1d14583SBruce Richardson 	rxq->rx_rel_mbufs(rxq);
1131c1d14583SBruce Richardson 
1132c1d14583SBruce Richardson 	return 0;
1133c1d14583SBruce Richardson }
1134c1d14583SBruce Richardson 
1135c1d14583SBruce Richardson int
1136c1d14583SBruce Richardson ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1137c1d14583SBruce Richardson {
1138c038157aSBruce Richardson 	struct ci_tx_queue *txq;
1139c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1140c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1141c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1142c1d14583SBruce Richardson 	uint16_t q_ids[1];
1143c1d14583SBruce Richardson 	uint32_t q_teids[1];
1144c1d14583SBruce Richardson 	uint16_t q_handle = tx_queue_id;
1145c1d14583SBruce Richardson 	int status;
1146c1d14583SBruce Richardson 
1147c1d14583SBruce Richardson 	txq = pf->fdir.txq;
1148c1d14583SBruce Richardson 	if (!txq) {
1149c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
1150c1d14583SBruce Richardson 			    tx_queue_id);
1151c1d14583SBruce Richardson 		return -EINVAL;
1152c1d14583SBruce Richardson 	}
1153c1d14583SBruce Richardson 	if (txq->qtx_tail == NULL) {
1154c1d14583SBruce Richardson 		PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id);
1155c1d14583SBruce Richardson 		return 0;
1156c1d14583SBruce Richardson 	}
11574d0f54d9SBruce Richardson 	vsi = txq->ice_vsi;
1158c1d14583SBruce Richardson 
1159c1d14583SBruce Richardson 	q_ids[0] = txq->reg_idx;
1160c1d14583SBruce Richardson 	q_teids[0] = txq->q_teid;
1161c1d14583SBruce Richardson 
	/* FIXME: we assume TC is always 0 here */
1163c1d14583SBruce Richardson 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1164c1d14583SBruce Richardson 				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1165c1d14583SBruce Richardson 	if (status != ICE_SUCCESS) {
		PMD_DRV_LOG(DEBUG, "Failed to disable LAN Tx queue");
1167c1d14583SBruce Richardson 		return -EINVAL;
1168c1d14583SBruce Richardson 	}
1169c1d14583SBruce Richardson 
1170c1d14583SBruce Richardson 	txq->tx_rel_mbufs(txq);
1171c1d14583SBruce Richardson 	txq->qtx_tail = NULL;
1172c1d14583SBruce Richardson 
1173c1d14583SBruce Richardson 	return 0;
1174c1d14583SBruce Richardson }
1175c1d14583SBruce Richardson 
1176c1d14583SBruce Richardson int
1177c1d14583SBruce Richardson ice_rx_queue_setup(struct rte_eth_dev *dev,
1178c1d14583SBruce Richardson 		   uint16_t queue_idx,
1179c1d14583SBruce Richardson 		   uint16_t nb_desc,
1180c1d14583SBruce Richardson 		   unsigned int socket_id,
1181c1d14583SBruce Richardson 		   const struct rte_eth_rxconf *rx_conf,
1182c1d14583SBruce Richardson 		   struct rte_mempool *mp)
1183c1d14583SBruce Richardson {
1184c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1185c1d14583SBruce Richardson 	struct ice_adapter *ad =
1186c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1187c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1188c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1189c1d14583SBruce Richardson 	const struct rte_memzone *rz;
1190c1d14583SBruce Richardson 	uint32_t ring_size;
1191c1d14583SBruce Richardson 	uint16_t len;
1192c1d14583SBruce Richardson 	int use_def_burst_func = 1;
1193c1d14583SBruce Richardson 	uint64_t offloads;
1194c1d14583SBruce Richardson 	uint16_t n_seg = rx_conf->rx_nseg;
1195c1d14583SBruce Richardson 	uint16_t i;
1196c1d14583SBruce Richardson 
1197c1d14583SBruce Richardson 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1198c1d14583SBruce Richardson 	    nb_desc > ICE_MAX_RING_DESC ||
1199c1d14583SBruce Richardson 	    nb_desc < ICE_MIN_RING_DESC) {
1200c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1201c1d14583SBruce Richardson 			     "invalid", nb_desc);
1202c1d14583SBruce Richardson 		return -EINVAL;
1203c1d14583SBruce Richardson 	}
1204c1d14583SBruce Richardson 
1205c1d14583SBruce Richardson 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1206c1d14583SBruce Richardson 
1207c1d14583SBruce Richardson 	if (mp)
1208c1d14583SBruce Richardson 		n_seg = 1;
1209c1d14583SBruce Richardson 
1210c1d14583SBruce Richardson 	if (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1211c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured",
1212c1d14583SBruce Richardson 				dev->data->port_id, queue_idx);
1213c1d14583SBruce Richardson 		return -EINVAL;
1214c1d14583SBruce Richardson 	}
1215c1d14583SBruce Richardson 
1216c1d14583SBruce Richardson 	/* Free memory if needed */
1217c1d14583SBruce Richardson 	if (dev->data->rx_queues[queue_idx]) {
1218c1d14583SBruce Richardson 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1219c1d14583SBruce Richardson 		dev->data->rx_queues[queue_idx] = NULL;
1220c1d14583SBruce Richardson 	}
1221c1d14583SBruce Richardson 
1222c1d14583SBruce Richardson 	/* Allocate the rx queue data structure */
1223c1d14583SBruce Richardson 	rxq = rte_zmalloc_socket(NULL,
1224c1d14583SBruce Richardson 				 sizeof(struct ice_rx_queue),
1225c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
1226c1d14583SBruce Richardson 				 socket_id);
1227c1d14583SBruce Richardson 
1228c1d14583SBruce Richardson 	if (!rxq) {
1229c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1230c1d14583SBruce Richardson 			     "rx queue data structure");
1231c1d14583SBruce Richardson 		return -ENOMEM;
1232c1d14583SBruce Richardson 	}
1233c1d14583SBruce Richardson 
1234c1d14583SBruce Richardson 	rxq->rxseg_nb = n_seg;
1235c1d14583SBruce Richardson 	if (n_seg > 1) {
1236c1d14583SBruce Richardson 		for (i = 0; i < n_seg; i++)
1237c1d14583SBruce Richardson 			memcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split,
1238c1d14583SBruce Richardson 				sizeof(struct rte_eth_rxseg_split));
1239c1d14583SBruce Richardson 
1240c1d14583SBruce Richardson 		rxq->mp = rxq->rxseg[0].mp;
1241c1d14583SBruce Richardson 	} else {
1242c1d14583SBruce Richardson 		rxq->mp = mp;
1243c1d14583SBruce Richardson 	}
1244c1d14583SBruce Richardson 
1245c1d14583SBruce Richardson 	rxq->nb_rx_desc = nb_desc;
1246c1d14583SBruce Richardson 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1247c1d14583SBruce Richardson 	rxq->queue_id = queue_idx;
1248c1d14583SBruce Richardson 	rxq->offloads = offloads;
1249c1d14583SBruce Richardson 
1250c1d14583SBruce Richardson 	rxq->reg_idx = vsi->base_queue + queue_idx;
1251c1d14583SBruce Richardson 	rxq->port_id = dev->data->port_id;
1252c1d14583SBruce Richardson 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1253c1d14583SBruce Richardson 		rxq->crc_len = RTE_ETHER_CRC_LEN;
1254c1d14583SBruce Richardson 	else
1255c1d14583SBruce Richardson 		rxq->crc_len = 0;
1256c1d14583SBruce Richardson 
1257c1d14583SBruce Richardson 	rxq->drop_en = rx_conf->rx_drop_en;
1258c1d14583SBruce Richardson 	rxq->vsi = vsi;
1259c1d14583SBruce Richardson 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1260c1d14583SBruce Richardson 	rxq->proto_xtr = pf->proto_xtr != NULL ?
1261c1d14583SBruce Richardson 			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1262c1d14583SBruce Richardson 	if (rxq->proto_xtr != PROTO_XTR_NONE &&
1263c1d14583SBruce Richardson 			ad->devargs.xtr_flag_offs[rxq->proto_xtr] != 0xff)
1264c1d14583SBruce Richardson 		rxq->xtr_ol_flag = 1ULL << ad->devargs.xtr_flag_offs[rxq->proto_xtr];
1265c1d14583SBruce Richardson 	rxq->xtr_field_offs = ad->devargs.xtr_field_offs;
1266c1d14583SBruce Richardson 
	/* Allocate the maximum number of RX ring hardware descriptors. */
1268c1d14583SBruce Richardson 	len = ICE_MAX_RING_DESC;
1269c1d14583SBruce Richardson 
1270c1d14583SBruce Richardson 	/**
	 * Allocate a little more memory because the vectorized/bulk_alloc Rx
	 * functions don't check the ring boundary on every descriptor read.
1273c1d14583SBruce Richardson 	 */
1274c1d14583SBruce Richardson 	len += ICE_RX_MAX_BURST;
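	/*
	 * Illustrative arithmetic (macro values assumed from ice_rxtx.h,
	 * where ICE_MAX_RING_DESC is 4096 and ICE_RX_MAX_BURST is 32): the
	 * memzone below is sized for 4096 + 32 = 4128 descriptors, so a
	 * bulk-alloc or vector Rx path that reads a whole burst past the
	 * ring end still lands inside the allocation.
	 */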
1275c1d14583SBruce Richardson 
	/* Size the descriptor ring for that maximum and align it for DMA. */
1277c1d14583SBruce Richardson 	ring_size = sizeof(union ice_rx_flex_desc) * len;
1278c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1279c1d14583SBruce Richardson 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1280c1d14583SBruce Richardson 				      ring_size, ICE_RING_BASE_ALIGN,
1281c1d14583SBruce Richardson 				      socket_id);
1282c1d14583SBruce Richardson 	if (!rz) {
1283c1d14583SBruce Richardson 		ice_rx_queue_release(rxq);
1284c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1285c1d14583SBruce Richardson 		return -ENOMEM;
1286c1d14583SBruce Richardson 	}
1287c1d14583SBruce Richardson 
1288c1d14583SBruce Richardson 	rxq->mz = rz;
1289c1d14583SBruce Richardson 	/* Zero all the descriptors in the ring. */
1290c1d14583SBruce Richardson 	memset(rz->addr, 0, ring_size);
1291c1d14583SBruce Richardson 
1292c1d14583SBruce Richardson 	rxq->rx_ring_dma = rz->iova;
1293c1d14583SBruce Richardson 	rxq->rx_ring = rz->addr;
1294c1d14583SBruce Richardson 
1295c1d14583SBruce Richardson 	/* always reserve more for bulk alloc */
1296c1d14583SBruce Richardson 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1297c1d14583SBruce Richardson 
1298c1d14583SBruce Richardson 	/* Allocate the software ring. */
1299c1d14583SBruce Richardson 	rxq->sw_ring = rte_zmalloc_socket(NULL,
1300c1d14583SBruce Richardson 					  sizeof(struct ice_rx_entry) * len,
1301c1d14583SBruce Richardson 					  RTE_CACHE_LINE_SIZE,
1302c1d14583SBruce Richardson 					  socket_id);
1303c1d14583SBruce Richardson 	if (!rxq->sw_ring) {
1304c1d14583SBruce Richardson 		ice_rx_queue_release(rxq);
1305c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1306c1d14583SBruce Richardson 		return -ENOMEM;
1307c1d14583SBruce Richardson 	}
1308c1d14583SBruce Richardson 
1309c1d14583SBruce Richardson 	ice_reset_rx_queue(rxq);
1310c1d14583SBruce Richardson 	rxq->q_set = true;
1311c1d14583SBruce Richardson 	dev->data->rx_queues[queue_idx] = rxq;
1312c1d14583SBruce Richardson 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1313c1d14583SBruce Richardson 
1314c1d14583SBruce Richardson 	use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1315c1d14583SBruce Richardson 
1316c1d14583SBruce Richardson 	if (!use_def_burst_func) {
1317c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1318c1d14583SBruce Richardson 			     "satisfied. Rx Burst Bulk Alloc function will be "
1319c1d14583SBruce Richardson 			     "used on port=%d, queue=%d.",
1320c1d14583SBruce Richardson 			     rxq->port_id, rxq->queue_id);
1321c1d14583SBruce Richardson 	} else {
1322c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
			     "not satisfied or Scattered Rx is requested "
			     "on port=%d, queue=%d.",
1325c1d14583SBruce Richardson 			     rxq->port_id, rxq->queue_id);
1326c1d14583SBruce Richardson 		ad->rx_bulk_alloc_allowed = false;
1327c1d14583SBruce Richardson 	}
1328c1d14583SBruce Richardson 
1329c1d14583SBruce Richardson 	return 0;
1330c1d14583SBruce Richardson }
1331c1d14583SBruce Richardson 
1332c1d14583SBruce Richardson void
1333c1d14583SBruce Richardson ice_rx_queue_release(void *rxq)
1334c1d14583SBruce Richardson {
1335c1d14583SBruce Richardson 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1336c1d14583SBruce Richardson 
1337c1d14583SBruce Richardson 	if (!q) {
1338c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1339c1d14583SBruce Richardson 		return;
1340c1d14583SBruce Richardson 	}
1341c1d14583SBruce Richardson 
1342c1d14583SBruce Richardson 	if (q->rx_rel_mbufs != NULL)
1343c1d14583SBruce Richardson 		q->rx_rel_mbufs(q);
1344c1d14583SBruce Richardson 	rte_free(q->sw_ring);
1345c1d14583SBruce Richardson 	rte_memzone_free(q->mz);
1346c1d14583SBruce Richardson 	rte_free(q);
1347c1d14583SBruce Richardson }
1348c1d14583SBruce Richardson 
1349c1d14583SBruce Richardson int
1350c1d14583SBruce Richardson ice_tx_queue_setup(struct rte_eth_dev *dev,
1351c1d14583SBruce Richardson 		   uint16_t queue_idx,
1352c1d14583SBruce Richardson 		   uint16_t nb_desc,
1353c1d14583SBruce Richardson 		   unsigned int socket_id,
1354c1d14583SBruce Richardson 		   const struct rte_eth_txconf *tx_conf)
1355c1d14583SBruce Richardson {
1356c1d14583SBruce Richardson 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1357c1d14583SBruce Richardson 	struct ice_vsi *vsi = pf->main_vsi;
1358c038157aSBruce Richardson 	struct ci_tx_queue *txq;
1359c1d14583SBruce Richardson 	const struct rte_memzone *tz;
1360c1d14583SBruce Richardson 	uint32_t ring_size;
1361c1d14583SBruce Richardson 	uint16_t tx_rs_thresh, tx_free_thresh;
1362c1d14583SBruce Richardson 	uint64_t offloads;
1363c1d14583SBruce Richardson 
1364c1d14583SBruce Richardson 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1365c1d14583SBruce Richardson 
1366c1d14583SBruce Richardson 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1367c1d14583SBruce Richardson 	    nb_desc > ICE_MAX_RING_DESC ||
1368c1d14583SBruce Richardson 	    nb_desc < ICE_MIN_RING_DESC) {
1369c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1370c1d14583SBruce Richardson 			     "invalid", nb_desc);
1371c1d14583SBruce Richardson 		return -EINVAL;
1372c1d14583SBruce Richardson 	}
1373c1d14583SBruce Richardson 
1374c1d14583SBruce Richardson 	/**
1375c1d14583SBruce Richardson 	 * The following two parameters control the setting of the RS bit on
1376c1d14583SBruce Richardson 	 * transmit descriptors. TX descriptors will have their RS bit set
1377c1d14583SBruce Richardson 	 * after txq->tx_rs_thresh descriptors have been used. The TX
1378c1d14583SBruce Richardson 	 * descriptor ring will be cleaned after txq->tx_free_thresh
1379c1d14583SBruce Richardson 	 * descriptors are used or if the number of descriptors required to
1380c1d14583SBruce Richardson 	 * transmit a packet is greater than the number of free TX descriptors.
1381c1d14583SBruce Richardson 	 *
1382c1d14583SBruce Richardson 	 * The following constraints must be satisfied:
1383c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be greater than 0.
1384c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
1385c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
1386c1d14583SBruce Richardson 	 *  - tx_rs_thresh must be a divisor of the ring size.
1387c1d14583SBruce Richardson 	 *  - tx_free_thresh must be greater than 0.
1388c1d14583SBruce Richardson 	 *  - tx_free_thresh must be less than the size of the ring minus 3.
1389c1d14583SBruce Richardson 	 *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1390c1d14583SBruce Richardson 	 *
1391c1d14583SBruce Richardson 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
	 * race condition, hence the maximum threshold constraints. When a
	 * threshold is set to zero, the corresponding default value is used.
1394c1d14583SBruce Richardson 	 */
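	/*
	 * Worked example (illustrative only, assuming the ice_rxtx.h
	 * defaults ICE_DEFAULT_TX_FREE_THRESH == 32 and
	 * ICE_DEFAULT_TX_RSBIT_THRESH == 32): with nb_desc = 1024 and both
	 * tx_conf thresholds left at 0, the code below selects
	 * tx_free_thresh = 32 and tx_rs_thresh = 32, which satisfies every
	 * check that follows: 32 < 1024 - 2, 32 < 1024 - 3, 32 <= 32,
	 * 32 + 32 <= 1024 and 1024 % 32 == 0.
	 */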
1395c1d14583SBruce Richardson 	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1396c1d14583SBruce Richardson 				    tx_conf->tx_free_thresh :
1397c1d14583SBruce Richardson 				    ICE_DEFAULT_TX_FREE_THRESH);
1398c1d14583SBruce Richardson 	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
1399c1d14583SBruce Richardson 	tx_rs_thresh =
1400c1d14583SBruce Richardson 		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1401c1d14583SBruce Richardson 			nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1402c1d14583SBruce Richardson 	if (tx_conf->tx_rs_thresh)
1403c1d14583SBruce Richardson 		tx_rs_thresh = tx_conf->tx_rs_thresh;
1404c1d14583SBruce Richardson 	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1405c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1406c1d14583SBruce Richardson 				"exceed nb_desc. (tx_rs_thresh=%u "
				"tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
1408c1d14583SBruce Richardson 				(unsigned int)tx_rs_thresh,
1409c1d14583SBruce Richardson 				(unsigned int)tx_free_thresh,
1410c1d14583SBruce Richardson 				(unsigned int)nb_desc,
1411c1d14583SBruce Richardson 				(int)dev->data->port_id,
1412c1d14583SBruce Richardson 				(int)queue_idx);
1413c1d14583SBruce Richardson 		return -EINVAL;
1414c1d14583SBruce Richardson 	}
1415c1d14583SBruce Richardson 	if (tx_rs_thresh >= (nb_desc - 2)) {
1416c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1417c1d14583SBruce Richardson 			     "number of TX descriptors minus 2. "
1418c1d14583SBruce Richardson 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1419c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1420c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1421c1d14583SBruce Richardson 			     (int)queue_idx);
1422c1d14583SBruce Richardson 		return -EINVAL;
1423c1d14583SBruce Richardson 	}
1424c1d14583SBruce Richardson 	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
			     "number of TX descriptors minus 3. "
1428c1d14583SBruce Richardson 			     "(tx_free_thresh=%u port=%d queue=%d)",
1429c1d14583SBruce Richardson 			     (unsigned int)tx_free_thresh,
1430c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1431c1d14583SBruce Richardson 			     (int)queue_idx);
1432c1d14583SBruce Richardson 		return -EINVAL;
1433c1d14583SBruce Richardson 	}
1434c1d14583SBruce Richardson 	if (tx_rs_thresh > tx_free_thresh) {
1435c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1436c1d14583SBruce Richardson 			     "equal to tx_free_thresh. (tx_free_thresh=%u"
1437c1d14583SBruce Richardson 			     " tx_rs_thresh=%u port=%d queue=%d)",
1438c1d14583SBruce Richardson 			     (unsigned int)tx_free_thresh,
1439c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1440c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1441c1d14583SBruce Richardson 			     (int)queue_idx);
1442c1d14583SBruce Richardson 		return -EINVAL;
1443c1d14583SBruce Richardson 	}
1444c1d14583SBruce Richardson 	if ((nb_desc % tx_rs_thresh) != 0) {
1445c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1446c1d14583SBruce Richardson 			     "number of TX descriptors. (tx_rs_thresh=%u"
1447c1d14583SBruce Richardson 			     " port=%d queue=%d)",
1448c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1449c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1450c1d14583SBruce Richardson 			     (int)queue_idx);
1451c1d14583SBruce Richardson 		return -EINVAL;
1452c1d14583SBruce Richardson 	}
1453c1d14583SBruce Richardson 	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1454c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1455c1d14583SBruce Richardson 			     "tx_rs_thresh is greater than 1. "
1456c1d14583SBruce Richardson 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1457c1d14583SBruce Richardson 			     (unsigned int)tx_rs_thresh,
1458c1d14583SBruce Richardson 			     (int)dev->data->port_id,
1459c1d14583SBruce Richardson 			     (int)queue_idx);
1460c1d14583SBruce Richardson 		return -EINVAL;
1461c1d14583SBruce Richardson 	}
1462c1d14583SBruce Richardson 
1463c1d14583SBruce Richardson 	/* Free memory if needed. */
1464c1d14583SBruce Richardson 	if (dev->data->tx_queues[queue_idx]) {
1465c1d14583SBruce Richardson 		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1466c1d14583SBruce Richardson 		dev->data->tx_queues[queue_idx] = NULL;
1467c1d14583SBruce Richardson 	}
1468c1d14583SBruce Richardson 
1469c1d14583SBruce Richardson 	/* Allocate the TX queue data structure. */
1470c1d14583SBruce Richardson 	txq = rte_zmalloc_socket(NULL,
1471c038157aSBruce Richardson 				 sizeof(struct ci_tx_queue),
1472c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
1473c1d14583SBruce Richardson 				 socket_id);
1474c1d14583SBruce Richardson 	if (!txq) {
1475c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1476c1d14583SBruce Richardson 			     "tx queue structure");
1477c1d14583SBruce Richardson 		return -ENOMEM;
1478c1d14583SBruce Richardson 	}
1479c1d14583SBruce Richardson 
1480c1d14583SBruce Richardson 	/* Allocate TX hardware ring descriptors. */
1481c1d14583SBruce Richardson 	ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1482c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
14834d0f54d9SBruce Richardson 	tz = rte_eth_dma_zone_reserve(dev, "ice_tx_ring", queue_idx,
1484c1d14583SBruce Richardson 				      ring_size, ICE_RING_BASE_ALIGN,
1485c1d14583SBruce Richardson 				      socket_id);
1486c1d14583SBruce Richardson 	if (!tz) {
1487c1d14583SBruce Richardson 		ice_tx_queue_release(txq);
1488c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1489c1d14583SBruce Richardson 		return -ENOMEM;
1490c1d14583SBruce Richardson 	}
1491c1d14583SBruce Richardson 
1492c1d14583SBruce Richardson 	txq->mz = tz;
1493c1d14583SBruce Richardson 	txq->nb_tx_desc = nb_desc;
1494c1d14583SBruce Richardson 	txq->tx_rs_thresh = tx_rs_thresh;
1495c1d14583SBruce Richardson 	txq->tx_free_thresh = tx_free_thresh;
1496c1d14583SBruce Richardson 	txq->queue_id = queue_idx;
1497c1d14583SBruce Richardson 
1498c1d14583SBruce Richardson 	txq->reg_idx = vsi->base_queue + queue_idx;
1499c1d14583SBruce Richardson 	txq->port_id = dev->data->port_id;
1500c1d14583SBruce Richardson 	txq->offloads = offloads;
15014d0f54d9SBruce Richardson 	txq->ice_vsi = vsi;
1502c1d14583SBruce Richardson 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
1503c1d14583SBruce Richardson 
1504c1d14583SBruce Richardson 	txq->tx_ring_dma = tz->iova;
15054d0f54d9SBruce Richardson 	txq->ice_tx_ring = tz->addr;
1506c1d14583SBruce Richardson 
1507c1d14583SBruce Richardson 	/* Allocate software ring */
1508c1d14583SBruce Richardson 	txq->sw_ring =
1509c1d14583SBruce Richardson 		rte_zmalloc_socket(NULL,
15105cc9919fSBruce Richardson 				   sizeof(struct ci_tx_entry) * nb_desc,
1511c1d14583SBruce Richardson 				   RTE_CACHE_LINE_SIZE,
1512c1d14583SBruce Richardson 				   socket_id);
1513c1d14583SBruce Richardson 	if (!txq->sw_ring) {
1514c1d14583SBruce Richardson 		ice_tx_queue_release(txq);
1515c1d14583SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1516c1d14583SBruce Richardson 		return -ENOMEM;
1517c1d14583SBruce Richardson 	}
1518c1d14583SBruce Richardson 
1519c1d14583SBruce Richardson 	ice_reset_tx_queue(txq);
1520c1d14583SBruce Richardson 	txq->q_set = true;
1521c1d14583SBruce Richardson 	dev->data->tx_queues[queue_idx] = txq;
1522c1d14583SBruce Richardson 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1523c1d14583SBruce Richardson 	ice_set_tx_function_flag(dev, txq);
1524c1d14583SBruce Richardson 
1525c1d14583SBruce Richardson 	return 0;
1526c1d14583SBruce Richardson }
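/*
 * Usage sketch (illustrative, not part of the driver): applications reach
 * this function through the generic ethdev API, e.g.
 *
 *   struct rte_eth_txconf txconf = {
 *           .tx_rs_thresh = 32,
 *           .tx_free_thresh = 64,
 *   };
 *   ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *                                &txconf);
 *
 * where 32 and 64 satisfy all of the threshold constraints validated above.
 */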
1527c1d14583SBruce Richardson 
1528c1d14583SBruce Richardson void
1529c1d14583SBruce Richardson ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1530c1d14583SBruce Richardson {
1531c1d14583SBruce Richardson 	ice_rx_queue_release(dev->data->rx_queues[qid]);
1532c1d14583SBruce Richardson }
1533c1d14583SBruce Richardson 
1534c1d14583SBruce Richardson void
1535c1d14583SBruce Richardson ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1536c1d14583SBruce Richardson {
1537c1d14583SBruce Richardson 	ice_tx_queue_release(dev->data->tx_queues[qid]);
1538c1d14583SBruce Richardson }
1539c1d14583SBruce Richardson 
1540c1d14583SBruce Richardson void
1541c1d14583SBruce Richardson ice_tx_queue_release(void *txq)
1542c1d14583SBruce Richardson {
1543c038157aSBruce Richardson 	struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
1544c1d14583SBruce Richardson 
1545c1d14583SBruce Richardson 	if (!q) {
1546c1d14583SBruce Richardson 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1547c1d14583SBruce Richardson 		return;
1548c1d14583SBruce Richardson 	}
1549c1d14583SBruce Richardson 
1550c1d14583SBruce Richardson 	if (q->tx_rel_mbufs != NULL)
1551c1d14583SBruce Richardson 		q->tx_rel_mbufs(q);
1552c1d14583SBruce Richardson 	rte_free(q->sw_ring);
1553c1d14583SBruce Richardson 	rte_memzone_free(q->mz);
1554c1d14583SBruce Richardson 	rte_free(q);
1555c1d14583SBruce Richardson }
1556c1d14583SBruce Richardson 
1557c1d14583SBruce Richardson void
1558c1d14583SBruce Richardson ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1559c1d14583SBruce Richardson 		 struct rte_eth_rxq_info *qinfo)
1560c1d14583SBruce Richardson {
1561c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1562c1d14583SBruce Richardson 
1563c1d14583SBruce Richardson 	rxq = dev->data->rx_queues[queue_id];
1564c1d14583SBruce Richardson 
1565c1d14583SBruce Richardson 	qinfo->mp = rxq->mp;
1566c1d14583SBruce Richardson 	qinfo->scattered_rx = dev->data->scattered_rx;
1567c1d14583SBruce Richardson 	qinfo->nb_desc = rxq->nb_rx_desc;
1568c1d14583SBruce Richardson 
1569c1d14583SBruce Richardson 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1570c1d14583SBruce Richardson 	qinfo->conf.rx_drop_en = rxq->drop_en;
1571c1d14583SBruce Richardson 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1572c1d14583SBruce Richardson }
1573c1d14583SBruce Richardson 
1574c1d14583SBruce Richardson void
1575c1d14583SBruce Richardson ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1576c1d14583SBruce Richardson 		 struct rte_eth_txq_info *qinfo)
1577c1d14583SBruce Richardson {
1578c038157aSBruce Richardson 	struct ci_tx_queue *txq;
1579c1d14583SBruce Richardson 
1580c1d14583SBruce Richardson 	txq = dev->data->tx_queues[queue_id];
1581c1d14583SBruce Richardson 
1582c1d14583SBruce Richardson 	qinfo->nb_desc = txq->nb_tx_desc;
1583c1d14583SBruce Richardson 
1584*e3b5f52dSBruce Richardson 	qinfo->conf.tx_thresh.pthresh = ICE_DEFAULT_TX_PTHRESH;
1585*e3b5f52dSBruce Richardson 	qinfo->conf.tx_thresh.hthresh = ICE_DEFAULT_TX_HTHRESH;
1586*e3b5f52dSBruce Richardson 	qinfo->conf.tx_thresh.wthresh = ICE_DEFAULT_TX_WTHRESH;
1587c1d14583SBruce Richardson 
1588c1d14583SBruce Richardson 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1589c1d14583SBruce Richardson 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1590c1d14583SBruce Richardson 	qinfo->conf.offloads = txq->offloads;
1591c1d14583SBruce Richardson 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1592c1d14583SBruce Richardson }
1593c1d14583SBruce Richardson 
1594c1d14583SBruce Richardson uint32_t
1595c1d14583SBruce Richardson ice_rx_queue_count(void *rx_queue)
1596c1d14583SBruce Richardson {
1597c1d14583SBruce Richardson #define ICE_RXQ_SCAN_INTERVAL 4
1598c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
1599c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
1600c1d14583SBruce Richardson 	uint16_t desc = 0;
1601c1d14583SBruce Richardson 
1602c1d14583SBruce Richardson 	rxq = rx_queue;
1603c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1604c1d14583SBruce Richardson 	while ((desc < rxq->nb_rx_desc) &&
1605c1d14583SBruce Richardson 	       rte_le_to_cpu_16(rxdp->wb.status_error0) &
1606c1d14583SBruce Richardson 	       (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1607c1d14583SBruce Richardson 		/**
		 * Check the DD bit of only every fourth Rx descriptor, to
		 * avoid polling the ring too frequently and degrading
		 * performance too much.
1611c1d14583SBruce Richardson 		 */
1612c1d14583SBruce Richardson 		desc += ICE_RXQ_SCAN_INTERVAL;
1613c1d14583SBruce Richardson 		rxdp += ICE_RXQ_SCAN_INTERVAL;
1614c1d14583SBruce Richardson 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1615c1d14583SBruce Richardson 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
1616c1d14583SBruce Richardson 				 desc - rxq->nb_rx_desc]);
1617c1d14583SBruce Richardson 	}
1618c1d14583SBruce Richardson 
1619c1d14583SBruce Richardson 	return desc;
1620c1d14583SBruce Richardson }
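/*
 * Example behavior (illustrative only): with rx_tail = 0, nb_rx_desc = 512
 * and ten descriptors written back by hardware, the loop above samples
 * descriptors 0, 4 and 8, finds DD clear at descriptor 12 and returns 12;
 * the count is quantized to ICE_RXQ_SCAN_INTERVAL, trading accuracy for
 * fewer descriptor reads.
 */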
1621c1d14583SBruce Richardson 
1622c1d14583SBruce Richardson #define ICE_RX_FLEX_ERR0_BITS	\
1623c1d14583SBruce Richardson 	((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |	\
1624c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
1625c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
1626c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
1627c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
1628c1d14583SBruce Richardson 	 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1629c1d14583SBruce Richardson 
1630c1d14583SBruce Richardson /* Rx L3/L4 checksum */
1631c1d14583SBruce Richardson static inline uint64_t
1632c1d14583SBruce Richardson ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1633c1d14583SBruce Richardson {
1634c1d14583SBruce Richardson 	uint64_t flags = 0;
1635c1d14583SBruce Richardson 
1636c1d14583SBruce Richardson 	/* check if HW has decoded the packet and checksum */
1637c1d14583SBruce Richardson 	if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1638c1d14583SBruce Richardson 		return 0;
1639c1d14583SBruce Richardson 
1640c1d14583SBruce Richardson 	if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1641c1d14583SBruce Richardson 		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
1642c1d14583SBruce Richardson 			  RTE_MBUF_F_RX_L4_CKSUM_GOOD |
1643c1d14583SBruce Richardson 			  RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD);
1644c1d14583SBruce Richardson 		return flags;
1645c1d14583SBruce Richardson 	}
1646c1d14583SBruce Richardson 
1647c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1648c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1649c1d14583SBruce Richardson 	else
1650c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1651c1d14583SBruce Richardson 
1652c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1653c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1654c1d14583SBruce Richardson 	else
1655c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1656c1d14583SBruce Richardson 
1657c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1658c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1659c1d14583SBruce Richardson 
1660c1d14583SBruce Richardson 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1661c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
1662c1d14583SBruce Richardson 	else
1663c1d14583SBruce Richardson 		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
1664c1d14583SBruce Richardson 
1665c1d14583SBruce Richardson 	return flags;
1666c1d14583SBruce Richardson }
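/*
 * Example mapping (illustrative only): when L3L4P is set and the only error
 * bit set in stat_err0 is XSUM_IPE, the function above returns
 * RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
 * RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD; a descriptor with no error bits takes
 * the fast path and reports all three checksums as good.
 */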
1667c1d14583SBruce Richardson 
1668c1d14583SBruce Richardson static inline void
1669c1d14583SBruce Richardson ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1670c1d14583SBruce Richardson {
1671c1d14583SBruce Richardson 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1672c1d14583SBruce Richardson 	    (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1673c1d14583SBruce Richardson 		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1674c1d14583SBruce Richardson 		mb->vlan_tci =
1675c1d14583SBruce Richardson 			rte_le_to_cpu_16(rxdp->wb.l2tag1);
1676c1d14583SBruce Richardson 		PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1677c1d14583SBruce Richardson 			   rte_le_to_cpu_16(rxdp->wb.l2tag1));
1678c1d14583SBruce Richardson 	} else {
1679c1d14583SBruce Richardson 		mb->vlan_tci = 0;
1680c1d14583SBruce Richardson 	}
1681c1d14583SBruce Richardson 
1682c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1683c1d14583SBruce Richardson 	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1684c1d14583SBruce Richardson 	    (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1685c1d14583SBruce Richardson 		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
1686c1d14583SBruce Richardson 				RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
1687c1d14583SBruce Richardson 		mb->vlan_tci_outer = mb->vlan_tci;
1688c1d14583SBruce Richardson 		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1689c1d14583SBruce Richardson 		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1690c1d14583SBruce Richardson 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1691c1d14583SBruce Richardson 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1692c1d14583SBruce Richardson 	} else {
1693c1d14583SBruce Richardson 		mb->vlan_tci_outer = 0;
1694c1d14583SBruce Richardson 	}
1695c1d14583SBruce Richardson #endif
1696c1d14583SBruce Richardson 	PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1697c1d14583SBruce Richardson 		   mb->vlan_tci, mb->vlan_tci_outer);
1698c1d14583SBruce Richardson }
1699c1d14583SBruce Richardson 
1700c1d14583SBruce Richardson #define ICE_LOOK_AHEAD 8
1701c1d14583SBruce Richardson #if (ICE_LOOK_AHEAD != 8)
1702c1d14583SBruce Richardson #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1703c1d14583SBruce Richardson #endif
1704c1d14583SBruce Richardson 
1705c1d14583SBruce Richardson #define ICE_PTP_TS_VALID 0x1
1706c1d14583SBruce Richardson 
1707c1d14583SBruce Richardson static inline int
1708c1d14583SBruce Richardson ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1709c1d14583SBruce Richardson {
1710c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
1711c1d14583SBruce Richardson 	struct ice_rx_entry *rxep;
1712c1d14583SBruce Richardson 	struct rte_mbuf *mb;
1713c1d14583SBruce Richardson 	uint16_t stat_err0;
1714c1d14583SBruce Richardson 	uint16_t pkt_len, hdr_len;
1715c1d14583SBruce Richardson 	int32_t s[ICE_LOOK_AHEAD], nb_dd;
1716c1d14583SBruce Richardson 	int32_t i, j, nb_rx = 0;
1717c1d14583SBruce Richardson 	uint64_t pkt_flags = 0;
1718c1d14583SBruce Richardson 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1719c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1720c1d14583SBruce Richardson 	bool is_tsinit = false;
1721c1d14583SBruce Richardson 	uint64_t ts_ns;
1722c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
1723c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1724c1d14583SBruce Richardson 	struct ice_adapter *ad = rxq->vsi->adapter;
1725c1d14583SBruce Richardson #endif
1726c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1727c1d14583SBruce Richardson 	rxep = &rxq->sw_ring[rxq->rx_tail];
1728c1d14583SBruce Richardson 
1729c1d14583SBruce Richardson 	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1730c1d14583SBruce Richardson 
1731c1d14583SBruce Richardson 	/* Make sure there is at least 1 packet to receive */
1732c1d14583SBruce Richardson 	if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1733c1d14583SBruce Richardson 		return 0;
1734c1d14583SBruce Richardson 
1735c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1736c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1737c1d14583SBruce Richardson 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1738c1d14583SBruce Richardson 
1739c1d14583SBruce Richardson 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
			is_tsinit = true;
1741c1d14583SBruce Richardson 	}
1742c1d14583SBruce Richardson #endif
1743c1d14583SBruce Richardson 
1744c1d14583SBruce Richardson 	/**
1745c1d14583SBruce Richardson 	 * Scan LOOK_AHEAD descriptors at a time to determine which
1746c1d14583SBruce Richardson 	 * descriptors reference packets that are ready to be received.
1747c1d14583SBruce Richardson 	 */
1748c1d14583SBruce Richardson 	for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1749c1d14583SBruce Richardson 	     rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1750c1d14583SBruce Richardson 		/* Read desc statuses backwards to avoid race condition */
1751c1d14583SBruce Richardson 		for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1752c1d14583SBruce Richardson 			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1753c1d14583SBruce Richardson 
1754c1d14583SBruce Richardson 		rte_smp_rmb();
1755c1d14583SBruce Richardson 
1756c1d14583SBruce Richardson 		/* Compute how many status bits were set */
1757c1d14583SBruce Richardson 		for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1758c1d14583SBruce Richardson 			nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1759c1d14583SBruce Richardson 
1760c1d14583SBruce Richardson 		nb_rx += nb_dd;
1761c1d14583SBruce Richardson 
1762c1d14583SBruce Richardson 		/* Translate descriptor info to mbuf parameters */
1763c1d14583SBruce Richardson 		for (j = 0; j < nb_dd; j++) {
1764c1d14583SBruce Richardson 			mb = rxep[j].mbuf;
1769c1d14583SBruce Richardson 
1770c1d14583SBruce Richardson 			if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1771c1d14583SBruce Richardson 				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1772c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1773c1d14583SBruce Richardson 				mb->data_len = pkt_len;
1774c1d14583SBruce Richardson 				mb->pkt_len = pkt_len;
1775c1d14583SBruce Richardson 			} else {
1776c1d14583SBruce Richardson 				mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs);
1777c1d14583SBruce Richardson 				mb->next->next = NULL;
1778c1d14583SBruce Richardson 				hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) &
1779c1d14583SBruce Richardson 						ICE_RX_FLEX_DESC_HEADER_LEN_M;
1780c1d14583SBruce Richardson 				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1781c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1782c1d14583SBruce Richardson 				mb->data_len = hdr_len;
1783c1d14583SBruce Richardson 				mb->pkt_len = hdr_len + pkt_len;
1784c1d14583SBruce Richardson 				mb->next->data_len = pkt_len;
1785c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_RX
1786c1d14583SBruce Richardson 				rte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb));
1787c1d14583SBruce Richardson #endif
1788c1d14583SBruce Richardson 			}
1789c1d14583SBruce Richardson 
1790c1d14583SBruce Richardson 			mb->ol_flags = 0;
1791c1d14583SBruce Richardson 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1792c1d14583SBruce Richardson 			pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1793c1d14583SBruce Richardson 			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1794c1d14583SBruce Richardson 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1795c1d14583SBruce Richardson 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1796c1d14583SBruce Richardson 			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1797c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1798c1d14583SBruce Richardson 			if (ice_timestamp_dynflag > 0 &&
1799c1d14583SBruce Richardson 			    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
1800c1d14583SBruce Richardson 				rxq->time_high =
1801c1d14583SBruce Richardson 				rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1802c1d14583SBruce Richardson 				if (unlikely(is_tsinit)) {
1803c1d14583SBruce Richardson 					ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
1804c1d14583SBruce Richardson 									   rxq->time_high);
1805c1d14583SBruce Richardson 					rxq->hw_time_low = (uint32_t)ts_ns;
1806c1d14583SBruce Richardson 					rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
1807c1d14583SBruce Richardson 					is_tsinit = false;
1808c1d14583SBruce Richardson 				} else {
1809c1d14583SBruce Richardson 					if (rxq->time_high < rxq->hw_time_low)
1810c1d14583SBruce Richardson 						rxq->hw_time_high += 1;
1811c1d14583SBruce Richardson 					ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
1812c1d14583SBruce Richardson 					rxq->hw_time_low = rxq->time_high;
1813c1d14583SBruce Richardson 				}
1814c1d14583SBruce Richardson 				rxq->hw_time_update = rte_get_timer_cycles() /
1815c1d14583SBruce Richardson 						     (rte_get_timer_hz() / 1000);
1816c1d14583SBruce Richardson 				*RTE_MBUF_DYNFIELD(mb,
1817c1d14583SBruce Richardson 						   ice_timestamp_dynfield_offset,
1818c1d14583SBruce Richardson 						   rte_mbuf_timestamp_t *) = ts_ns;
1819c1d14583SBruce Richardson 				pkt_flags |= ice_timestamp_dynflag;
1820c1d14583SBruce Richardson 			}
1821c1d14583SBruce Richardson 
1822c1d14583SBruce Richardson 			if (ad->ptp_ena && ((mb->packet_type &
1823c1d14583SBruce Richardson 			    RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1824c1d14583SBruce Richardson 				rxq->time_high =
1825c1d14583SBruce Richardson 				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1826c1d14583SBruce Richardson 				mb->timesync = rxq->queue_id;
1827c1d14583SBruce Richardson 				pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
1828c1d14583SBruce Richardson 				if (rxdp[j].wb.time_stamp_low &
1829c1d14583SBruce Richardson 				    ICE_PTP_TS_VALID)
1830c1d14583SBruce Richardson 					pkt_flags |=
1831c1d14583SBruce Richardson 						RTE_MBUF_F_RX_IEEE1588_TMST;
1832c1d14583SBruce Richardson 			}
1833c1d14583SBruce Richardson #endif
1834c1d14583SBruce Richardson 			mb->ol_flags |= pkt_flags;
1835c1d14583SBruce Richardson 		}
1836c1d14583SBruce Richardson 
1837c1d14583SBruce Richardson 		for (j = 0; j < ICE_LOOK_AHEAD; j++)
1838c1d14583SBruce Richardson 			rxq->rx_stage[i + j] = rxep[j].mbuf;
1839c1d14583SBruce Richardson 
1840c1d14583SBruce Richardson 		if (nb_dd != ICE_LOOK_AHEAD)
1841c1d14583SBruce Richardson 			break;
1842c1d14583SBruce Richardson 	}
1843c1d14583SBruce Richardson 
1844c1d14583SBruce Richardson 	/* Clear software ring entries */
1845c1d14583SBruce Richardson 	for (i = 0; i < nb_rx; i++)
1846c1d14583SBruce Richardson 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1847c1d14583SBruce Richardson 
1848c1d14583SBruce Richardson 	PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1849c1d14583SBruce Richardson 		   "port_id=%u, queue_id=%u, nb_rx=%d",
1850c1d14583SBruce Richardson 		   rxq->port_id, rxq->queue_id, nb_rx);
1851c1d14583SBruce Richardson 
1852c1d14583SBruce Richardson 	return nb_rx;
1853c1d14583SBruce Richardson }
1854c1d14583SBruce Richardson 
1855c1d14583SBruce Richardson static inline uint16_t
1856c1d14583SBruce Richardson ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1857c1d14583SBruce Richardson 		       struct rte_mbuf **rx_pkts,
1858c1d14583SBruce Richardson 		       uint16_t nb_pkts)
1859c1d14583SBruce Richardson {
1860c1d14583SBruce Richardson 	uint16_t i;
1861c1d14583SBruce Richardson 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1862c1d14583SBruce Richardson 
1863c1d14583SBruce Richardson 	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1864c1d14583SBruce Richardson 
1865c1d14583SBruce Richardson 	for (i = 0; i < nb_pkts; i++)
1866c1d14583SBruce Richardson 		rx_pkts[i] = stage[i];
1867c1d14583SBruce Richardson 
1868c1d14583SBruce Richardson 	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1869c1d14583SBruce Richardson 	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1870c1d14583SBruce Richardson 
1871c1d14583SBruce Richardson 	return nb_pkts;
1872c1d14583SBruce Richardson }
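/*
 * Illustrative flow: ice_rx_scan_hw_ring() stages up to ICE_RX_MAX_BURST
 * mbufs in rxq->rx_stage; this helper then hands out
 * RTE_MIN(nb_pkts, rx_nb_avail) of them and advances rx_next_avail so the
 * next call resumes where this one stopped.
 */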
1873c1d14583SBruce Richardson 
1874c1d14583SBruce Richardson static inline int
1875c1d14583SBruce Richardson ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1876c1d14583SBruce Richardson {
1877c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
1878c1d14583SBruce Richardson 	struct ice_rx_entry *rxep;
1879c1d14583SBruce Richardson 	struct rte_mbuf *mb;
1880c1d14583SBruce Richardson 	uint16_t alloc_idx, i;
1881c1d14583SBruce Richardson 	uint64_t dma_addr;
1882c1d14583SBruce Richardson 	int diag, diag_pay;
1883c1d14583SBruce Richardson 	uint64_t pay_addr;
1884c1d14583SBruce Richardson 	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
1885c1d14583SBruce Richardson 
1886c1d14583SBruce Richardson 	/* Allocate buffers in bulk */
1887c1d14583SBruce Richardson 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1888c1d14583SBruce Richardson 			       (rxq->rx_free_thresh - 1));
1889c1d14583SBruce Richardson 	rxep = &rxq->sw_ring[alloc_idx];
1890c1d14583SBruce Richardson 	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1891c1d14583SBruce Richardson 				    rxq->rx_free_thresh);
1892c1d14583SBruce Richardson 	if (unlikely(diag != 0)) {
1893c1d14583SBruce Richardson 		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1894c1d14583SBruce Richardson 		return -ENOMEM;
1895c1d14583SBruce Richardson 	}
1896c1d14583SBruce Richardson 
1897c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1898c1d14583SBruce Richardson 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
1899c1d14583SBruce Richardson 				(void *)mbufs_pay, rxq->rx_free_thresh);
1900c1d14583SBruce Richardson 		if (unlikely(diag_pay != 0)) {
1901c1d14583SBruce Richardson 			rte_mempool_put_bulk(rxq->mp, (void *)rxep,
1902c1d14583SBruce Richardson 				    rxq->rx_free_thresh);
1903c1d14583SBruce Richardson 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
1904c1d14583SBruce Richardson 			return -ENOMEM;
1905c1d14583SBruce Richardson 		}
1906c1d14583SBruce Richardson 	}
1907c1d14583SBruce Richardson 
1908c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[alloc_idx];
1909c1d14583SBruce Richardson 	for (i = 0; i < rxq->rx_free_thresh; i++) {
1910c1d14583SBruce Richardson 		if (likely(i < (rxq->rx_free_thresh - 1)))
1911c1d14583SBruce Richardson 			/* Prefetch next mbuf */
1912c1d14583SBruce Richardson 			rte_prefetch0(rxep[i + 1].mbuf);
1913c1d14583SBruce Richardson 
1914c1d14583SBruce Richardson 		mb = rxep[i].mbuf;
1915c1d14583SBruce Richardson 		rte_mbuf_refcnt_set(mb, 1);
1916c1d14583SBruce Richardson 		mb->data_off = RTE_PKTMBUF_HEADROOM;
1917c1d14583SBruce Richardson 		mb->nb_segs = 1;
1918c1d14583SBruce Richardson 		mb->port = rxq->port_id;
1919c1d14583SBruce Richardson 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1920c1d14583SBruce Richardson 
1921c1d14583SBruce Richardson 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1922c1d14583SBruce Richardson 			mb->next = NULL;
1923c1d14583SBruce Richardson 			rxdp[i].read.hdr_addr = 0;
1924c1d14583SBruce Richardson 			rxdp[i].read.pkt_addr = dma_addr;
1925c1d14583SBruce Richardson 		} else {
1926c1d14583SBruce Richardson 			mb->next = mbufs_pay[i];
1927c1d14583SBruce Richardson 			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
1928c1d14583SBruce Richardson 			rxdp[i].read.hdr_addr = dma_addr;
1929c1d14583SBruce Richardson 			rxdp[i].read.pkt_addr = pay_addr;
1930c1d14583SBruce Richardson 		}
1931c1d14583SBruce Richardson 	}
1932c1d14583SBruce Richardson 
1933c1d14583SBruce Richardson 	/* Update Rx tail register */
1934c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1935c1d14583SBruce Richardson 
1936c1d14583SBruce Richardson 	rxq->rx_free_trigger =
1937c1d14583SBruce Richardson 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1938c1d14583SBruce Richardson 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1939c1d14583SBruce Richardson 		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1940c1d14583SBruce Richardson 
1941c1d14583SBruce Richardson 	return 0;
1942c1d14583SBruce Richardson }
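/*
 * Illustrative summary of the refill above: in normal mode each descriptor
 * carries one buffer (read.hdr_addr = 0, read.pkt_addr = data buffer);
 * in buffer-split mode read.hdr_addr points at a header mbuf from rxq->mp
 * and read.pkt_addr at a payload mbuf from the second segment pool
 * rxq->rxseg[1].mp, the two having been pre-linked through mb->next.
 */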
1943c1d14583SBruce Richardson 
1944c1d14583SBruce Richardson static inline uint16_t
1945c1d14583SBruce Richardson rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1946c1d14583SBruce Richardson {
1947c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1948c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
1949c1d14583SBruce Richardson 
1950c1d14583SBruce Richardson 	if (!nb_pkts)
1951c1d14583SBruce Richardson 		return 0;
1952c1d14583SBruce Richardson 
1953c1d14583SBruce Richardson 	if (rxq->rx_nb_avail)
1954c1d14583SBruce Richardson 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1955c1d14583SBruce Richardson 
1956c1d14583SBruce Richardson 	nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1957c1d14583SBruce Richardson 	rxq->rx_next_avail = 0;
1958c1d14583SBruce Richardson 	rxq->rx_nb_avail = nb_rx;
1959c1d14583SBruce Richardson 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1960c1d14583SBruce Richardson 
1961c1d14583SBruce Richardson 	if (rxq->rx_tail > rxq->rx_free_trigger) {
1962c1d14583SBruce Richardson 		if (ice_rx_alloc_bufs(rxq) != 0) {
1963c1d14583SBruce Richardson 			uint16_t i, j;
1964c1d14583SBruce Richardson 
1965c1d14583SBruce Richardson 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1966c1d14583SBruce Richardson 				rxq->rx_free_thresh;
1967c1d14583SBruce Richardson 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1968c1d14583SBruce Richardson 				   "port_id=%u, queue_id=%u",
1969c1d14583SBruce Richardson 				   rxq->port_id, rxq->queue_id);
1970c1d14583SBruce Richardson 			rxq->rx_nb_avail = 0;
1971c1d14583SBruce Richardson 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1972c1d14583SBruce Richardson 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1973c1d14583SBruce Richardson 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1974c1d14583SBruce Richardson 
1975c1d14583SBruce Richardson 			return 0;
1976c1d14583SBruce Richardson 		}
1977c1d14583SBruce Richardson 	}
1978c1d14583SBruce Richardson 
1979c1d14583SBruce Richardson 	if (rxq->rx_tail >= rxq->nb_rx_desc)
1980c1d14583SBruce Richardson 		rxq->rx_tail = 0;
1981c1d14583SBruce Richardson 
1982c1d14583SBruce Richardson 	if (rxq->rx_nb_avail)
1983c1d14583SBruce Richardson 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1984c1d14583SBruce Richardson 
1985c1d14583SBruce Richardson 	return 0;
1986c1d14583SBruce Richardson }
1987c1d14583SBruce Richardson 
1988c1d14583SBruce Richardson static uint16_t
1989c1d14583SBruce Richardson ice_recv_pkts_bulk_alloc(void *rx_queue,
1990c1d14583SBruce Richardson 			 struct rte_mbuf **rx_pkts,
1991c1d14583SBruce Richardson 			 uint16_t nb_pkts)
1992c1d14583SBruce Richardson {
1993c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
1994c1d14583SBruce Richardson 	uint16_t n;
1995c1d14583SBruce Richardson 	uint16_t count;
1996c1d14583SBruce Richardson 
1997c1d14583SBruce Richardson 	if (unlikely(nb_pkts == 0))
1998c1d14583SBruce Richardson 		return nb_rx;
1999c1d14583SBruce Richardson 
2000c1d14583SBruce Richardson 	if (likely(nb_pkts <= ICE_RX_MAX_BURST))
2001c1d14583SBruce Richardson 		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2002c1d14583SBruce Richardson 
2003c1d14583SBruce Richardson 	while (nb_pkts) {
2004c1d14583SBruce Richardson 		n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
2005c1d14583SBruce Richardson 		count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2006c1d14583SBruce Richardson 		nb_rx = (uint16_t)(nb_rx + count);
2007c1d14583SBruce Richardson 		nb_pkts = (uint16_t)(nb_pkts - count);
2008c1d14583SBruce Richardson 		if (count < n)
2009c1d14583SBruce Richardson 			break;
2010c1d14583SBruce Richardson 	}
2011c1d14583SBruce Richardson 
2012c1d14583SBruce Richardson 	return nb_rx;
2013c1d14583SBruce Richardson }
2014c1d14583SBruce Richardson 
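/*
 * Illustrative sketch (not part of the driver): the slicing pattern used by
 * ice_recv_pkts_bulk_alloc() above, written against the public burst API.
 * A request larger than the PMD's staging burst is retried in
 * max-burst-sized slices until one slice comes back short. The helper name
 * and the port/queue parameters are placeholders.
 */
static uint16_t
example_chunked_rx(uint16_t port, uint16_t queue, struct rte_mbuf **pkts,
		   uint16_t want, uint16_t max_burst)
{
	uint16_t got = 0;

	while (want > 0) {
		uint16_t n = RTE_MIN(want, max_burst);
		uint16_t ret = rte_eth_rx_burst(port, queue, &pkts[got], n);

		got = (uint16_t)(got + ret);
		want = (uint16_t)(want - ret);
		if (ret < n) /* ring drained; stop early */
			break;
	}

	return got;
}
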
2015c1d14583SBruce Richardson static uint16_t
2016c1d14583SBruce Richardson ice_recv_scattered_pkts(void *rx_queue,
2017c1d14583SBruce Richardson 			struct rte_mbuf **rx_pkts,
2018c1d14583SBruce Richardson 			uint16_t nb_pkts)
2019c1d14583SBruce Richardson {
2020c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = rx_queue;
2021c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2022c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
2023c1d14583SBruce Richardson 	union ice_rx_flex_desc rxd;
2024c1d14583SBruce Richardson 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2025c1d14583SBruce Richardson 	struct ice_rx_entry *rxe;
2026c1d14583SBruce Richardson 	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
2027c1d14583SBruce Richardson 	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
2028c1d14583SBruce Richardson 	struct rte_mbuf *nmb; /* newly allocated mbuf */
2029c1d14583SBruce Richardson 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2030c1d14583SBruce Richardson 	uint16_t rx_id = rxq->rx_tail;
2031c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
2032c1d14583SBruce Richardson 	uint16_t nb_hold = 0;
2033c1d14583SBruce Richardson 	uint16_t rx_packet_len;
2034c1d14583SBruce Richardson 	uint16_t rx_stat_err0;
2035c1d14583SBruce Richardson 	uint64_t dma_addr;
2036c1d14583SBruce Richardson 	uint64_t pkt_flags;
2037c1d14583SBruce Richardson 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2038c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2039c1d14583SBruce Richardson 	bool is_tsinit = false;
2040c1d14583SBruce Richardson 	uint64_t ts_ns;
2041c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
2042c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2043c1d14583SBruce Richardson 	struct ice_adapter *ad = rxq->vsi->adapter;
2044c1d14583SBruce Richardson 
2045c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2046c1d14583SBruce Richardson 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2047c1d14583SBruce Richardson 
2048c1d14583SBruce Richardson 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
2049c1d14583SBruce Richardson 			is_tsinit = true;
2050c1d14583SBruce Richardson 	}
2051c1d14583SBruce Richardson #endif
2052c1d14583SBruce Richardson 
2053c1d14583SBruce Richardson 	while (nb_rx < nb_pkts) {
2054c1d14583SBruce Richardson 		rxdp = &rx_ring[rx_id];
2055c1d14583SBruce Richardson 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2056c1d14583SBruce Richardson 
2057c1d14583SBruce Richardson 		/* Check the DD bit first */
2058c1d14583SBruce Richardson 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2059c1d14583SBruce Richardson 			break;
2060c1d14583SBruce Richardson 
2061c1d14583SBruce Richardson 		/* allocate mbuf */
2062c1d14583SBruce Richardson 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2063c1d14583SBruce Richardson 		if (unlikely(!nmb)) {
2064c1d14583SBruce Richardson 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2065c1d14583SBruce Richardson 			break;
2066c1d14583SBruce Richardson 		}
2067c1d14583SBruce Richardson 		rxd = *rxdp; /* copy descriptor in ring to temp variable */
2068c1d14583SBruce Richardson 
2069c1d14583SBruce Richardson 		nb_hold++;
2070c1d14583SBruce Richardson 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2071c1d14583SBruce Richardson 		rx_id++;
2072c1d14583SBruce Richardson 		if (unlikely(rx_id == rxq->nb_rx_desc))
2073c1d14583SBruce Richardson 			rx_id = 0;
2074c1d14583SBruce Richardson 
2075c1d14583SBruce Richardson 		/* Prefetch next mbuf */
2076c1d14583SBruce Richardson 		rte_prefetch0(sw_ring[rx_id].mbuf);
2077c1d14583SBruce Richardson 
2078c1d14583SBruce Richardson 		/**
2079c1d14583SBruce Richardson 		 * When the next RX descriptor is on a cache line boundary,
2080c1d14583SBruce Richardson 		 * prefetch the next 4 RX descriptors and next 8 pointers
2081c1d14583SBruce Richardson 		 * to mbufs.
2082c1d14583SBruce Richardson 		 */
2083c1d14583SBruce Richardson 		if ((rx_id & 0x3) == 0) {
2084c1d14583SBruce Richardson 			rte_prefetch0(&rx_ring[rx_id]);
2085c1d14583SBruce Richardson 			rte_prefetch0(&sw_ring[rx_id]);
2086c1d14583SBruce Richardson 		}
2087c1d14583SBruce Richardson 
2088c1d14583SBruce Richardson 		rxm = rxe->mbuf;
2089c1d14583SBruce Richardson 		rxe->mbuf = nmb;
2090c1d14583SBruce Richardson 		dma_addr =
2091c1d14583SBruce Richardson 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2092c1d14583SBruce Richardson 
2093c1d14583SBruce Richardson 		/* Set data buffer address and data length of the mbuf */
2094c1d14583SBruce Richardson 		rxdp->read.hdr_addr = 0;
2095c1d14583SBruce Richardson 		rxdp->read.pkt_addr = dma_addr;
2096c1d14583SBruce Richardson 		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
2097c1d14583SBruce Richardson 				ICE_RX_FLX_DESC_PKT_LEN_M;
2098c1d14583SBruce Richardson 		rxm->data_len = rx_packet_len;
2099c1d14583SBruce Richardson 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2100c1d14583SBruce Richardson 
2101c1d14583SBruce Richardson 		/**
2102c1d14583SBruce Richardson 		 * If this is the first buffer of the received packet, set the
2103c1d14583SBruce Richardson 		 * pointer to the first mbuf of the packet and initialize its
2104c1d14583SBruce Richardson 		 * context. Otherwise, update the total length and the number
2105c1d14583SBruce Richardson 		 * of segments of the current scattered packet, and update the
2106c1d14583SBruce Richardson 		 * pointer to the last mbuf of the current packet.
2107c1d14583SBruce Richardson 		 */
2108c1d14583SBruce Richardson 		if (!first_seg) {
2109c1d14583SBruce Richardson 			first_seg = rxm;
2110c1d14583SBruce Richardson 			first_seg->nb_segs = 1;
2111c1d14583SBruce Richardson 			first_seg->pkt_len = rx_packet_len;
2112c1d14583SBruce Richardson 		} else {
2113c1d14583SBruce Richardson 			first_seg->pkt_len =
2114c1d14583SBruce Richardson 				(uint16_t)(first_seg->pkt_len +
2115c1d14583SBruce Richardson 					   rx_packet_len);
2116c1d14583SBruce Richardson 			first_seg->nb_segs++;
2117c1d14583SBruce Richardson 			last_seg->next = rxm;
2118c1d14583SBruce Richardson 		}
2119c1d14583SBruce Richardson 
2120c1d14583SBruce Richardson 		/**
2121c1d14583SBruce Richardson 		 * If this is not the last buffer of the received packet,
2122c1d14583SBruce Richardson 		 * update the pointer to the last mbuf of the current scattered
2123c1d14583SBruce Richardson 		 * packet and continue to parse the RX ring.
2124c1d14583SBruce Richardson 		 */
2125c1d14583SBruce Richardson 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
2126c1d14583SBruce Richardson 			last_seg = rxm;
2127c1d14583SBruce Richardson 			continue;
2128c1d14583SBruce Richardson 		}
2129c1d14583SBruce Richardson 
2130c1d14583SBruce Richardson 		/**
2131c1d14583SBruce Richardson 		 * This is the last buffer of the received packet. If the CRC
2132c1d14583SBruce Richardson 		 * is not stripped by the hardware:
2133c1d14583SBruce Richardson 		 *  - Subtract the CRC length from the total packet length.
2134c1d14583SBruce Richardson 		 *  - If the last buffer only contains the whole CRC or a part
2135c1d14583SBruce Richardson 		 *  of it, free the mbuf associated to the last buffer. If part
2136c1d14583SBruce Richardson 		 *  of the CRC is also contained in the previous mbuf, subtract
2137c1d14583SBruce Richardson 		 *  the length of that CRC part from the data length of the
2138c1d14583SBruce Richardson 		 *  previous mbuf.
2139c1d14583SBruce Richardson 		 */
2140c1d14583SBruce Richardson 		rxm->next = NULL;
2141c1d14583SBruce Richardson 		if (unlikely(rxq->crc_len > 0)) {
2142c1d14583SBruce Richardson 			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
2143c1d14583SBruce Richardson 			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
2144c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(rxm);
2145c1d14583SBruce Richardson 				first_seg->nb_segs--;
2146c1d14583SBruce Richardson 				last_seg->data_len =
2147c1d14583SBruce Richardson 					(uint16_t)(last_seg->data_len -
2148c1d14583SBruce Richardson 					(RTE_ETHER_CRC_LEN - rx_packet_len));
2149c1d14583SBruce Richardson 				last_seg->next = NULL;
2150c1d14583SBruce Richardson 			} else
2151c1d14583SBruce Richardson 				rxm->data_len = (uint16_t)(rx_packet_len -
2152c1d14583SBruce Richardson 							   RTE_ETHER_CRC_LEN);
2153c1d14583SBruce Richardson 		} else if (rx_packet_len == 0) {
2154c1d14583SBruce Richardson 			rte_pktmbuf_free_seg(rxm);
2155c1d14583SBruce Richardson 			first_seg->nb_segs--;
2156c1d14583SBruce Richardson 			last_seg->next = NULL;
2157c1d14583SBruce Richardson 		}
2158c1d14583SBruce Richardson 
2159c1d14583SBruce Richardson 		first_seg->port = rxq->port_id;
2160c1d14583SBruce Richardson 		first_seg->ol_flags = 0;
2161c1d14583SBruce Richardson 		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2162c1d14583SBruce Richardson 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2163c1d14583SBruce Richardson 		ice_rxd_to_vlan_tci(first_seg, &rxd);
2164c1d14583SBruce Richardson 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
2165c1d14583SBruce Richardson 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2166c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2167c1d14583SBruce Richardson 		if (ice_timestamp_dynflag > 0 &&
2168c1d14583SBruce Richardson 		    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
2169c1d14583SBruce Richardson 			rxq->time_high =
2170c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2171c1d14583SBruce Richardson 			if (unlikely(is_tsinit)) {
2172c1d14583SBruce Richardson 				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2173c1d14583SBruce Richardson 				rxq->hw_time_low = (uint32_t)ts_ns;
2174c1d14583SBruce Richardson 				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
2175c1d14583SBruce Richardson 				is_tsinit = false;
2176c1d14583SBruce Richardson 			} else {
2177c1d14583SBruce Richardson 				if (rxq->time_high < rxq->hw_time_low)
2178c1d14583SBruce Richardson 					rxq->hw_time_high += 1;
2179c1d14583SBruce Richardson 				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
2180c1d14583SBruce Richardson 				rxq->hw_time_low = rxq->time_high;
2181c1d14583SBruce Richardson 			}
2182c1d14583SBruce Richardson 			rxq->hw_time_update = rte_get_timer_cycles() /
2183c1d14583SBruce Richardson 					     (rte_get_timer_hz() / 1000);
2184c1d14583SBruce Richardson 			*RTE_MBUF_DYNFIELD(first_seg,
2185c1d14583SBruce Richardson 					   (ice_timestamp_dynfield_offset),
2186c1d14583SBruce Richardson 					   rte_mbuf_timestamp_t *) = ts_ns;
2187c1d14583SBruce Richardson 			pkt_flags |= ice_timestamp_dynflag;
2188c1d14583SBruce Richardson 		}
2189c1d14583SBruce Richardson 
2190c1d14583SBruce Richardson 		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
2191c1d14583SBruce Richardson 		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2192c1d14583SBruce Richardson 			rxq->time_high =
2193c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2194c1d14583SBruce Richardson 			first_seg->timesync = rxq->queue_id;
2195c1d14583SBruce Richardson 			pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2196c1d14583SBruce Richardson 		}
2197c1d14583SBruce Richardson #endif
2198c1d14583SBruce Richardson 		first_seg->ol_flags |= pkt_flags;
2199c1d14583SBruce Richardson 		/* Prefetch data of first segment, if configured to do so. */
2200c1d14583SBruce Richardson 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
2201c1d14583SBruce Richardson 					  first_seg->data_off));
2202c1d14583SBruce Richardson 		rx_pkts[nb_rx++] = first_seg;
2203c1d14583SBruce Richardson 		first_seg = NULL;
2204c1d14583SBruce Richardson 	}
2205c1d14583SBruce Richardson 
2206c1d14583SBruce Richardson 	/* Record index of the next RX descriptor to probe. */
2207c1d14583SBruce Richardson 	rxq->rx_tail = rx_id;
2208c1d14583SBruce Richardson 	rxq->pkt_first_seg = first_seg;
2209c1d14583SBruce Richardson 	rxq->pkt_last_seg = last_seg;
2210c1d14583SBruce Richardson 
2211c1d14583SBruce Richardson 	/**
2212c1d14583SBruce Richardson 	 * If the number of free RX descriptors is greater than the RX free
2213c1d14583SBruce Richardson 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2214c1d14583SBruce Richardson 	 * register. Update the RDT with the value of the last processed RX
2215c1d14583SBruce Richardson 	 * descriptor minus 1, to guarantee that the RDT register is never
2216c1d14583SBruce Richardson 	 * equal to the RDH register, which creates a "full" ring situation
2217c1d14583SBruce Richardson 	 * from the hardware point of view.
2218c1d14583SBruce Richardson 	 */
2219c1d14583SBruce Richardson 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2220c1d14583SBruce Richardson 	if (nb_hold > rxq->rx_free_thresh) {
2221c1d14583SBruce Richardson 		rx_id = (uint16_t)(rx_id == 0 ?
2222c1d14583SBruce Richardson 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2223c1d14583SBruce Richardson 		/* write TAIL register */
2224c1d14583SBruce Richardson 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2225c1d14583SBruce Richardson 		nb_hold = 0;
2226c1d14583SBruce Richardson 	}
2227c1d14583SBruce Richardson 	rxq->nb_rx_hold = nb_hold;
2228c1d14583SBruce Richardson 
2229c1d14583SBruce Richardson 	/* return received packet in the burst */
2230c1d14583SBruce Richardson 	return nb_rx;
2231c1d14583SBruce Richardson }
2232c1d14583SBruce Richardson 
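/*
 * Illustrative sketch (not part of the driver): the invariants that
 * ice_recv_scattered_pkts() above maintains on a reassembled chain --
 * pkt_len of the first segment equals the sum of all data_len fields and
 * nb_segs counts the links. A hypothetical debug check an application
 * could run on received packets.
 */
static int
example_check_chain(const struct rte_mbuf *m)
{
	const struct rte_mbuf *seg;
	uint32_t len = 0;
	uint16_t segs = 0;

	for (seg = m; seg != NULL; seg = seg->next) {
		len += seg->data_len;
		segs++;
	}

	return (len == m->pkt_len && segs == m->nb_segs) ? 0 : -1;
}
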
2233c1d14583SBruce Richardson const uint32_t *
2234c1d14583SBruce Richardson ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
2235c1d14583SBruce Richardson {
2236c1d14583SBruce Richardson 	struct ice_adapter *ad =
2237c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2238c1d14583SBruce Richardson 	const uint32_t *ptypes;
2239c1d14583SBruce Richardson 
2240c1d14583SBruce Richardson 	static const uint32_t ptypes_os[] = {
2241c1d14583SBruce Richardson 		/* refers to ice_get_default_pkt_type() */
2242c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER,
2243c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_TIMESYNC,
2244c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_LLDP,
2245c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_ARP,
2246c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2247c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2248c1d14583SBruce Richardson 		RTE_PTYPE_L4_FRAG,
2249c1d14583SBruce Richardson 		RTE_PTYPE_L4_ICMP,
2250c1d14583SBruce Richardson 		RTE_PTYPE_L4_NONFRAG,
2251c1d14583SBruce Richardson 		RTE_PTYPE_L4_SCTP,
2252c1d14583SBruce Richardson 		RTE_PTYPE_L4_TCP,
2253c1d14583SBruce Richardson 		RTE_PTYPE_L4_UDP,
2254c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GRENAT,
2255c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_IP,
2256c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L2_ETHER,
2257c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2258c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2259c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_FRAG,
2260c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_ICMP,
2261c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_NONFRAG,
2262c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_SCTP,
2263c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_TCP,
2264c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_UDP,
2265c1d14583SBruce Richardson 	};
2266c1d14583SBruce Richardson 
2267c1d14583SBruce Richardson 	static const uint32_t ptypes_comms[] = {
2268c1d14583SBruce Richardson 		/* refers to ice_get_default_pkt_type() */
2269c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER,
2270c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_TIMESYNC,
2271c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_LLDP,
2272c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_ARP,
2273c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2274c1d14583SBruce Richardson 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2275c1d14583SBruce Richardson 		RTE_PTYPE_L4_FRAG,
2276c1d14583SBruce Richardson 		RTE_PTYPE_L4_ICMP,
2277c1d14583SBruce Richardson 		RTE_PTYPE_L4_NONFRAG,
2278c1d14583SBruce Richardson 		RTE_PTYPE_L4_SCTP,
2279c1d14583SBruce Richardson 		RTE_PTYPE_L4_TCP,
2280c1d14583SBruce Richardson 		RTE_PTYPE_L4_UDP,
2281c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GRENAT,
2282c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_IP,
2283c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L2_ETHER,
2284c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2285c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2286c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_FRAG,
2287c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_ICMP,
2288c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_NONFRAG,
2289c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_SCTP,
2290c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_TCP,
2291c1d14583SBruce Richardson 		RTE_PTYPE_INNER_L4_UDP,
2292c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GTPC,
2293c1d14583SBruce Richardson 		RTE_PTYPE_TUNNEL_GTPU,
2294c1d14583SBruce Richardson 		RTE_PTYPE_L2_ETHER_PPPOE,
2295c1d14583SBruce Richardson 	};
2296c1d14583SBruce Richardson 
2297c1d14583SBruce Richardson 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) {
2298c1d14583SBruce Richardson 		*no_of_elements = RTE_DIM(ptypes_comms);
2299c1d14583SBruce Richardson 		ptypes = ptypes_comms;
2300c1d14583SBruce Richardson 	} else {
2301c1d14583SBruce Richardson 		*no_of_elements = RTE_DIM(ptypes_os);
2302c1d14583SBruce Richardson 		ptypes = ptypes_os;
2303c1d14583SBruce Richardson 	}
2304c1d14583SBruce Richardson 
2305c1d14583SBruce Richardson 	if (dev->rx_pkt_burst == ice_recv_pkts ||
2306c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2307c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts)
2308c1d14583SBruce Richardson 		return ptypes;
2309c1d14583SBruce Richardson 
2310c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
2311c1d14583SBruce Richardson 	if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2312c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2313c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
2314c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2315c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2316c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2317c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2318c1d14583SBruce Richardson #endif
2319c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2320c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2321c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2322c1d14583SBruce Richardson 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2323c1d14583SBruce Richardson 		return ptypes;
2324c1d14583SBruce Richardson #endif
2325c1d14583SBruce Richardson 
2326c1d14583SBruce Richardson 	return NULL;
2327c1d14583SBruce Richardson }
2328c1d14583SBruce Richardson 
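/*
 * Illustrative sketch (not part of the driver): how an application consumes
 * the table returned above, through the public API rather than the driver
 * callback. Returns how many L4 packet types the port reports; the array
 * size is an arbitrary example value.
 */
static int
example_count_l4_ptypes(uint16_t port_id)
{
	uint32_t ptypes[32];

	/* a negative return value means the query itself failed */
	return rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
						ptypes, RTE_DIM(ptypes));
}
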
2329c1d14583SBruce Richardson int
2330c1d14583SBruce Richardson ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2331c1d14583SBruce Richardson {
2332c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
2333c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = rx_queue;
2334c1d14583SBruce Richardson 	uint32_t desc;
2335c1d14583SBruce Richardson 
2336c1d14583SBruce Richardson 	if (unlikely(offset >= rxq->nb_rx_desc))
2337c1d14583SBruce Richardson 		return -EINVAL;
2338c1d14583SBruce Richardson 
2339c1d14583SBruce Richardson 	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2340c1d14583SBruce Richardson 		return RTE_ETH_RX_DESC_UNAVAIL;
2341c1d14583SBruce Richardson 
2342c1d14583SBruce Richardson 	desc = rxq->rx_tail + offset;
2343c1d14583SBruce Richardson 	if (desc >= rxq->nb_rx_desc)
2344c1d14583SBruce Richardson 		desc -= rxq->nb_rx_desc;
2345c1d14583SBruce Richardson 
2346c1d14583SBruce Richardson 	rxdp = &rxq->rx_ring[desc];
2347c1d14583SBruce Richardson 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2348c1d14583SBruce Richardson 	    (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2349c1d14583SBruce Richardson 		return RTE_ETH_RX_DESC_DONE;
2350c1d14583SBruce Richardson 
2351c1d14583SBruce Richardson 	return RTE_ETH_RX_DESC_AVAIL;
2352c1d14583SBruce Richardson }
2353c1d14583SBruce Richardson 
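/*
 * Illustrative sketch (not part of the driver): polling the callback above
 * through the public API to estimate how many completed descriptors are
 * waiting for the application. Port/queue numbers are placeholders.
 */
static uint16_t
example_rx_backlog(uint16_t port, uint16_t queue, uint16_t ring_size)
{
	uint16_t off;

	for (off = 0; off < ring_size; off++) {
		if (rte_eth_rx_descriptor_status(port, queue, off) !=
		    RTE_ETH_RX_DESC_DONE)
			break;
	}

	return off; /* number of descriptors already written back */
}
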
2354c1d14583SBruce Richardson int
2355c1d14583SBruce Richardson ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2356c1d14583SBruce Richardson {
2357c038157aSBruce Richardson 	struct ci_tx_queue *txq = tx_queue;
2358c1d14583SBruce Richardson 	volatile uint64_t *status;
2359c1d14583SBruce Richardson 	uint64_t mask, expect;
2360c1d14583SBruce Richardson 	uint32_t desc;
2361c1d14583SBruce Richardson 
2362c1d14583SBruce Richardson 	if (unlikely(offset >= txq->nb_tx_desc))
2363c1d14583SBruce Richardson 		return -EINVAL;
2364c1d14583SBruce Richardson 
2365c1d14583SBruce Richardson 	desc = txq->tx_tail + offset;
2366c1d14583SBruce Richardson 	/* go to next desc that has the RS bit */
2367c1d14583SBruce Richardson 	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2368c1d14583SBruce Richardson 		txq->tx_rs_thresh;
2369c1d14583SBruce Richardson 	if (desc >= txq->nb_tx_desc) {
2370c1d14583SBruce Richardson 		desc -= txq->nb_tx_desc;
2371c1d14583SBruce Richardson 		if (desc >= txq->nb_tx_desc)
2372c1d14583SBruce Richardson 			desc -= txq->nb_tx_desc;
2373c1d14583SBruce Richardson 	}
2374c1d14583SBruce Richardson 
23754d0f54d9SBruce Richardson 	status = &txq->ice_tx_ring[desc].cmd_type_offset_bsz;
2376c1d14583SBruce Richardson 	mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2377c1d14583SBruce Richardson 	expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2378c1d14583SBruce Richardson 				  ICE_TXD_QW1_DTYPE_S);
2379c1d14583SBruce Richardson 	if ((*status & mask) == expect)
2380c1d14583SBruce Richardson 		return RTE_ETH_TX_DESC_DONE;
2381c1d14583SBruce Richardson 
2382c1d14583SBruce Richardson 	return RTE_ETH_TX_DESC_FULL;
2383c1d14583SBruce Richardson }
2384c1d14583SBruce Richardson 
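/*
 * Illustrative sketch (not part of the driver): the round-up-to-RS-boundary
 * arithmetic used by ice_tx_descriptor_status() above. Completion is only
 * recorded on descriptors carrying the RS bit, i.e. at multiples of
 * tx_rs_thresh, so a query for any offset is answered at the next boundary.
 */
static inline uint32_t
example_next_rs_desc(uint32_t desc, uint16_t rs_thresh, uint16_t nb_desc)
{
	desc = ((desc + rs_thresh - 1) / rs_thresh) * rs_thresh;
	/* the rounded value can land up to one full ring past the end */
	while (desc >= nb_desc)
		desc -= nb_desc;
	return desc;
}
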
2385c1d14583SBruce Richardson void
2386c1d14583SBruce Richardson ice_free_queues(struct rte_eth_dev *dev)
2387c1d14583SBruce Richardson {
2388c1d14583SBruce Richardson 	uint16_t i;
2389c1d14583SBruce Richardson 
2390c1d14583SBruce Richardson 	PMD_INIT_FUNC_TRACE();
2391c1d14583SBruce Richardson 
2392c1d14583SBruce Richardson 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2393c1d14583SBruce Richardson 		if (!dev->data->rx_queues[i])
2394c1d14583SBruce Richardson 			continue;
2395c1d14583SBruce Richardson 		ice_rx_queue_release(dev->data->rx_queues[i]);
2396c1d14583SBruce Richardson 		dev->data->rx_queues[i] = NULL;
2397c1d14583SBruce Richardson 	}
2398c1d14583SBruce Richardson 	dev->data->nb_rx_queues = 0;
2399c1d14583SBruce Richardson 
2400c1d14583SBruce Richardson 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2401c1d14583SBruce Richardson 		if (!dev->data->tx_queues[i])
2402c1d14583SBruce Richardson 			continue;
2403c1d14583SBruce Richardson 		ice_tx_queue_release(dev->data->tx_queues[i]);
2404c1d14583SBruce Richardson 		dev->data->tx_queues[i] = NULL;
2405c1d14583SBruce Richardson 	}
2406c1d14583SBruce Richardson 	dev->data->nb_tx_queues = 0;
2407c1d14583SBruce Richardson }
2408c1d14583SBruce Richardson 
2409c1d14583SBruce Richardson #define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
2410c1d14583SBruce Richardson #define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
2411c1d14583SBruce Richardson 
2412c1d14583SBruce Richardson int
2413c1d14583SBruce Richardson ice_fdir_setup_tx_resources(struct ice_pf *pf)
2414c1d14583SBruce Richardson {
2415c038157aSBruce Richardson 	struct ci_tx_queue *txq;
2416c1d14583SBruce Richardson 	const struct rte_memzone *tz = NULL;
2417c1d14583SBruce Richardson 	uint32_t ring_size;
2418c1d14583SBruce Richardson 	struct rte_eth_dev *dev;
2419c1d14583SBruce Richardson 
2420c1d14583SBruce Richardson 	if (!pf) {
2421c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "PF is not available");
2422c1d14583SBruce Richardson 		return -EINVAL;
2423c1d14583SBruce Richardson 	}
2424c1d14583SBruce Richardson 
2425c1d14583SBruce Richardson 	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2426c1d14583SBruce Richardson 
2427c1d14583SBruce Richardson 	/* Allocate the TX queue data structure. */
2428c1d14583SBruce Richardson 	txq = rte_zmalloc_socket("ice fdir tx queue",
2429c038157aSBruce Richardson 				 sizeof(struct ci_tx_queue),
2430c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
2431c1d14583SBruce Richardson 				 SOCKET_ID_ANY);
2432c1d14583SBruce Richardson 	if (!txq) {
2433c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2434c1d14583SBruce Richardson 			    "tx queue structure.");
2435c1d14583SBruce Richardson 		return -ENOMEM;
2436c1d14583SBruce Richardson 	}
2437c1d14583SBruce Richardson 
2438c1d14583SBruce Richardson 	/* Allocate TX hardware ring descriptors. */
2439c1d14583SBruce Richardson 	ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2440c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2441c1d14583SBruce Richardson 
2442c1d14583SBruce Richardson 	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2443c1d14583SBruce Richardson 				      ICE_FDIR_QUEUE_ID, ring_size,
2444c1d14583SBruce Richardson 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2445c1d14583SBruce Richardson 	if (!tz) {
2446c1d14583SBruce Richardson 		ice_tx_queue_release(txq);
2447c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2448c1d14583SBruce Richardson 		return -ENOMEM;
2449c1d14583SBruce Richardson 	}
2450c1d14583SBruce Richardson 
2451c1d14583SBruce Richardson 	txq->mz = tz;
2452c1d14583SBruce Richardson 	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2453c1d14583SBruce Richardson 	txq->queue_id = ICE_FDIR_QUEUE_ID;
2454c1d14583SBruce Richardson 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
24554d0f54d9SBruce Richardson 	txq->ice_vsi = pf->fdir.fdir_vsi;
2456c1d14583SBruce Richardson 
2457c1d14583SBruce Richardson 	txq->tx_ring_dma = tz->iova;
24584d0f54d9SBruce Richardson 	txq->ice_tx_ring = (struct ice_tx_desc *)tz->addr;
2459c1d14583SBruce Richardson 	/*
2460c1d14583SBruce Richardson 	 * No software ring needs to be allocated or reset for the FDIR
2461c1d14583SBruce Richardson 	 * program queue; just mark the queue as configured.
2462c1d14583SBruce Richardson 	 */
2463c1d14583SBruce Richardson 	txq->q_set = true;
2464c1d14583SBruce Richardson 	pf->fdir.txq = txq;
2465c1d14583SBruce Richardson 
2466c1d14583SBruce Richardson 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2467c1d14583SBruce Richardson 
2468c1d14583SBruce Richardson 	return ICE_SUCCESS;
2469c1d14583SBruce Richardson }
2470c1d14583SBruce Richardson 
2471c1d14583SBruce Richardson int
2472c1d14583SBruce Richardson ice_fdir_setup_rx_resources(struct ice_pf *pf)
2473c1d14583SBruce Richardson {
2474c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
2475c1d14583SBruce Richardson 	const struct rte_memzone *rz = NULL;
2476c1d14583SBruce Richardson 	uint32_t ring_size;
2477c1d14583SBruce Richardson 	struct rte_eth_dev *dev;
2478c1d14583SBruce Richardson 
2479c1d14583SBruce Richardson 	if (!pf) {
2480c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "PF is not available");
2481c1d14583SBruce Richardson 		return -EINVAL;
2482c1d14583SBruce Richardson 	}
2483c1d14583SBruce Richardson 
2484c1d14583SBruce Richardson 	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2485c1d14583SBruce Richardson 
2486c1d14583SBruce Richardson 	/* Allocate the RX queue data structure. */
2487c1d14583SBruce Richardson 	rxq = rte_zmalloc_socket("ice fdir rx queue",
2488c1d14583SBruce Richardson 				 sizeof(struct ice_rx_queue),
2489c1d14583SBruce Richardson 				 RTE_CACHE_LINE_SIZE,
2490c1d14583SBruce Richardson 				 SOCKET_ID_ANY);
2491c1d14583SBruce Richardson 	if (!rxq) {
2492c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2493c1d14583SBruce Richardson 			    "rx queue structure.");
2494c1d14583SBruce Richardson 		return -ENOMEM;
2495c1d14583SBruce Richardson 	}
2496c1d14583SBruce Richardson 
2497c1d14583SBruce Richardson 	/* Allocate RX hardware ring descriptors. */
2498c1d14583SBruce Richardson 	ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2499c1d14583SBruce Richardson 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2500c1d14583SBruce Richardson 
2501c1d14583SBruce Richardson 	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2502c1d14583SBruce Richardson 				      ICE_FDIR_QUEUE_ID, ring_size,
2503c1d14583SBruce Richardson 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2504c1d14583SBruce Richardson 	if (!rz) {
2505c1d14583SBruce Richardson 		ice_rx_queue_release(rxq);
2506c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2507c1d14583SBruce Richardson 		return -ENOMEM;
2508c1d14583SBruce Richardson 	}
2509c1d14583SBruce Richardson 
2510c1d14583SBruce Richardson 	rxq->mz = rz;
2511c1d14583SBruce Richardson 	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2512c1d14583SBruce Richardson 	rxq->queue_id = ICE_FDIR_QUEUE_ID;
2513c1d14583SBruce Richardson 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2514c1d14583SBruce Richardson 	rxq->vsi = pf->fdir.fdir_vsi;
2515c1d14583SBruce Richardson 
2516c1d14583SBruce Richardson 	rxq->rx_ring_dma = rz->iova;
2517c1d14583SBruce Richardson 	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2518c1d14583SBruce Richardson 	       sizeof(union ice_32byte_rx_desc));
2519c1d14583SBruce Richardson 	rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2520c1d14583SBruce Richardson 
2521c1d14583SBruce Richardson 	/*
2522c1d14583SBruce Richardson 	 * No software ring needs to be allocated or reset for the FDIR
2523c1d14583SBruce Richardson 	 * RX queue; just mark the queue as configured.
2524c1d14583SBruce Richardson 	 */
2525c1d14583SBruce Richardson 	rxq->q_set = true;
2526c1d14583SBruce Richardson 	pf->fdir.rxq = rxq;
2527c1d14583SBruce Richardson 
2528c1d14583SBruce Richardson 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2529c1d14583SBruce Richardson 
2530c1d14583SBruce Richardson 	return ICE_SUCCESS;
2531c1d14583SBruce Richardson }
2532c1d14583SBruce Richardson 
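/*
 * Illustrative sketch (not part of the driver): the memzone-backed ring
 * allocation pattern shared by the two FDIR setup functions above -- align
 * the byte size, reserve DMA-able memory, then record the IOVA for hardware
 * and the virtual address for the driver. The zone name and parameters are
 * placeholders.
 */
static const struct rte_memzone *
example_reserve_ring(struct rte_eth_dev *dev, uint16_t queue_id,
		     size_t desc_size, uint16_t nb_desc)
{
	uint32_t ring_size = RTE_ALIGN(desc_size * nb_desc,
				       ICE_DMA_MEM_ALIGN);

	return rte_eth_dma_zone_reserve(dev, "example_ring", queue_id,
					ring_size, ICE_RING_BASE_ALIGN,
					SOCKET_ID_ANY);
}
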
2533c1d14583SBruce Richardson uint16_t
2534c1d14583SBruce Richardson ice_recv_pkts(void *rx_queue,
2535c1d14583SBruce Richardson 	      struct rte_mbuf **rx_pkts,
2536c1d14583SBruce Richardson 	      uint16_t nb_pkts)
2537c1d14583SBruce Richardson {
2538c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = rx_queue;
2539c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2540c1d14583SBruce Richardson 	volatile union ice_rx_flex_desc *rxdp;
2541c1d14583SBruce Richardson 	union ice_rx_flex_desc rxd;
2542c1d14583SBruce Richardson 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2543c1d14583SBruce Richardson 	struct ice_rx_entry *rxe;
2544c1d14583SBruce Richardson 	struct rte_mbuf *nmb; /* newly allocated mbuf */
2545c1d14583SBruce Richardson 	struct rte_mbuf *nmb_pay; /* newly allocated payload mbuf */
2546c1d14583SBruce Richardson 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2547c1d14583SBruce Richardson 	uint16_t rx_id = rxq->rx_tail;
2548c1d14583SBruce Richardson 	uint16_t nb_rx = 0;
2549c1d14583SBruce Richardson 	uint16_t nb_hold = 0;
2550c1d14583SBruce Richardson 	uint16_t rx_packet_len;
2551c1d14583SBruce Richardson 	uint16_t rx_header_len;
2552c1d14583SBruce Richardson 	uint16_t rx_stat_err0;
2553c1d14583SBruce Richardson 	uint64_t dma_addr;
2554c1d14583SBruce Richardson 	uint64_t pkt_flags;
2555c1d14583SBruce Richardson 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2556c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2557c1d14583SBruce Richardson 	bool is_tsinit = false;
2558c1d14583SBruce Richardson 	uint64_t ts_ns;
2559c1d14583SBruce Richardson 	struct ice_vsi *vsi = rxq->vsi;
2560c1d14583SBruce Richardson 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2561c1d14583SBruce Richardson 	struct ice_adapter *ad = rxq->vsi->adapter;
2562c1d14583SBruce Richardson 
2563c1d14583SBruce Richardson 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2564c1d14583SBruce Richardson 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2565c1d14583SBruce Richardson 
2566c1d14583SBruce Richardson 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
2567c1d14583SBruce Richardson 			is_tsinit = true;
2568c1d14583SBruce Richardson 	}
2569c1d14583SBruce Richardson #endif
2570c1d14583SBruce Richardson 
2571c1d14583SBruce Richardson 	while (nb_rx < nb_pkts) {
2572c1d14583SBruce Richardson 		rxdp = &rx_ring[rx_id];
2573c1d14583SBruce Richardson 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2574c1d14583SBruce Richardson 
2575c1d14583SBruce Richardson 		/* Check the DD bit first */
2576c1d14583SBruce Richardson 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2577c1d14583SBruce Richardson 			break;
2578c1d14583SBruce Richardson 
2579c1d14583SBruce Richardson 		/* allocate header mbuf */
2580c1d14583SBruce Richardson 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2581c1d14583SBruce Richardson 		if (unlikely(!nmb)) {
2582c1d14583SBruce Richardson 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2583c1d14583SBruce Richardson 			break;
2584c1d14583SBruce Richardson 		}
2585c1d14583SBruce Richardson 
2586c1d14583SBruce Richardson 		rxd = *rxdp; /* copy descriptor in ring to temp variable */
2587c1d14583SBruce Richardson 
2588c1d14583SBruce Richardson 		nb_hold++;
2589c1d14583SBruce Richardson 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2590c1d14583SBruce Richardson 		rx_id++;
2591c1d14583SBruce Richardson 		if (unlikely(rx_id == rxq->nb_rx_desc))
2592c1d14583SBruce Richardson 			rx_id = 0;
2593c1d14583SBruce Richardson 		rxm = rxe->mbuf;
2594c1d14583SBruce Richardson 		rxe->mbuf = nmb;
2595c1d14583SBruce Richardson 		dma_addr =
2596c1d14583SBruce Richardson 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2597c1d14583SBruce Richardson 
2598c1d14583SBruce Richardson 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
2599c1d14583SBruce Richardson 			/**
2600c1d14583SBruce Richardson 			 * fill the read format of the descriptor with the physical
2601c1d14583SBruce Richardson 			 * address of the newly allocated mbuf: nmb
2602c1d14583SBruce Richardson 			 */
2603c1d14583SBruce Richardson 			rxdp->read.hdr_addr = 0;
2604c1d14583SBruce Richardson 			rxdp->read.pkt_addr = dma_addr;
2605c1d14583SBruce Richardson 		} else {
2606c1d14583SBruce Richardson 			/* allocate payload mbuf */
2607c1d14583SBruce Richardson 			nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
2608c1d14583SBruce Richardson 			if (unlikely(!nmb_pay)) {
2609c1d14583SBruce Richardson 				rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2610c1d14583SBruce Richardson 				rxe->mbuf = NULL;
2611c1d14583SBruce Richardson 				nb_hold--;
2612c1d14583SBruce Richardson 				if (unlikely(rx_id == 0))
2613c1d14583SBruce Richardson 					rx_id = rxq->nb_rx_desc;
2614c1d14583SBruce Richardson 
2615c1d14583SBruce Richardson 				rx_id--;
2616c1d14583SBruce Richardson 				rte_pktmbuf_free(nmb);
2617c1d14583SBruce Richardson 				break;
2618c1d14583SBruce Richardson 			}
2619c1d14583SBruce Richardson 
2620c1d14583SBruce Richardson 			nmb->next = nmb_pay;
2621c1d14583SBruce Richardson 			nmb_pay->next = NULL;
2622c1d14583SBruce Richardson 
2623c1d14583SBruce Richardson 			/**
2624c1d14583SBruce Richardson 			 * fill the read format of the descriptor with the physical
2625c1d14583SBruce Richardson 			 * address of the newly allocated mbuf: nmb
2626c1d14583SBruce Richardson 			 */
2627c1d14583SBruce Richardson 			rxdp->read.hdr_addr = dma_addr;
2628c1d14583SBruce Richardson 			rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay));
2629c1d14583SBruce Richardson 		}
2630c1d14583SBruce Richardson 
2631c1d14583SBruce Richardson 		/* fill the old mbuf using the received descriptor: rxd */
2632c1d14583SBruce Richardson 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2633c1d14583SBruce Richardson 		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2634c1d14583SBruce Richardson 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
2635c1d14583SBruce Richardson 			rxm->nb_segs = 1;
2636c1d14583SBruce Richardson 			rxm->next = NULL;
2637c1d14583SBruce Richardson 			/* calculate rx_packet_len of the received pkt */
2638c1d14583SBruce Richardson 			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2639c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2640c1d14583SBruce Richardson 			rxm->data_len = rx_packet_len;
2641c1d14583SBruce Richardson 			rxm->pkt_len = rx_packet_len;
2642c1d14583SBruce Richardson 		} else {
2643c1d14583SBruce Richardson 			rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs);
2644c1d14583SBruce Richardson 			rxm->next->next = NULL;
2645c1d14583SBruce Richardson 			/* calculate rx_packet_len of the received pkt */
2646c1d14583SBruce Richardson 			rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) &
2647c1d14583SBruce Richardson 					ICE_RX_FLEX_DESC_HEADER_LEN_M;
2648c1d14583SBruce Richardson 			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2649c1d14583SBruce Richardson 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2650c1d14583SBruce Richardson 			rxm->data_len = rx_header_len;
2651c1d14583SBruce Richardson 			rxm->pkt_len = rx_header_len + rx_packet_len;
2652c1d14583SBruce Richardson 			rxm->next->data_len = rx_packet_len;
2653c1d14583SBruce Richardson 
2654c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_RX
2655c1d14583SBruce Richardson 			rte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm));
2656c1d14583SBruce Richardson #endif
2657c1d14583SBruce Richardson 		}
2658c1d14583SBruce Richardson 
2659c1d14583SBruce Richardson 		rxm->port = rxq->port_id;
2660c1d14583SBruce Richardson 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2661c1d14583SBruce Richardson 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2662c1d14583SBruce Richardson 		ice_rxd_to_vlan_tci(rxm, &rxd);
2663c1d14583SBruce Richardson 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
2664c1d14583SBruce Richardson 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2665c1d14583SBruce Richardson #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2666c1d14583SBruce Richardson 		if (ice_timestamp_dynflag > 0 &&
2667c1d14583SBruce Richardson 		    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
2668c1d14583SBruce Richardson 			rxq->time_high =
2669c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2670c1d14583SBruce Richardson 			if (unlikely(is_tsinit)) {
2671c1d14583SBruce Richardson 				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2672c1d14583SBruce Richardson 				rxq->hw_time_low = (uint32_t)ts_ns;
2673c1d14583SBruce Richardson 				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
2674c1d14583SBruce Richardson 				is_tsinit = false;
2675c1d14583SBruce Richardson 			} else {
2676c1d14583SBruce Richardson 				if (rxq->time_high < rxq->hw_time_low)
2677c1d14583SBruce Richardson 					rxq->hw_time_high += 1;
2678c1d14583SBruce Richardson 				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
2679c1d14583SBruce Richardson 				rxq->hw_time_low = rxq->time_high;
2680c1d14583SBruce Richardson 			}
2681c1d14583SBruce Richardson 			rxq->hw_time_update = rte_get_timer_cycles() /
2682c1d14583SBruce Richardson 					     (rte_get_timer_hz() / 1000);
2683c1d14583SBruce Richardson 			*RTE_MBUF_DYNFIELD(rxm,
2684c1d14583SBruce Richardson 					   (ice_timestamp_dynfield_offset),
2685c1d14583SBruce Richardson 					   rte_mbuf_timestamp_t *) = ts_ns;
2686c1d14583SBruce Richardson 			pkt_flags |= ice_timestamp_dynflag;
2687c1d14583SBruce Richardson 		}
2688c1d14583SBruce Richardson 
2689c1d14583SBruce Richardson 		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2690c1d14583SBruce Richardson 		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2691c1d14583SBruce Richardson 			rxq->time_high =
2692c1d14583SBruce Richardson 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2693c1d14583SBruce Richardson 			rxm->timesync = rxq->queue_id;
2694c1d14583SBruce Richardson 			pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2695c1d14583SBruce Richardson 		}
2696c1d14583SBruce Richardson #endif
2697c1d14583SBruce Richardson 		rxm->ol_flags |= pkt_flags;
2698c1d14583SBruce Richardson 		/* store the old mbuf in rx_pkts */
2699c1d14583SBruce Richardson 		rx_pkts[nb_rx++] = rxm;
2700c1d14583SBruce Richardson 	}
2701c1d14583SBruce Richardson 
2702c1d14583SBruce Richardson 	rxq->rx_tail = rx_id;
2703c1d14583SBruce Richardson 	/**
2704c1d14583SBruce Richardson 	 * If the number of free RX descriptors is greater than the RX free
2705c1d14583SBruce Richardson 	 * threshold of the queue, advance the receive tail register of the queue.
2706c1d14583SBruce Richardson 	 * Update that register with the value of the last processed RX
2707c1d14583SBruce Richardson 	 * descriptor minus 1.
2708c1d14583SBruce Richardson 	 */
2709c1d14583SBruce Richardson 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2710c1d14583SBruce Richardson 	if (nb_hold > rxq->rx_free_thresh) {
2711c1d14583SBruce Richardson 		rx_id = (uint16_t)(rx_id == 0 ?
2712c1d14583SBruce Richardson 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2713c1d14583SBruce Richardson 		/* write TAIL register */
2714c1d14583SBruce Richardson 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2715c1d14583SBruce Richardson 		nb_hold = 0;
2716c1d14583SBruce Richardson 	}
2717c1d14583SBruce Richardson 	rxq->nb_rx_hold = nb_hold;
2718c1d14583SBruce Richardson 
2719c1d14583SBruce Richardson 	/* return received packet in the burst */
2720c1d14583SBruce Richardson 	return nb_rx;
2721c1d14583SBruce Richardson }
2722c1d14583SBruce Richardson 
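/*
 * Illustrative sketch (not part of the driver): how an application requests
 * the header/payload split path taken above. Two mempools are supplied, one
 * per segment; the driver then posts the header buffer in hdr_addr and the
 * payload buffer in pkt_addr, as done in the receive loop. Assumes the port
 * was configured with RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT; error handling is
 * omitted and all names are placeholders.
 */
static int
example_setup_buffer_split(uint16_t port, uint16_t queue, uint16_t nb_desc,
			   struct rte_mempool *hdr_mp,
			   struct rte_mempool *pay_mp)
{
	union rte_eth_rxseg rx_seg[2];
	struct rte_eth_rxconf rxconf;

	memset(rx_seg, 0, sizeof(rx_seg));
	memset(&rxconf, 0, sizeof(rxconf));

	rx_seg[0].split.mp = hdr_mp; /* header segment pool */
	rx_seg[1].split.mp = pay_mp; /* payload segment pool */

	rxconf.rx_seg = rx_seg;
	rxconf.rx_nseg = 2;
	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;

	/* the mempool argument must be NULL when rx_seg/rx_nseg are used */
	return rte_eth_rx_queue_setup(port, queue, nb_desc, SOCKET_ID_ANY,
				      &rxconf, NULL);
}
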
2723c1d14583SBruce Richardson static inline void
2724c1d14583SBruce Richardson ice_parse_tunneling_params(uint64_t ol_flags,
2725c1d14583SBruce Richardson 			    union ice_tx_offload tx_offload,
2726c1d14583SBruce Richardson 			    uint32_t *cd_tunneling)
2727c1d14583SBruce Richardson {
2728c1d14583SBruce Richardson 	/* EIPT: External (outer) IP header type */
2729c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
2730c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2731c1d14583SBruce Richardson 	else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
2732c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2733c1d14583SBruce Richardson 	else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
2734c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2735c1d14583SBruce Richardson 
2736c1d14583SBruce Richardson 	/* EIPLEN: External (outer) IP header length, in DWords */
2737c1d14583SBruce Richardson 	*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2738c1d14583SBruce Richardson 		ICE_TXD_CTX_QW0_EIPLEN_S;
2739c1d14583SBruce Richardson 
2740c1d14583SBruce Richardson 	/* L4TUNT: L4 Tunneling Type */
2741c1d14583SBruce Richardson 	switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2742c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_IPIP:
2743c1d14583SBruce Richardson 		/* for non-UDP/GRE tunneling, set to 00b */
2744c1d14583SBruce Richardson 		break;
2745c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
2746c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
2747c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_GTP:
2748c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
2749c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2750c1d14583SBruce Richardson 		break;
2751c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TUNNEL_GRE:
2752c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2753c1d14583SBruce Richardson 		break;
2754c1d14583SBruce Richardson 	default:
2755c1d14583SBruce Richardson 		PMD_TX_LOG(ERR, "Tunnel type not supported");
2756c1d14583SBruce Richardson 		return;
2757c1d14583SBruce Richardson 	}
2758c1d14583SBruce Richardson 
2759c1d14583SBruce Richardson 	/* L4TUNLEN: L4 Tunneling Length, in Words
2760c1d14583SBruce Richardson 	 *
2761c1d14583SBruce Richardson 	 * We depend on the application to set rte_mbuf.l2_len correctly.
2762c1d14583SBruce Richardson 	 * For IP in GRE it should be set to the length of the GRE
2763c1d14583SBruce Richardson 	 * header;
2764c1d14583SBruce Richardson 	 * For MAC in GRE or MAC in UDP it should be set to the length
2765c1d14583SBruce Richardson 	 * of the GRE or UDP headers plus the inner MAC up to and including
2766c1d14583SBruce Richardson 	 * its last Ethertype.
2767c1d14583SBruce Richardson 	 * If MPLS labels exist, it should include them as well.
2768c1d14583SBruce Richardson 	 */
2769c1d14583SBruce Richardson 	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
2770c1d14583SBruce Richardson 		ICE_TXD_CTX_QW0_NATLEN_S;
2771c1d14583SBruce Richardson 
2772c1d14583SBruce Richardson 	/**
2773c1d14583SBruce Richardson 	 * Calculate the tunneling UDP checksum.
2774c1d14583SBruce Richardson 	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
2775c1d14583SBruce Richardson 	 */
2776c1d14583SBruce Richardson 	if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) &&
2777c1d14583SBruce Richardson 			(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) &&
2778c1d14583SBruce Richardson 			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
2779c1d14583SBruce Richardson 		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2780c1d14583SBruce Richardson }
2781c1d14583SBruce Richardson 
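/*
 * Illustrative sketch (not part of the driver): the mbuf fields an
 * application fills so the parsing above can build a correct context
 * descriptor for a VXLAN packet. The lengths are examples for untagged
 * Ethernet / IPv4 outer framing; per the L4TUNLEN comment above, l2_len
 * covers the UDP and VXLAN headers plus the inner MAC header.
 */
static void
example_set_vxlan_tx_offload(struct rte_mbuf *m)
{
	m->outer_l2_len = 14;   /* outer Ethernet */
	m->outer_l3_len = 20;   /* outer IPv4, encoded in EIPLEN */
	m->l2_len = 8 + 8 + 14; /* UDP + VXLAN + inner Ethernet, L4TUNLEN */
	m->l3_len = 20;         /* inner IPv4 */
	m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN |
		       RTE_MBUF_F_TX_OUTER_IPV4 |
		       RTE_MBUF_F_TX_OUTER_IP_CKSUM |
		       RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM;
}
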
2782c1d14583SBruce Richardson static inline void
2783c1d14583SBruce Richardson ice_txd_enable_checksum(uint64_t ol_flags,
2784c1d14583SBruce Richardson 			uint32_t *td_cmd,
2785c1d14583SBruce Richardson 			uint32_t *td_offset,
2786c1d14583SBruce Richardson 			union ice_tx_offload tx_offload)
2787c1d14583SBruce Richardson {
2788c1d14583SBruce Richardson 	/* Set MACLEN */
2789c1d14583SBruce Richardson 	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
2790c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l2_len >> 1)
2791c1d14583SBruce Richardson 			<< ICE_TX_DESC_LEN_MACLEN_S;
2792c1d14583SBruce Richardson 
2793c1d14583SBruce Richardson 	/* Enable L3 checksum offloads */
2794c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2795c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2796c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l3_len >> 2) <<
2797c1d14583SBruce Richardson 			ICE_TX_DESC_LEN_IPLEN_S;
2798c1d14583SBruce Richardson 	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2799c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2800c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l3_len >> 2) <<
2801c1d14583SBruce Richardson 			ICE_TX_DESC_LEN_IPLEN_S;
2802c1d14583SBruce Richardson 	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2803c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2804c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l3_len >> 2) <<
2805c1d14583SBruce Richardson 			ICE_TX_DESC_LEN_IPLEN_S;
2806c1d14583SBruce Richardson 	}
2807c1d14583SBruce Richardson 
2808c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2809c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2810c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l4_len >> 2) <<
2811c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2812c1d14583SBruce Richardson 		return;
2813c1d14583SBruce Richardson 	}
2814c1d14583SBruce Richardson 
2815c1d14583SBruce Richardson 	if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
2816c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2817c1d14583SBruce Richardson 		*td_offset |= (tx_offload.l4_len >> 2) <<
2818c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2819c1d14583SBruce Richardson 		return;
2820c1d14583SBruce Richardson 	}
2821c1d14583SBruce Richardson 
2822c1d14583SBruce Richardson 	/* Enable L4 checksum offloads */
2823c1d14583SBruce Richardson 	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2824c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_TCP_CKSUM:
2825c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2826c1d14583SBruce Richardson 		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2827c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2828c1d14583SBruce Richardson 		break;
2829c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_SCTP_CKSUM:
2830c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2831c1d14583SBruce Richardson 		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2832c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2833c1d14583SBruce Richardson 		break;
2834c1d14583SBruce Richardson 	case RTE_MBUF_F_TX_UDP_CKSUM:
2835c1d14583SBruce Richardson 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2836c1d14583SBruce Richardson 		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2837c1d14583SBruce Richardson 			      ICE_TX_DESC_LEN_L4_LEN_S;
2838c1d14583SBruce Richardson 		break;
2839c1d14583SBruce Richardson 	default:
2840c1d14583SBruce Richardson 		break;
2841c1d14583SBruce Richardson 	}
2842c1d14583SBruce Richardson }
2843c1d14583SBruce Richardson 
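/*
 * Illustrative sketch (not part of the driver): the offset and command bits
 * the function above produces for a plain TCP/IPv4 packet with a 14-byte L2
 * header and a 20-byte IP header. MACLEN is encoded in 2-byte words, IPLEN
 * and L4LEN in 4-byte words, as required by the descriptor format.
 */
static void
example_tcp_ipv4_offsets(uint32_t *td_cmd, uint32_t *td_offset)
{
	*td_offset = ((14 >> 1) << ICE_TX_DESC_LEN_MACLEN_S) | /* 7 words  */
		     ((20 >> 2) << ICE_TX_DESC_LEN_IPLEN_S) |  /* 5 dwords */
		     ((sizeof(struct rte_tcp_hdr) >> 2) <<
		      ICE_TX_DESC_LEN_L4_LEN_S);               /* 5 dwords */
	*td_cmd = ICE_TX_DESC_CMD_IIPT_IPV4_CSUM |
		  ICE_TX_DESC_CMD_L4T_EOFT_TCP;
}
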
2844c1d14583SBruce Richardson static inline int
2845c038157aSBruce Richardson ice_xmit_cleanup(struct ci_tx_queue *txq)
2846c1d14583SBruce Richardson {
28475cc9919fSBruce Richardson 	struct ci_tx_entry *sw_ring = txq->sw_ring;
28484d0f54d9SBruce Richardson 	volatile struct ice_tx_desc *txd = txq->ice_tx_ring;
2849c1d14583SBruce Richardson 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2850c1d14583SBruce Richardson 	uint16_t nb_tx_desc = txq->nb_tx_desc;
2851c1d14583SBruce Richardson 	uint16_t desc_to_clean_to;
2852c1d14583SBruce Richardson 	uint16_t nb_tx_to_clean;
2853c1d14583SBruce Richardson 
2854c1d14583SBruce Richardson 	/* Determine the last descriptor needing to be cleaned */
2855c1d14583SBruce Richardson 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2856c1d14583SBruce Richardson 	if (desc_to_clean_to >= nb_tx_desc)
2857c1d14583SBruce Richardson 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2858c1d14583SBruce Richardson 
2859c1d14583SBruce Richardson 	/* Check to make sure the last descriptor to clean is done */
2860c1d14583SBruce Richardson 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2861c1d14583SBruce Richardson 	if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2862c1d14583SBruce Richardson 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2863c1d14583SBruce Richardson 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2864c1d14583SBruce Richardson 			   "(port=%d queue=%d) value=0x%"PRIx64,
2865c1d14583SBruce Richardson 			   desc_to_clean_to,
2866c1d14583SBruce Richardson 			   txq->port_id, txq->queue_id,
2867c1d14583SBruce Richardson 			   txd[desc_to_clean_to].cmd_type_offset_bsz);
2868c1d14583SBruce Richardson 		/* Failed to clean any descriptors */
2869c1d14583SBruce Richardson 		return -1;
2870c1d14583SBruce Richardson 	}
2871c1d14583SBruce Richardson 
2872c1d14583SBruce Richardson 	/* Figure out how many descriptors will be cleaned */
2873c1d14583SBruce Richardson 	if (last_desc_cleaned > desc_to_clean_to)
2874c1d14583SBruce Richardson 		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2875c1d14583SBruce Richardson 					    desc_to_clean_to);
2876c1d14583SBruce Richardson 	else
2877c1d14583SBruce Richardson 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2878c1d14583SBruce Richardson 					    last_desc_cleaned);
2879c1d14583SBruce Richardson 
2880c1d14583SBruce Richardson 	/* The last descriptor to clean is done, so that means all the
2881c1d14583SBruce Richardson 	 * descriptors from the last descriptor that was cleaned
2882c1d14583SBruce Richardson 	 * up to the last descriptor with the RS bit set
2883c1d14583SBruce Richardson 	 * are done. Only reset the threshold descriptor.
2884c1d14583SBruce Richardson 	 */
2885c1d14583SBruce Richardson 	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2886c1d14583SBruce Richardson 
2887c1d14583SBruce Richardson 	/* Update the txq to reflect the last descriptor that was cleaned */
2888c1d14583SBruce Richardson 	txq->last_desc_cleaned = desc_to_clean_to;
2889c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2890c1d14583SBruce Richardson 
2891c1d14583SBruce Richardson 	return 0;
2892c1d14583SBruce Richardson }
2893c1d14583SBruce Richardson 
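/*
 * Illustrative sketch (not part of the driver): the wrap-aware count of
 * descriptors reclaimed by ice_xmit_cleanup() above, extracted into a
 * hypothetical standalone helper.
 */
static inline uint16_t
example_nb_cleaned(uint16_t last_cleaned, uint16_t clean_to, uint16_t nb_desc)
{
	if (last_cleaned > clean_to) /* the cleaned range wraps the ring end */
		return (uint16_t)((nb_desc - last_cleaned) + clean_to);
	return (uint16_t)(clean_to - last_cleaned);
}
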
2894c1d14583SBruce Richardson /* Construct the tx flags */
2895c1d14583SBruce Richardson static inline uint64_t
2896c1d14583SBruce Richardson ice_build_ctob(uint32_t td_cmd,
2897c1d14583SBruce Richardson 	       uint32_t td_offset,
2898c1d14583SBruce Richardson 	       uint16_t size,
2899c1d14583SBruce Richardson 	       uint32_t td_tag)
2900c1d14583SBruce Richardson {
2901c1d14583SBruce Richardson 	return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2902c1d14583SBruce Richardson 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2903c1d14583SBruce Richardson 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2904c1d14583SBruce Richardson 				((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2905c1d14583SBruce Richardson 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2906c1d14583SBruce Richardson }
2907c1d14583SBruce Richardson 
2908c1d14583SBruce Richardson /* Check if the context descriptor is needed for TX offloading */
2909c1d14583SBruce Richardson static inline uint16_t
2910c1d14583SBruce Richardson ice_calc_context_desc(uint64_t flags)
2911c1d14583SBruce Richardson {
2912c1d14583SBruce Richardson 	static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
2913c1d14583SBruce Richardson 		RTE_MBUF_F_TX_UDP_SEG |
2914c1d14583SBruce Richardson 		RTE_MBUF_F_TX_QINQ |
2915c1d14583SBruce Richardson 		RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2916c1d14583SBruce Richardson 		RTE_MBUF_F_TX_TUNNEL_MASK |
2917c1d14583SBruce Richardson 		RTE_MBUF_F_TX_IEEE1588_TMST;
2918c1d14583SBruce Richardson 
2919c1d14583SBruce Richardson 	return (flags & mask) ? 1 : 0;
2920c1d14583SBruce Richardson }
2921c1d14583SBruce Richardson 
2922c1d14583SBruce Richardson /* set ice TSO context descriptor */
2923c1d14583SBruce Richardson static inline uint64_t
2924c1d14583SBruce Richardson ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2925c1d14583SBruce Richardson {
2926c1d14583SBruce Richardson 	uint64_t ctx_desc = 0;
2927c1d14583SBruce Richardson 	uint32_t cd_cmd, hdr_len, cd_tso_len;
2928c1d14583SBruce Richardson 
2929c1d14583SBruce Richardson 	if (!tx_offload.l4_len) {
2930c1d14583SBruce Richardson 		PMD_TX_LOG(DEBUG, "L4 length set to 0");
2931c1d14583SBruce Richardson 		return ctx_desc;
2932c1d14583SBruce Richardson 	}
2933c1d14583SBruce Richardson 
2934c1d14583SBruce Richardson 	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2935c1d14583SBruce Richardson 	hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
2936c1d14583SBruce Richardson 		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2937c1d14583SBruce Richardson 
2938c1d14583SBruce Richardson 	cd_cmd = ICE_TX_CTX_DESC_TSO;
2939c1d14583SBruce Richardson 	cd_tso_len = mbuf->pkt_len - hdr_len;
2940c1d14583SBruce Richardson 	ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2941c1d14583SBruce Richardson 		    ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2942c1d14583SBruce Richardson 		    ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2943c1d14583SBruce Richardson 
2944c1d14583SBruce Richardson 	return ctx_desc;
2945c1d14583SBruce Richardson }
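
/*
 * Worked example for ice_set_tso_ctx() (hypothetical sizes): a
 * non-tunneled mbuf with pkt_len = 9014, l2_len = 14, l3_len = 20 and
 * l4_len = 20 yields hdr_len = 54 and cd_tso_len = 8960. With
 * tso_segsz = 1460 the hardware emits seven segments (6 x 1460 + 200
 * payload bytes), replicating the 54-byte header in each.
 */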
2946c1d14583SBruce Richardson 
2947c1d14583SBruce Richardson /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2948c1d14583SBruce Richardson #define ICE_MAX_DATA_PER_TXD \
2949c1d14583SBruce Richardson 	(ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2950c1d14583SBruce Richardson /* Calculate the number of TX descriptors needed for each pkt */
2951c1d14583SBruce Richardson static inline uint16_t
2952c1d14583SBruce Richardson ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2953c1d14583SBruce Richardson {
2954c1d14583SBruce Richardson 	struct rte_mbuf *txd = tx_pkt;
2955c1d14583SBruce Richardson 	uint16_t count = 0;
2956c1d14583SBruce Richardson 
2957c1d14583SBruce Richardson 	while (txd != NULL) {
2958c1d14583SBruce Richardson 		count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2959c1d14583SBruce Richardson 		txd = txd->next;
2960c1d14583SBruce Richardson 	}
2961c1d14583SBruce Richardson 
2962c1d14583SBruce Richardson 	return count;
2963c1d14583SBruce Richardson }
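
/*
 * Worked example (hypothetical sizes): for a TSO chain of two segments
 * with data_len 40000 and 9000, the count is DIV_ROUND_UP(40000, 16383)
 * + DIV_ROUND_UP(9000, 16383) = 3 + 1 = 4 data descriptors, since no
 * single descriptor may carry more than ICE_MAX_DATA_PER_TXD bytes.
 */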
2964c1d14583SBruce Richardson 
2965c1d14583SBruce Richardson uint16_t
2966c1d14583SBruce Richardson ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2967c1d14583SBruce Richardson {
2968c038157aSBruce Richardson 	struct ci_tx_queue *txq;
29694d0f54d9SBruce Richardson 	volatile struct ice_tx_desc *ice_tx_ring;
2970c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txd;
29715cc9919fSBruce Richardson 	struct ci_tx_entry *sw_ring;
29725cc9919fSBruce Richardson 	struct ci_tx_entry *txe, *txn;
2973c1d14583SBruce Richardson 	struct rte_mbuf *tx_pkt;
2974c1d14583SBruce Richardson 	struct rte_mbuf *m_seg;
2975c1d14583SBruce Richardson 	uint32_t cd_tunneling_params;
2976c1d14583SBruce Richardson 	uint16_t tx_id;
2977c1d14583SBruce Richardson 	uint16_t nb_tx;
2978c1d14583SBruce Richardson 	uint16_t nb_used;
2979c1d14583SBruce Richardson 	uint16_t nb_ctx;
2980c1d14583SBruce Richardson 	uint32_t td_cmd = 0;
2981c1d14583SBruce Richardson 	uint32_t td_offset = 0;
2982c1d14583SBruce Richardson 	uint32_t td_tag = 0;
2983c1d14583SBruce Richardson 	uint16_t tx_last;
2984c1d14583SBruce Richardson 	uint16_t slen;
2985c1d14583SBruce Richardson 	uint64_t buf_dma_addr;
2986c1d14583SBruce Richardson 	uint64_t ol_flags;
2987c1d14583SBruce Richardson 	union ice_tx_offload tx_offload = {0};
2988c1d14583SBruce Richardson 
2989c1d14583SBruce Richardson 	txq = tx_queue;
2990c1d14583SBruce Richardson 	sw_ring = txq->sw_ring;
29914d0f54d9SBruce Richardson 	ice_tx_ring = txq->ice_tx_ring;
2992c1d14583SBruce Richardson 	tx_id = txq->tx_tail;
2993c1d14583SBruce Richardson 	txe = &sw_ring[tx_id];
2994c1d14583SBruce Richardson 
2995c1d14583SBruce Richardson 	/* Check if the descriptor ring needs to be cleaned. */
2996c1d14583SBruce Richardson 	if (txq->nb_tx_free < txq->tx_free_thresh)
2997c1d14583SBruce Richardson 		(void)ice_xmit_cleanup(txq);
2998c1d14583SBruce Richardson 
2999c1d14583SBruce Richardson 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
3000c1d14583SBruce Richardson 		tx_pkt = *tx_pkts++;
3001c1d14583SBruce Richardson 
3002c1d14583SBruce Richardson 		td_cmd = 0;
3003c1d14583SBruce Richardson 		td_tag = 0;
3004c1d14583SBruce Richardson 		td_offset = 0;
3005c1d14583SBruce Richardson 		ol_flags = tx_pkt->ol_flags;
3006c1d14583SBruce Richardson 		tx_offload.l2_len = tx_pkt->l2_len;
3007c1d14583SBruce Richardson 		tx_offload.l3_len = tx_pkt->l3_len;
3008c1d14583SBruce Richardson 		tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
3009c1d14583SBruce Richardson 		tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
3010c1d14583SBruce Richardson 		tx_offload.l4_len = tx_pkt->l4_len;
3011c1d14583SBruce Richardson 		tx_offload.tso_segsz = tx_pkt->tso_segsz;
3012c1d14583SBruce Richardson 		/* Calculate the number of context descriptors needed. */
3013c1d14583SBruce Richardson 		nb_ctx = ice_calc_context_desc(ol_flags);
3014c1d14583SBruce Richardson 
3015c1d14583SBruce Richardson 		/* The number of descriptors that must be allocated for
3016c1d14583SBruce Richardson 		 * a packet equals the number of segments of that packet
3017c1d14583SBruce Richardson 		 * plus the number of context descriptors, if needed.
3018c1d14583SBruce Richardson 		 * Recalculate the needed Tx descriptors when TSO is enabled,
3019c1d14583SBruce Richardson 		 * since the mbuf data size may exceed the maximum data size
3020c1d14583SBruce Richardson 		 * that hardware allows per Tx descriptor.
3021c1d14583SBruce Richardson 		 */
3022c1d14583SBruce Richardson 		if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
3023c1d14583SBruce Richardson 			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
3024c1d14583SBruce Richardson 					     nb_ctx);
3025c1d14583SBruce Richardson 		else
3026c1d14583SBruce Richardson 			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
3027c1d14583SBruce Richardson 		tx_last = (uint16_t)(tx_id + nb_used - 1);
3028c1d14583SBruce Richardson 
3029c1d14583SBruce Richardson 		/* Circular ring */
3030c1d14583SBruce Richardson 		if (tx_last >= txq->nb_tx_desc)
3031c1d14583SBruce Richardson 			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
3032c1d14583SBruce Richardson 
3033c1d14583SBruce Richardson 		if (nb_used > txq->nb_tx_free) {
3034c1d14583SBruce Richardson 			if (ice_xmit_cleanup(txq) != 0) {
3035c1d14583SBruce Richardson 				if (nb_tx == 0)
3036c1d14583SBruce Richardson 					return 0;
3037c1d14583SBruce Richardson 				goto end_of_tx;
3038c1d14583SBruce Richardson 			}
3039c1d14583SBruce Richardson 			if (unlikely(nb_used > txq->tx_rs_thresh)) {
3040c1d14583SBruce Richardson 				while (nb_used > txq->nb_tx_free) {
3041c1d14583SBruce Richardson 					if (ice_xmit_cleanup(txq) != 0) {
3042c1d14583SBruce Richardson 						if (nb_tx == 0)
3043c1d14583SBruce Richardson 							return 0;
3044c1d14583SBruce Richardson 						goto end_of_tx;
3045c1d14583SBruce Richardson 					}
3046c1d14583SBruce Richardson 				}
3047c1d14583SBruce Richardson 			}
3048c1d14583SBruce Richardson 		}
3049c1d14583SBruce Richardson 
3050c1d14583SBruce Richardson 		/* Descriptor based VLAN insertion */
3051c1d14583SBruce Richardson 		if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
3052c1d14583SBruce Richardson 			td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
3053c1d14583SBruce Richardson 			td_tag = tx_pkt->vlan_tci;
3054c1d14583SBruce Richardson 		}
3055c1d14583SBruce Richardson 
3056c1d14583SBruce Richardson 		/* Fill in tunneling parameters if necessary */
3057c1d14583SBruce Richardson 		cd_tunneling_params = 0;
3058c1d14583SBruce Richardson 		if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
3059c1d14583SBruce Richardson 			td_offset |= (tx_offload.outer_l2_len >> 1)
3060c1d14583SBruce Richardson 				<< ICE_TX_DESC_LEN_MACLEN_S;
3061c1d14583SBruce Richardson 			ice_parse_tunneling_params(ol_flags, tx_offload,
3062c1d14583SBruce Richardson 						   &cd_tunneling_params);
3063c1d14583SBruce Richardson 		}
3064c1d14583SBruce Richardson 
3065c1d14583SBruce Richardson 		/* Enable checksum offloading */
3066c1d14583SBruce Richardson 		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
3067c1d14583SBruce Richardson 			ice_txd_enable_checksum(ol_flags, &td_cmd,
3068c1d14583SBruce Richardson 						&td_offset, tx_offload);
3069c1d14583SBruce Richardson 
3070c1d14583SBruce Richardson 		if (nb_ctx) {
3071c1d14583SBruce Richardson 			/* Setup TX context descriptor if required */
3072c1d14583SBruce Richardson 			volatile struct ice_tx_ctx_desc *ctx_txd =
3073c1d14583SBruce Richardson 				(volatile struct ice_tx_ctx_desc *)
30744d0f54d9SBruce Richardson 					&ice_tx_ring[tx_id];
3075c1d14583SBruce Richardson 			uint16_t cd_l2tag2 = 0;
3076c1d14583SBruce Richardson 			uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
3077c1d14583SBruce Richardson 
3078c1d14583SBruce Richardson 			txn = &sw_ring[txe->next_id];
3079c1d14583SBruce Richardson 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
3080c1d14583SBruce Richardson 			if (txe->mbuf) {
3081c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(txe->mbuf);
3082c1d14583SBruce Richardson 				txe->mbuf = NULL;
3083c1d14583SBruce Richardson 			}
3084c1d14583SBruce Richardson 
3085c1d14583SBruce Richardson 			if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
3086c1d14583SBruce Richardson 				cd_type_cmd_tso_mss |=
3087c1d14583SBruce Richardson 					ice_set_tso_ctx(tx_pkt, tx_offload);
3088c1d14583SBruce Richardson 			else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
3089c1d14583SBruce Richardson 				cd_type_cmd_tso_mss |=
3090c1d14583SBruce Richardson 					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
3091c1d14583SBruce Richardson 					ICE_TXD_CTX_QW1_CMD_S) |
30924d0f54d9SBruce Richardson 					 (((uint64_t)txq->ice_vsi->adapter->ptp_tx_index <<
3093c1d14583SBruce Richardson 					 ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
3094c1d14583SBruce Richardson 
3095c1d14583SBruce Richardson 			ctx_txd->tunneling_params =
3096c1d14583SBruce Richardson 				rte_cpu_to_le_32(cd_tunneling_params);
3097c1d14583SBruce Richardson 
3098c1d14583SBruce Richardson 			/* TX context descriptor based double VLAN insert */
3099c1d14583SBruce Richardson 			if (ol_flags & RTE_MBUF_F_TX_QINQ) {
3100c1d14583SBruce Richardson 				cd_l2tag2 = tx_pkt->vlan_tci_outer;
3101c1d14583SBruce Richardson 				cd_type_cmd_tso_mss |=
3102c1d14583SBruce Richardson 					((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
3103c1d14583SBruce Richardson 					 ICE_TXD_CTX_QW1_CMD_S);
3104c1d14583SBruce Richardson 			}
3105c1d14583SBruce Richardson 			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
3106c1d14583SBruce Richardson 			ctx_txd->qw1 =
3107c1d14583SBruce Richardson 				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
3108c1d14583SBruce Richardson 
3109c1d14583SBruce Richardson 			txe->last_id = tx_last;
3110c1d14583SBruce Richardson 			tx_id = txe->next_id;
3111c1d14583SBruce Richardson 			txe = txn;
3112c1d14583SBruce Richardson 		}
3113c1d14583SBruce Richardson 		m_seg = tx_pkt;
3114c1d14583SBruce Richardson 
3115c1d14583SBruce Richardson 		do {
31164d0f54d9SBruce Richardson 			txd = &ice_tx_ring[tx_id];
3117c1d14583SBruce Richardson 			txn = &sw_ring[txe->next_id];
3118c1d14583SBruce Richardson 
3119c1d14583SBruce Richardson 			if (txe->mbuf)
3120c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(txe->mbuf);
3121c1d14583SBruce Richardson 			txe->mbuf = m_seg;
3122c1d14583SBruce Richardson 
3123c1d14583SBruce Richardson 			/* Setup TX Descriptor */
3124c1d14583SBruce Richardson 			slen = m_seg->data_len;
3125c1d14583SBruce Richardson 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
3126c1d14583SBruce Richardson 
3127c1d14583SBruce Richardson 			while ((ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) &&
3128c1d14583SBruce Richardson 				unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
3129c1d14583SBruce Richardson 				txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
3130c1d14583SBruce Richardson 				txd->cmd_type_offset_bsz =
3131c1d14583SBruce Richardson 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
3132c1d14583SBruce Richardson 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
3133c1d14583SBruce Richardson 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
3134c1d14583SBruce Richardson 				((uint64_t)ICE_MAX_DATA_PER_TXD <<
3135c1d14583SBruce Richardson 				 ICE_TXD_QW1_TX_BUF_SZ_S) |
3136c1d14583SBruce Richardson 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
3137c1d14583SBruce Richardson 
3138c1d14583SBruce Richardson 				buf_dma_addr += ICE_MAX_DATA_PER_TXD;
3139c1d14583SBruce Richardson 				slen -= ICE_MAX_DATA_PER_TXD;
3140c1d14583SBruce Richardson 
3141c1d14583SBruce Richardson 				txe->last_id = tx_last;
3142c1d14583SBruce Richardson 				tx_id = txe->next_id;
3143c1d14583SBruce Richardson 				txe = txn;
31444d0f54d9SBruce Richardson 				txd = &ice_tx_ring[tx_id];
3145c1d14583SBruce Richardson 				txn = &sw_ring[txe->next_id];
3146c1d14583SBruce Richardson 			}
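			/*
			 * Here slen fits in a single descriptor. E.g. a
			 * hypothetical 32000-byte TSO segment is split by
			 * the loop above into one 16383-byte descriptor,
			 * leaving slen = 15617 for the write below.
			 */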
3147c1d14583SBruce Richardson 
3148c1d14583SBruce Richardson 			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
3149c1d14583SBruce Richardson 			txd->cmd_type_offset_bsz =
3150c1d14583SBruce Richardson 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
3151c1d14583SBruce Richardson 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
3152c1d14583SBruce Richardson 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
3153c1d14583SBruce Richardson 				((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
3154c1d14583SBruce Richardson 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
3155c1d14583SBruce Richardson 
3156c1d14583SBruce Richardson 			txe->last_id = tx_last;
3157c1d14583SBruce Richardson 			tx_id = txe->next_id;
3158c1d14583SBruce Richardson 			txe = txn;
3159c1d14583SBruce Richardson 			m_seg = m_seg->next;
3160c1d14583SBruce Richardson 		} while (m_seg);
3161c1d14583SBruce Richardson 
3162c1d14583SBruce Richardson 		/* set the End of Packet (EOP) bit on the packet's last descriptor */
3163c1d14583SBruce Richardson 		td_cmd |= ICE_TX_DESC_CMD_EOP;
3164c1d14583SBruce Richardson 		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
3165c1d14583SBruce Richardson 		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
3166c1d14583SBruce Richardson 
3167c1d14583SBruce Richardson 		/* set RS bit on the last descriptor of one packet */
3168c1d14583SBruce Richardson 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
3169c1d14583SBruce Richardson 			PMD_TX_LOG(DEBUG,
3170c1d14583SBruce Richardson 				   "Setting RS bit on TXD id="
3171c1d14583SBruce Richardson 				   "%4u (port=%d queue=%d)",
3172c1d14583SBruce Richardson 				   tx_last, txq->port_id, txq->queue_id);
3173c1d14583SBruce Richardson 
3174c1d14583SBruce Richardson 			td_cmd |= ICE_TX_DESC_CMD_RS;
3175c1d14583SBruce Richardson 
3176c1d14583SBruce Richardson 			/* Update txq RS bit counters */
3177c1d14583SBruce Richardson 			txq->nb_tx_used = 0;
3178c1d14583SBruce Richardson 		}
3179c1d14583SBruce Richardson 		txd->cmd_type_offset_bsz |=
3180c1d14583SBruce Richardson 			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
3181c1d14583SBruce Richardson 					 ICE_TXD_QW1_CMD_S);
3182c1d14583SBruce Richardson 	}
3183c1d14583SBruce Richardson end_of_tx:
3184c1d14583SBruce Richardson 	/* update Tail register */
3185c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
3186c1d14583SBruce Richardson 	txq->tx_tail = tx_id;
3187c1d14583SBruce Richardson 
3188c1d14583SBruce Richardson 	return nb_tx;
3189c1d14583SBruce Richardson }
3190c1d14583SBruce Richardson 
3191c1d14583SBruce Richardson static __rte_always_inline int
3192c038157aSBruce Richardson ice_tx_free_bufs(struct ci_tx_queue *txq)
3193c1d14583SBruce Richardson {
31945cc9919fSBruce Richardson 	struct ci_tx_entry *txep;
3195c1d14583SBruce Richardson 	uint16_t i;
3196c1d14583SBruce Richardson 
31974d0f54d9SBruce Richardson 	if ((txq->ice_tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
3198c1d14583SBruce Richardson 	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
3199c1d14583SBruce Richardson 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3200c1d14583SBruce Richardson 		return 0;
3201c1d14583SBruce Richardson 
3202c1d14583SBruce Richardson 	txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
3203c1d14583SBruce Richardson 
3204c1d14583SBruce Richardson 	for (i = 0; i < txq->tx_rs_thresh; i++)
3205c1d14583SBruce Richardson 		rte_prefetch0((txep + i)->mbuf);
3206c1d14583SBruce Richardson 
3207c1d14583SBruce Richardson 	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
3208c1d14583SBruce Richardson 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
3209c1d14583SBruce Richardson 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
3210c1d14583SBruce Richardson 			txep->mbuf = NULL;
3211c1d14583SBruce Richardson 		}
3212c1d14583SBruce Richardson 	} else {
3213c1d14583SBruce Richardson 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
3214c1d14583SBruce Richardson 			rte_pktmbuf_free_seg(txep->mbuf);
3215c1d14583SBruce Richardson 			txep->mbuf = NULL;
3216c1d14583SBruce Richardson 		}
3217c1d14583SBruce Richardson 	}
3218c1d14583SBruce Richardson 
3219c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
3220c1d14583SBruce Richardson 	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
3221c1d14583SBruce Richardson 	if (txq->tx_next_dd >= txq->nb_tx_desc)
3222c1d14583SBruce Richardson 		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
3223c1d14583SBruce Richardson 
3224c1d14583SBruce Richardson 	return txq->tx_rs_thresh;
3225c1d14583SBruce Richardson }
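
/*
 * Example of the accounting above (hypothetical config, assuming
 * tx_next_dd starts at tx_rs_thresh - 1 as set at queue init): with
 * nb_tx_desc = 512 and tx_rs_thresh = 32, once descriptor 31 reports
 * DESC_DONE the mbufs in sw_ring[0..31] are released, nb_tx_free grows
 * by 32 and tx_next_dd advances to 63.
 */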
3226c1d14583SBruce Richardson 
3227c1d14583SBruce Richardson static int
3228c038157aSBruce Richardson ice_tx_done_cleanup_full(struct ci_tx_queue *txq,
3229c1d14583SBruce Richardson 			uint32_t free_cnt)
3230c1d14583SBruce Richardson {
32315cc9919fSBruce Richardson 	struct ci_tx_entry *swr_ring = txq->sw_ring;
3232c1d14583SBruce Richardson 	uint16_t i, tx_last, tx_id;
3233c1d14583SBruce Richardson 	uint16_t nb_tx_free_last;
3234c1d14583SBruce Richardson 	uint16_t nb_tx_to_clean;
3235c1d14583SBruce Richardson 	uint32_t pkt_cnt;
3236c1d14583SBruce Richardson 
3237c1d14583SBruce Richardson 	/* Start freeing mbufs from the entry following tx_tail */
3238c1d14583SBruce Richardson 	tx_last = txq->tx_tail;
3239c1d14583SBruce Richardson 	tx_id  = swr_ring[tx_last].next_id;
3240c1d14583SBruce Richardson 
3241c1d14583SBruce Richardson 	if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
3242c1d14583SBruce Richardson 		return 0;
3243c1d14583SBruce Richardson 
3244c1d14583SBruce Richardson 	nb_tx_to_clean = txq->nb_tx_free;
3245c1d14583SBruce Richardson 	nb_tx_free_last = txq->nb_tx_free;
3246c1d14583SBruce Richardson 	if (!free_cnt)
3247c1d14583SBruce Richardson 		free_cnt = txq->nb_tx_desc;
3248c1d14583SBruce Richardson 
3249c1d14583SBruce Richardson 	/* Loop through swr_ring to count the number of
3250c1d14583SBruce Richardson 	 * freeable mbufs and packets.
3251c1d14583SBruce Richardson 	 */
3252c1d14583SBruce Richardson 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3253c1d14583SBruce Richardson 		for (i = 0; i < nb_tx_to_clean &&
3254c1d14583SBruce Richardson 			pkt_cnt < free_cnt &&
3255c1d14583SBruce Richardson 			tx_id != tx_last; i++) {
3256c1d14583SBruce Richardson 			if (swr_ring[tx_id].mbuf != NULL) {
3257c1d14583SBruce Richardson 				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3258c1d14583SBruce Richardson 				swr_ring[tx_id].mbuf = NULL;
3259c1d14583SBruce Richardson 
3260c1d14583SBruce Richardson 				/*
3261c1d14583SBruce Richardson 				 * last segment in the packet,
3262c1d14583SBruce Richardson 				 * increment packet count
3263c1d14583SBruce Richardson 				 */
3264c1d14583SBruce Richardson 				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3265c1d14583SBruce Richardson 			}
3266c1d14583SBruce Richardson 
3267c1d14583SBruce Richardson 			tx_id = swr_ring[tx_id].next_id;
3268c1d14583SBruce Richardson 		}
3269c1d14583SBruce Richardson 
3270c1d14583SBruce Richardson 		if (txq->tx_rs_thresh > txq->nb_tx_desc -
3271c1d14583SBruce Richardson 			txq->nb_tx_free || tx_id == tx_last)
3272c1d14583SBruce Richardson 			break;
3273c1d14583SBruce Richardson 
3274c1d14583SBruce Richardson 		if (pkt_cnt < free_cnt) {
3275c1d14583SBruce Richardson 			if (ice_xmit_cleanup(txq))
3276c1d14583SBruce Richardson 				break;
3277c1d14583SBruce Richardson 
3278c1d14583SBruce Richardson 			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
3279c1d14583SBruce Richardson 			nb_tx_free_last = txq->nb_tx_free;
3280c1d14583SBruce Richardson 		}
3281c1d14583SBruce Richardson 	}
3282c1d14583SBruce Richardson 
3283c1d14583SBruce Richardson 	return (int)pkt_cnt;
3284c1d14583SBruce Richardson }
3285c1d14583SBruce Richardson 
3286c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3287c1d14583SBruce Richardson static int
3288c038157aSBruce Richardson ice_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused,
3289c1d14583SBruce Richardson 			uint32_t free_cnt __rte_unused)
3290c1d14583SBruce Richardson {
3291c1d14583SBruce Richardson 	return -ENOTSUP;
3292c1d14583SBruce Richardson }
3293c1d14583SBruce Richardson #endif
3294c1d14583SBruce Richardson 
3295c1d14583SBruce Richardson static int
3296c038157aSBruce Richardson ice_tx_done_cleanup_simple(struct ci_tx_queue *txq,
3297c1d14583SBruce Richardson 			uint32_t free_cnt)
3298c1d14583SBruce Richardson {
3299c1d14583SBruce Richardson 	int i, n, cnt;
3300c1d14583SBruce Richardson 
3301c1d14583SBruce Richardson 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
3302c1d14583SBruce Richardson 		free_cnt = txq->nb_tx_desc;
3303c1d14583SBruce Richardson 
3304c1d14583SBruce Richardson 	cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
3305c1d14583SBruce Richardson 
3306c1d14583SBruce Richardson 	for (i = 0; i < cnt; i += n) {
3307c1d14583SBruce Richardson 		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
3308c1d14583SBruce Richardson 			break;
3309c1d14583SBruce Richardson 
3310c1d14583SBruce Richardson 		n = ice_tx_free_bufs(txq);
3311c1d14583SBruce Richardson 
3312c1d14583SBruce Richardson 		if (n == 0)
3313c1d14583SBruce Richardson 			break;
3314c1d14583SBruce Richardson 	}
3315c1d14583SBruce Richardson 
3316c1d14583SBruce Richardson 	return i;
3317c1d14583SBruce Richardson }
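
/*
 * Sketch of the loop above with hypothetical numbers: free_cnt = 100
 * and tx_rs_thresh = 32 round cnt down to 96, so at most three
 * successful ice_tx_free_bufs() calls run, each releasing one full
 * batch of 32 buffers.
 */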
3318c1d14583SBruce Richardson 
3319c1d14583SBruce Richardson int
3320c1d14583SBruce Richardson ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
3321c1d14583SBruce Richardson {
3322c038157aSBruce Richardson 	struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
3323c1d14583SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
3324c1d14583SBruce Richardson 	struct ice_adapter *ad =
3325c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3326c1d14583SBruce Richardson 
3327c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3328c1d14583SBruce Richardson 	if (ad->tx_vec_allowed)
3329c1d14583SBruce Richardson 		return ice_tx_done_cleanup_vec(q, free_cnt);
3330c1d14583SBruce Richardson #endif
3331c1d14583SBruce Richardson 	if (ad->tx_simple_allowed)
3332c1d14583SBruce Richardson 		return ice_tx_done_cleanup_simple(q, free_cnt);
3333c1d14583SBruce Richardson 	else
3334c1d14583SBruce Richardson 		return ice_tx_done_cleanup_full(q, free_cnt);
3335c1d14583SBruce Richardson }
3336c1d14583SBruce Richardson 
3337c1d14583SBruce Richardson /* Populate 4 descriptors with data from 4 mbufs */
3338c1d14583SBruce Richardson static inline void
3339c1d14583SBruce Richardson tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3340c1d14583SBruce Richardson {
3341c1d14583SBruce Richardson 	uint64_t dma_addr;
3342c1d14583SBruce Richardson 	uint32_t i;
3343c1d14583SBruce Richardson 
3344c1d14583SBruce Richardson 	for (i = 0; i < 4; i++, txdp++, pkts++) {
3345c1d14583SBruce Richardson 		dma_addr = rte_mbuf_data_iova(*pkts);
3346c1d14583SBruce Richardson 		txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3347c1d14583SBruce Richardson 		txdp->cmd_type_offset_bsz =
3348c1d14583SBruce Richardson 			ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3349c1d14583SBruce Richardson 				       (*pkts)->data_len, 0);
3350c1d14583SBruce Richardson 	}
3351c1d14583SBruce Richardson }
3352c1d14583SBruce Richardson 
3353c1d14583SBruce Richardson /* Populate 1 descriptor with data from 1 mbuf */
3354c1d14583SBruce Richardson static inline void
3355c1d14583SBruce Richardson tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3356c1d14583SBruce Richardson {
3357c1d14583SBruce Richardson 	uint64_t dma_addr;
3358c1d14583SBruce Richardson 
3359c1d14583SBruce Richardson 	dma_addr = rte_mbuf_data_iova(*pkts);
3360c1d14583SBruce Richardson 	txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3361c1d14583SBruce Richardson 	txdp->cmd_type_offset_bsz =
3362c1d14583SBruce Richardson 		ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3363c1d14583SBruce Richardson 			       (*pkts)->data_len, 0);
3364c1d14583SBruce Richardson }
3365c1d14583SBruce Richardson 
3366c1d14583SBruce Richardson static inline void
3367c038157aSBruce Richardson ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
3368c1d14583SBruce Richardson 		    uint16_t nb_pkts)
3369c1d14583SBruce Richardson {
33704d0f54d9SBruce Richardson 	volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail];
33715cc9919fSBruce Richardson 	struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3372c1d14583SBruce Richardson 	const int N_PER_LOOP = 4;
3373c1d14583SBruce Richardson 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3374c1d14583SBruce Richardson 	int mainpart, leftover;
3375c1d14583SBruce Richardson 	int i, j;
3376c1d14583SBruce Richardson 
3377c1d14583SBruce Richardson 	/**
3378c1d14583SBruce Richardson 	 * Process most of the packets in chunks of N pkts.  Any
3379c1d14583SBruce Richardson 	 * leftover packets will get processed one at a time.
3380c1d14583SBruce Richardson 	 */
3381c1d14583SBruce Richardson 	mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3382c1d14583SBruce Richardson 	leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
3383c1d14583SBruce Richardson 	for (i = 0; i < mainpart; i += N_PER_LOOP) {
3384c1d14583SBruce Richardson 		/* Copy N mbuf pointers to the S/W ring */
3385c1d14583SBruce Richardson 		for (j = 0; j < N_PER_LOOP; ++j)
3386c1d14583SBruce Richardson 			(txep + i + j)->mbuf = *(pkts + i + j);
3387c1d14583SBruce Richardson 		tx4(txdp + i, pkts + i);
3388c1d14583SBruce Richardson 	}
3389c1d14583SBruce Richardson 
3390c1d14583SBruce Richardson 	if (unlikely(leftover > 0)) {
3391c1d14583SBruce Richardson 		for (i = 0; i < leftover; ++i) {
3392c1d14583SBruce Richardson 			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3393c1d14583SBruce Richardson 			tx1(txdp + mainpart + i, pkts + mainpart + i);
3394c1d14583SBruce Richardson 		}
3395c1d14583SBruce Richardson 	}
3396c1d14583SBruce Richardson }
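
/*
 * Example of the chunking above (hypothetical burst): nb_pkts = 11
 * gives mainpart = 8 and leftover = 3, so two tx4() calls fill eight
 * descriptors and three tx1() calls handle the remainder.
 */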
3397c1d14583SBruce Richardson 
3398c1d14583SBruce Richardson static inline uint16_t
3399c038157aSBruce Richardson tx_xmit_pkts(struct ci_tx_queue *txq,
3400c1d14583SBruce Richardson 	     struct rte_mbuf **tx_pkts,
3401c1d14583SBruce Richardson 	     uint16_t nb_pkts)
3402c1d14583SBruce Richardson {
34034d0f54d9SBruce Richardson 	volatile struct ice_tx_desc *txr = txq->ice_tx_ring;
3404c1d14583SBruce Richardson 	uint16_t n = 0;
3405c1d14583SBruce Richardson 
3406c1d14583SBruce Richardson 	/**
3407c1d14583SBruce Richardson 	 * Begin scanning the H/W ring for done descriptors when the number
3408c1d14583SBruce Richardson 	 * of available descriptors drops below tx_free_thresh. For each done
3409c1d14583SBruce Richardson 	 * descriptor, free the associated buffer.
3410c1d14583SBruce Richardson 	 */
3411c1d14583SBruce Richardson 	if (txq->nb_tx_free < txq->tx_free_thresh)
3412c1d14583SBruce Richardson 		ice_tx_free_bufs(txq);
3413c1d14583SBruce Richardson 
3414c1d14583SBruce Richardson 	/* Use available descriptors only */
3415c1d14583SBruce Richardson 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3416c1d14583SBruce Richardson 	if (unlikely(!nb_pkts))
3417c1d14583SBruce Richardson 		return 0;
3418c1d14583SBruce Richardson 
3419c1d14583SBruce Richardson 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
3420c1d14583SBruce Richardson 	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3421c1d14583SBruce Richardson 		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3422c1d14583SBruce Richardson 		ice_tx_fill_hw_ring(txq, tx_pkts, n);
3423c1d14583SBruce Richardson 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3424c1d14583SBruce Richardson 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3425c1d14583SBruce Richardson 					 ICE_TXD_QW1_CMD_S);
3426c1d14583SBruce Richardson 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3427c1d14583SBruce Richardson 		txq->tx_tail = 0;
3428c1d14583SBruce Richardson 	}
3429c1d14583SBruce Richardson 
3430c1d14583SBruce Richardson 	/* Fill hardware descriptor ring with mbuf data */
3431c1d14583SBruce Richardson 	ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3432c1d14583SBruce Richardson 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3433c1d14583SBruce Richardson 
3434c1d14583SBruce Richardson 	/* Determine if RS bit needs to be set */
3435c1d14583SBruce Richardson 	if (txq->tx_tail > txq->tx_next_rs) {
3436c1d14583SBruce Richardson 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3437c1d14583SBruce Richardson 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3438c1d14583SBruce Richardson 					 ICE_TXD_QW1_CMD_S);
3439c1d14583SBruce Richardson 		txq->tx_next_rs =
3440c1d14583SBruce Richardson 			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3441c1d14583SBruce Richardson 		if (txq->tx_next_rs >= txq->nb_tx_desc)
3442c1d14583SBruce Richardson 			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3443c1d14583SBruce Richardson 	}
3444c1d14583SBruce Richardson 
3445c1d14583SBruce Richardson 	if (txq->tx_tail >= txq->nb_tx_desc)
3446c1d14583SBruce Richardson 		txq->tx_tail = 0;
3447c1d14583SBruce Richardson 
3448c1d14583SBruce Richardson 	/* Update the tx tail register */
3449c1d14583SBruce Richardson 	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3450c1d14583SBruce Richardson 
3451c1d14583SBruce Richardson 	return nb_pkts;
3452c1d14583SBruce Richardson }
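
/*
 * Example of the RS placement above (hypothetical queue config,
 * assuming tx_next_rs starts at tx_rs_thresh - 1): with tx_rs_thresh =
 * 32 and tx_next_rs = 31, a 40-packet burst from tail 0 moves tx_tail
 * to 40; since 40 > 31 the RS bit is set on descriptor 31 and
 * tx_next_rs advances to 63, so the HW writes back completions once
 * per 32 descriptors instead of per packet.
 */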
3453c1d14583SBruce Richardson 
3454c1d14583SBruce Richardson static uint16_t
3455c1d14583SBruce Richardson ice_xmit_pkts_simple(void *tx_queue,
3456c1d14583SBruce Richardson 		     struct rte_mbuf **tx_pkts,
3457c1d14583SBruce Richardson 		     uint16_t nb_pkts)
3458c1d14583SBruce Richardson {
3459c1d14583SBruce Richardson 	uint16_t nb_tx = 0;
3460c1d14583SBruce Richardson 
3461c1d14583SBruce Richardson 	if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3462c038157aSBruce Richardson 		return tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
3463c1d14583SBruce Richardson 				    tx_pkts, nb_pkts);
3464c1d14583SBruce Richardson 
3465c1d14583SBruce Richardson 	while (nb_pkts) {
3466c1d14583SBruce Richardson 		uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3467c1d14583SBruce Richardson 						      ICE_TX_MAX_BURST);
3468c1d14583SBruce Richardson 
3469c038157aSBruce Richardson 		ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
3470c1d14583SBruce Richardson 				   &tx_pkts[nb_tx], num);
3471c1d14583SBruce Richardson 		nb_tx = (uint16_t)(nb_tx + ret);
3472c1d14583SBruce Richardson 		nb_pkts = (uint16_t)(nb_pkts - ret);
3473c1d14583SBruce Richardson 		if (ret < num)
3474c1d14583SBruce Richardson 			break;
3475c1d14583SBruce Richardson 	}
3476c1d14583SBruce Richardson 
3477c1d14583SBruce Richardson 	return nb_tx;
3478c1d14583SBruce Richardson }
3479c1d14583SBruce Richardson 
3480c1d14583SBruce Richardson void __rte_cold
3481c1d14583SBruce Richardson ice_set_rx_function(struct rte_eth_dev *dev)
3482c1d14583SBruce Richardson {
3483c1d14583SBruce Richardson 	PMD_INIT_FUNC_TRACE();
3484c1d14583SBruce Richardson 	struct ice_adapter *ad =
3485c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3486c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3487c1d14583SBruce Richardson 	struct ice_rx_queue *rxq;
3488c1d14583SBruce Richardson 	int i;
3489c1d14583SBruce Richardson 	int rx_check_ret = -1;
3490c1d14583SBruce Richardson 
3491c1d14583SBruce Richardson 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3492c1d14583SBruce Richardson 		ad->rx_use_avx512 = false;
3493c1d14583SBruce Richardson 		ad->rx_use_avx2 = false;
3494c1d14583SBruce Richardson 		rx_check_ret = ice_rx_vec_dev_check(dev);
3495c1d14583SBruce Richardson 		if (ad->ptp_ena)
3496c1d14583SBruce Richardson 			rx_check_ret = -1;
3497c1d14583SBruce Richardson 		ad->rx_vec_offload_support =
3498c1d14583SBruce Richardson 				(rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
3499c1d14583SBruce Richardson 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3500c1d14583SBruce Richardson 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3501c1d14583SBruce Richardson 			ad->rx_vec_allowed = true;
3502c1d14583SBruce Richardson 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
3503c1d14583SBruce Richardson 				rxq = dev->data->rx_queues[i];
3504c1d14583SBruce Richardson 				if (rxq && ice_rxq_vec_setup(rxq)) {
3505c1d14583SBruce Richardson 					ad->rx_vec_allowed = false;
3506c1d14583SBruce Richardson 					break;
3507c1d14583SBruce Richardson 				}
3508c1d14583SBruce Richardson 			}
3509c1d14583SBruce Richardson 
3510c1d14583SBruce Richardson 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3511c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3512c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3513c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3514c1d14583SBruce Richardson 				ad->rx_use_avx512 = true;
3515c1d14583SBruce Richardson #else
3516c1d14583SBruce Richardson 			PMD_DRV_LOG(NOTICE,
3517c1d14583SBruce Richardson 				"AVX512 is not supported in build env");
3518c1d14583SBruce Richardson #endif
3519c1d14583SBruce Richardson 			if (!ad->rx_use_avx512 &&
3520c1d14583SBruce Richardson 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3521c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3522c1d14583SBruce Richardson 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3523c1d14583SBruce Richardson 				ad->rx_use_avx2 = true;
3524c1d14583SBruce Richardson 
3525c1d14583SBruce Richardson 		} else {
3526c1d14583SBruce Richardson 			ad->rx_vec_allowed = false;
3527c1d14583SBruce Richardson 		}
3528c1d14583SBruce Richardson 	}
3529c1d14583SBruce Richardson 
3530c1d14583SBruce Richardson 	if (ad->rx_vec_allowed) {
3531c1d14583SBruce Richardson 		if (dev->data->scattered_rx) {
3532c1d14583SBruce Richardson 			if (ad->rx_use_avx512) {
3533c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3534c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3535c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3536c1d14583SBruce Richardson 						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3537c1d14583SBruce Richardson 						dev->data->port_id);
3538c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3539c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx512_offload;
3540c1d14583SBruce Richardson 				} else {
3541c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3542c1d14583SBruce Richardson 						"Using AVX512 Vector Scattered Rx (port %d).",
3543c1d14583SBruce Richardson 						dev->data->port_id);
3544c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3545c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx512;
3546c1d14583SBruce Richardson 				}
3547c1d14583SBruce Richardson #endif
3548c1d14583SBruce Richardson 			} else if (ad->rx_use_avx2) {
3549c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3550c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3551c1d14583SBruce Richardson 						    "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3552c1d14583SBruce Richardson 						    dev->data->port_id);
3553c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3554c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx2_offload;
3555c1d14583SBruce Richardson 				} else {
3556c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3557c1d14583SBruce Richardson 						    "Using AVX2 Vector Scattered Rx (port %d).",
3558c1d14583SBruce Richardson 						    dev->data->port_id);
3559c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3560c1d14583SBruce Richardson 						ice_recv_scattered_pkts_vec_avx2;
3561c1d14583SBruce Richardson 				}
3562c1d14583SBruce Richardson 			} else {
3563c1d14583SBruce Richardson 				PMD_DRV_LOG(DEBUG,
3564c1d14583SBruce Richardson 					"Using Vector Scattered Rx (port %d).",
3565c1d14583SBruce Richardson 					dev->data->port_id);
3566c1d14583SBruce Richardson 				dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3567c1d14583SBruce Richardson 			}
3568c1d14583SBruce Richardson 		} else {
3569c1d14583SBruce Richardson 			if (ad->rx_use_avx512) {
3570c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3571c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3572c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3573c1d14583SBruce Richardson 						"Using AVX512 OFFLOAD Vector Rx (port %d).",
3574c1d14583SBruce Richardson 						dev->data->port_id);
3575c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3576c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx512_offload;
3577c1d14583SBruce Richardson 				} else {
3578c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3579c1d14583SBruce Richardson 						"Using AVX512 Vector Rx (port %d).",
3580c1d14583SBruce Richardson 						dev->data->port_id);
3581c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3582c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx512;
3583c1d14583SBruce Richardson 				}
3584c1d14583SBruce Richardson #endif
3585c1d14583SBruce Richardson 			} else if (ad->rx_use_avx2) {
3586c1d14583SBruce Richardson 				if (ad->rx_vec_offload_support) {
3587c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3588c1d14583SBruce Richardson 						    "Using AVX2 OFFLOAD Vector Rx (port %d).",
3589c1d14583SBruce Richardson 						    dev->data->port_id);
3590c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3591c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx2_offload;
3592c1d14583SBruce Richardson 				} else {
3593c1d14583SBruce Richardson 					PMD_DRV_LOG(NOTICE,
3594c1d14583SBruce Richardson 						    "Using AVX2 Vector Rx (port %d).",
3595c1d14583SBruce Richardson 						    dev->data->port_id);
3596c1d14583SBruce Richardson 					dev->rx_pkt_burst =
3597c1d14583SBruce Richardson 						ice_recv_pkts_vec_avx2;
3598c1d14583SBruce Richardson 				}
3599c1d14583SBruce Richardson 			} else {
3600c1d14583SBruce Richardson 				PMD_DRV_LOG(DEBUG,
3601c1d14583SBruce Richardson 					"Using Vector Rx (port %d).",
3602c1d14583SBruce Richardson 					dev->data->port_id);
3603c1d14583SBruce Richardson 				dev->rx_pkt_burst = ice_recv_pkts_vec;
3604c1d14583SBruce Richardson 			}
3605c1d14583SBruce Richardson 		}
3606c1d14583SBruce Richardson 		return;
3607c1d14583SBruce Richardson 	}
3608c1d14583SBruce Richardson 
3609c1d14583SBruce Richardson #endif
3610c1d14583SBruce Richardson 
3611c1d14583SBruce Richardson 	if (dev->data->scattered_rx) {
3612c1d14583SBruce Richardson 		/* Set the non-LRO scattered function */
3613c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3614c1d14583SBruce Richardson 			     "Using a Scattered Rx function on port %d.",
3615c1d14583SBruce Richardson 			     dev->data->port_id);
3616c1d14583SBruce Richardson 		dev->rx_pkt_burst = ice_recv_scattered_pkts;
3617c1d14583SBruce Richardson 	} else if (ad->rx_bulk_alloc_allowed) {
3618c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3619c1d14583SBruce Richardson 			     "Rx Burst Bulk Alloc Preconditions are "
3620c1d14583SBruce Richardson 			     "satisfied. Rx Burst Bulk Alloc function "
3621c1d14583SBruce Richardson 			     "will be used on port %d.",
3622c1d14583SBruce Richardson 			     dev->data->port_id);
3623c1d14583SBruce Richardson 		dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3624c1d14583SBruce Richardson 	} else {
3625c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3626c1d14583SBruce Richardson 			     "Rx Burst Bulk Alloc Preconditions are not "
3627c1d14583SBruce Richardson 			     "satisfied, Normal Rx will be used on port %d.",
3628c1d14583SBruce Richardson 			     dev->data->port_id);
3629c1d14583SBruce Richardson 		dev->rx_pkt_burst = ice_recv_pkts;
3630c1d14583SBruce Richardson 	}
3631c1d14583SBruce Richardson }
3632c1d14583SBruce Richardson 
3633c1d14583SBruce Richardson static const struct {
3634c1d14583SBruce Richardson 	eth_rx_burst_t pkt_burst;
3635c1d14583SBruce Richardson 	const char *info;
3636c1d14583SBruce Richardson } ice_rx_burst_infos[] = {
3637c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts,          "Scalar Scattered" },
3638c1d14583SBruce Richardson 	{ ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
3639c1d14583SBruce Richardson 	{ ice_recv_pkts,                    "Scalar" },
3640c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3641c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3642c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3643c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3644c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
3645c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
3646c1d14583SBruce Richardson #endif
3647c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3648c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3649c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
3650c1d14583SBruce Richardson 	{ ice_recv_pkts_vec_avx2_offload,   "Offload Vector AVX2" },
3651c1d14583SBruce Richardson 	{ ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
3652c1d14583SBruce Richardson 	{ ice_recv_pkts_vec,                "Vector SSE" },
3653c1d14583SBruce Richardson #endif
3654c1d14583SBruce Richardson };
3655c1d14583SBruce Richardson 
3656c1d14583SBruce Richardson int
3657c1d14583SBruce Richardson ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3658c1d14583SBruce Richardson 		      struct rte_eth_burst_mode *mode)
3659c1d14583SBruce Richardson {
3660c1d14583SBruce Richardson 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3661c1d14583SBruce Richardson 	int ret = -EINVAL;
3662c1d14583SBruce Richardson 	unsigned int i;
3663c1d14583SBruce Richardson 
3664c1d14583SBruce Richardson 	for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3665c1d14583SBruce Richardson 		if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3666c1d14583SBruce Richardson 			snprintf(mode->info, sizeof(mode->info), "%s",
3667c1d14583SBruce Richardson 				 ice_rx_burst_infos[i].info);
3668c1d14583SBruce Richardson 			ret = 0;
3669c1d14583SBruce Richardson 			break;
3670c1d14583SBruce Richardson 		}
3671c1d14583SBruce Richardson 	}
3672c1d14583SBruce Richardson 
3673c1d14583SBruce Richardson 	return ret;
3674c1d14583SBruce Richardson }
3675c1d14583SBruce Richardson 
3676c1d14583SBruce Richardson void __rte_cold
3677c038157aSBruce Richardson ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq)
3678c1d14583SBruce Richardson {
3679c1d14583SBruce Richardson 	struct ice_adapter *ad =
3680c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3681c1d14583SBruce Richardson 
3682c1d14583SBruce Richardson 	/* Use a simple Tx queue if possible (only fast free is allowed) */
3683c1d14583SBruce Richardson 	ad->tx_simple_allowed =
3684c1d14583SBruce Richardson 		(txq->offloads ==
3685c1d14583SBruce Richardson 		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
3686c1d14583SBruce Richardson 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3687c1d14583SBruce Richardson 
3688c1d14583SBruce Richardson 	if (ad->tx_simple_allowed)
3689c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3690c1d14583SBruce Richardson 			     txq->queue_id);
3691c1d14583SBruce Richardson 	else
3692c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG,
3693c1d14583SBruce Richardson 			     "Simple Tx can NOT be enabled on Tx queue %u.",
3694c1d14583SBruce Richardson 			     txq->queue_id);
3695c1d14583SBruce Richardson }
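
/*
 * For example, a queue whose only offload is
 * RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE and whose tx_rs_thresh is at least
 * ICE_TX_MAX_BURST satisfies the check above; configuring any checksum,
 * TSO or VLAN offload on the queue disqualifies the simple Tx path.
 */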
3696c1d14583SBruce Richardson 
3697c1d14583SBruce Richardson /*********************************************************************
3698c1d14583SBruce Richardson  *
3699c1d14583SBruce Richardson  *  TX prep functions
3700c1d14583SBruce Richardson  *
3701c1d14583SBruce Richardson  **********************************************************************/
3702c1d14583SBruce Richardson /* The supported limits for TSO MSS and TSO frame size */
3703c1d14583SBruce Richardson #define ICE_MIN_TSO_MSS            64
3704c1d14583SBruce Richardson #define ICE_MAX_TSO_MSS            9728
3705c1d14583SBruce Richardson #define ICE_MAX_TSO_FRAME_SIZE     262144
3706c1d14583SBruce Richardson 
3707c1d14583SBruce Richardson /* Check for an empty mbuf (any zero-length segment) */
3708c1d14583SBruce Richardson static inline int
3709c1d14583SBruce Richardson ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
3710c1d14583SBruce Richardson {
3711c1d14583SBruce Richardson 	struct rte_mbuf *txd = tx_pkt;
3712c1d14583SBruce Richardson 
3713c1d14583SBruce Richardson 	while (txd != NULL) {
3714c1d14583SBruce Richardson 		if (txd->data_len == 0)
3715c1d14583SBruce Richardson 			return -1;
3716c1d14583SBruce Richardson 		txd = txd->next;
3717c1d14583SBruce Richardson 	}
3718c1d14583SBruce Richardson 
3719c1d14583SBruce Richardson 	return 0;
3720c1d14583SBruce Richardson }
3721c1d14583SBruce Richardson 
3722c1d14583SBruce Richardson /* Tx mbuf check */
3723c1d14583SBruce Richardson static uint16_t
3724c1d14583SBruce Richardson ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3725c1d14583SBruce Richardson {
3726c038157aSBruce Richardson 	struct ci_tx_queue *txq = tx_queue;
3727c1d14583SBruce Richardson 	uint16_t idx;
3728c1d14583SBruce Richardson 	struct rte_mbuf *mb;
3729c1d14583SBruce Richardson 	bool pkt_error = false;
3730c1d14583SBruce Richardson 	uint16_t good_pkts = nb_pkts;
3731c1d14583SBruce Richardson 	const char *reason = NULL;
37324d0f54d9SBruce Richardson 	struct ice_adapter *adapter = txq->ice_vsi->adapter;
3733c1d14583SBruce Richardson 	uint64_t ol_flags;
3734c1d14583SBruce Richardson 
3735c1d14583SBruce Richardson 	for (idx = 0; idx < nb_pkts; idx++) {
3736c1d14583SBruce Richardson 		mb = tx_pkts[idx];
3737c1d14583SBruce Richardson 		ol_flags = mb->ol_flags;
3738c1d14583SBruce Richardson 
3739c1d14583SBruce Richardson 		if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_MBUF) &&
3740c1d14583SBruce Richardson 		    (rte_mbuf_check(mb, 1, &reason) != 0)) {
3741c1d14583SBruce Richardson 			PMD_TX_LOG(ERR, "INVALID mbuf: %s", reason);
3742c1d14583SBruce Richardson 			pkt_error = true;
3743c1d14583SBruce Richardson 			break;
3744c1d14583SBruce Richardson 		}
3745c1d14583SBruce Richardson 
3746c1d14583SBruce Richardson 		if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SIZE) &&
3747c1d14583SBruce Richardson 		    (mb->data_len > mb->pkt_len ||
3748c1d14583SBruce Richardson 		     mb->data_len < ICE_TX_MIN_PKT_LEN ||
3749c1d14583SBruce Richardson 		     mb->data_len > ICE_FRAME_SIZE_MAX)) {
3750c1d14583SBruce Richardson 			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %d)",
3751c1d14583SBruce Richardson 				mb->data_len, ICE_TX_MIN_PKT_LEN, ICE_FRAME_SIZE_MAX);
3752c1d14583SBruce Richardson 			pkt_error = true;
3753c1d14583SBruce Richardson 			break;
3754c1d14583SBruce Richardson 		}
3755c1d14583SBruce Richardson 
3756c1d14583SBruce Richardson 		if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SEGMENT) {
3757c1d14583SBruce Richardson 			if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3758c1d14583SBruce Richardson 				/**
3759c1d14583SBruce Richardson 				 * No TSO case: nb_segs and pkt_len must not
3760c1d14583SBruce Richardson 				 * exceed the limits.
3761c1d14583SBruce Richardson 				 */
3762c1d14583SBruce Richardson 				if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) {
3763c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds HW limit, maximum allowed value is %d",
3764c1d14583SBruce Richardson 						mb->nb_segs, ICE_TX_MTU_SEG_MAX);
3765c1d14583SBruce Richardson 					pkt_error = true;
3766c1d14583SBruce Richardson 					break;
3767c1d14583SBruce Richardson 				}
3768c1d14583SBruce Richardson 				if (mb->pkt_len > ICE_FRAME_SIZE_MAX) {
3769c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds HW limit, maximum allowed value is %d",
3770c1d14583SBruce Richardson 						mb->pkt_len, ICE_FRAME_SIZE_MAX);
3771c1d14583SBruce Richardson 					pkt_error = true;
3772c1d14583SBruce Richardson 					break;
3773c1d14583SBruce Richardson 				}
3774c1d14583SBruce Richardson 			} else {
3775c1d14583SBruce Richardson 				/** TSO case: tso_segsz, nb_segs and pkt_len must
3776c1d14583SBruce Richardson 				 * not exceed the limits.
3777c1d14583SBruce Richardson 				 */
3778c1d14583SBruce Richardson 				if (mb->tso_segsz < ICE_MIN_TSO_MSS ||
3779c1d14583SBruce Richardson 				    mb->tso_segsz > ICE_MAX_TSO_MSS) {
3780c1d14583SBruce Richardson 					/**
3781c1d14583SBruce Richardson 					 * MSS values outside this range are considered malicious
3782c1d14583SBruce Richardson 					 */
3783c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out of range, reasonable range (%d - %u)",
3784c1d14583SBruce Richardson 						mb->tso_segsz, ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS);
3785c1d14583SBruce Richardson 					pkt_error = true;
3786c1d14583SBruce Richardson 					break;
3787c1d14583SBruce Richardson 				}
3788c038157aSBruce Richardson 				if (mb->nb_segs > txq->nb_tx_desc) {
3789c1d14583SBruce Richardson 					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length");
3790c1d14583SBruce Richardson 					pkt_error = true;
3791c1d14583SBruce Richardson 					break;
3792c1d14583SBruce Richardson 				}
3793c1d14583SBruce Richardson 			}
3794c1d14583SBruce Richardson 		}
3795c1d14583SBruce Richardson 
3796c1d14583SBruce Richardson 		if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_OFFLOAD) {
3797c1d14583SBruce Richardson 			if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) {
3798c1d14583SBruce Richardson 				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload is not supported");
3799c1d14583SBruce Richardson 				pkt_error = true;
3800c1d14583SBruce Richardson 				break;
3801c1d14583SBruce Richardson 			}
3802c1d14583SBruce Richardson 
3803c1d14583SBruce Richardson 			if (rte_validate_tx_offload(mb) != 0) {
3804c1d14583SBruce Richardson 				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload setup error");
3805c1d14583SBruce Richardson 				pkt_error = true;
3806c1d14583SBruce Richardson 				break;
3807c1d14583SBruce Richardson 			}
3808c1d14583SBruce Richardson 		}
3809c1d14583SBruce Richardson 	}
3810c1d14583SBruce Richardson 
3811c1d14583SBruce Richardson 	if (pkt_error) {
3812c1d14583SBruce Richardson 		txq->mbuf_errors++;
3813c1d14583SBruce Richardson 		good_pkts = idx;
3814c1d14583SBruce Richardson 		if (good_pkts == 0)
3815c1d14583SBruce Richardson 			return 0;
3816c1d14583SBruce Richardson 	}
3817c1d14583SBruce Richardson 
3818c1d14583SBruce Richardson 	return adapter->tx_pkt_burst(tx_queue, tx_pkts, good_pkts);
3819c1d14583SBruce Richardson }
3820c1d14583SBruce Richardson 
3821c1d14583SBruce Richardson uint16_t
3822c1d14583SBruce Richardson ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3823c1d14583SBruce Richardson 	      uint16_t nb_pkts)
3824c1d14583SBruce Richardson {
3825c1d14583SBruce Richardson 	int i, ret;
3826c1d14583SBruce Richardson 	uint64_t ol_flags;
3827c1d14583SBruce Richardson 	struct rte_mbuf *m;
3828c1d14583SBruce Richardson 
3829c1d14583SBruce Richardson 	for (i = 0; i < nb_pkts; i++) {
3830c1d14583SBruce Richardson 		m = tx_pkts[i];
3831c1d14583SBruce Richardson 		ol_flags = m->ol_flags;
3832c1d14583SBruce Richardson 
3833c1d14583SBruce Richardson 		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
3834c1d14583SBruce Richardson 		    /**
3835c1d14583SBruce Richardson 		     * No TSO case: nb_segs and pkt_len must not
3836c1d14583SBruce Richardson 		     * exceed the limits.
3837c1d14583SBruce Richardson 		     */
3838c1d14583SBruce Richardson 		    (m->nb_segs > ICE_TX_MTU_SEG_MAX ||
3839c1d14583SBruce Richardson 		     m->pkt_len > ICE_FRAME_SIZE_MAX)) {
3840c1d14583SBruce Richardson 			rte_errno = EINVAL;
3841c1d14583SBruce Richardson 			return i;
3842c1d14583SBruce Richardson 		} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3843c1d14583SBruce Richardson 		    /** TSO case: tso_segsz, nb_segs and pkt_len must
3844c1d14583SBruce Richardson 		     * not exceed the limits.
3845c1d14583SBruce Richardson 		     */
3846c1d14583SBruce Richardson 		    (m->tso_segsz < ICE_MIN_TSO_MSS ||
3847c1d14583SBruce Richardson 		     m->tso_segsz > ICE_MAX_TSO_MSS ||
3848c1d14583SBruce Richardson 		     m->nb_segs >
3849c038157aSBruce Richardson 			((struct ci_tx_queue *)tx_queue)->nb_tx_desc ||
3850c1d14583SBruce Richardson 		     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3851c1d14583SBruce Richardson 			/**
3852c1d14583SBruce Richardson 			 * MSS values outside this range are considered malicious
3853c1d14583SBruce Richardson 			 */
3854c1d14583SBruce Richardson 			rte_errno = EINVAL;
3855c1d14583SBruce Richardson 			return i;
3856c1d14583SBruce Richardson 		}
3857c1d14583SBruce Richardson 
3858c1d14583SBruce Richardson 		if (m->pkt_len < ICE_TX_MIN_PKT_LEN) {
3859c1d14583SBruce Richardson 			rte_errno = EINVAL;
3860c1d14583SBruce Richardson 			return i;
3861c1d14583SBruce Richardson 		}
3862c1d14583SBruce Richardson 
3863c1d14583SBruce Richardson #ifdef RTE_ETHDEV_DEBUG_TX
3864c1d14583SBruce Richardson 		ret = rte_validate_tx_offload(m);
3865c1d14583SBruce Richardson 		if (ret != 0) {
3866c1d14583SBruce Richardson 			rte_errno = -ret;
3867c1d14583SBruce Richardson 			return i;
3868c1d14583SBruce Richardson 		}
3869c1d14583SBruce Richardson #endif
3870c1d14583SBruce Richardson 		ret = rte_net_intel_cksum_prepare(m);
3871c1d14583SBruce Richardson 		if (ret != 0) {
3872c1d14583SBruce Richardson 			rte_errno = -ret;
3873c1d14583SBruce Richardson 			return i;
3874c1d14583SBruce Richardson 		}
3875c1d14583SBruce Richardson 
3876c1d14583SBruce Richardson 		if (ice_check_empty_mbuf(m) != 0) {
3877c1d14583SBruce Richardson 			rte_errno = EINVAL;
3878c1d14583SBruce Richardson 			return i;
3879c1d14583SBruce Richardson 		}
3880c1d14583SBruce Richardson 	}
3881c1d14583SBruce Richardson 	return i;
3882c1d14583SBruce Richardson }
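
/*
 * ice_prep_pkts() is installed as dev->tx_pkt_prepare below, so an
 * application typically invokes it through the generic prepare stage
 * before the burst (a usage sketch, not driver code):
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */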
3883c1d14583SBruce Richardson 
3884c1d14583SBruce Richardson void __rte_cold
3885c1d14583SBruce Richardson ice_set_tx_function(struct rte_eth_dev *dev)
3886c1d14583SBruce Richardson {
3887c1d14583SBruce Richardson 	struct ice_adapter *ad =
3888c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3889c1d14583SBruce Richardson 	int mbuf_check = ad->devargs.mbuf_check;
3890c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
3891c038157aSBruce Richardson 	struct ci_tx_queue *txq;
3892c1d14583SBruce Richardson 	int i;
3893c1d14583SBruce Richardson 	int tx_check_ret = -1;
3894c1d14583SBruce Richardson 
3895c1d14583SBruce Richardson 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3896c1d14583SBruce Richardson 		ad->tx_use_avx2 = false;
3897c1d14583SBruce Richardson 		ad->tx_use_avx512 = false;
3898c1d14583SBruce Richardson 		tx_check_ret = ice_tx_vec_dev_check(dev);
3899c1d14583SBruce Richardson 		if (tx_check_ret >= 0 &&
3900c1d14583SBruce Richardson 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3901c1d14583SBruce Richardson 			ad->tx_vec_allowed = true;
3902c1d14583SBruce Richardson 
3903c1d14583SBruce Richardson 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3904c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3905c1d14583SBruce Richardson 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3906c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3907c1d14583SBruce Richardson 				ad->tx_use_avx512 = true;
3908c1d14583SBruce Richardson #else
3909c1d14583SBruce Richardson 			PMD_DRV_LOG(NOTICE,
3910c1d14583SBruce Richardson 				"AVX512 is not supported in build env");
3911c1d14583SBruce Richardson #endif
3912c1d14583SBruce Richardson 			if (!ad->tx_use_avx512 &&
3913c1d14583SBruce Richardson 				(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3914c1d14583SBruce Richardson 				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3915c1d14583SBruce Richardson 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3916c1d14583SBruce Richardson 				ad->tx_use_avx2 = true;
3917c1d14583SBruce Richardson 
3918c1d14583SBruce Richardson 			if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3919c1d14583SBruce Richardson 				tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3920c1d14583SBruce Richardson 				ad->tx_vec_allowed = false;
3921c1d14583SBruce Richardson 
3922c1d14583SBruce Richardson 			if (ad->tx_vec_allowed) {
3923c1d14583SBruce Richardson 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
3924c1d14583SBruce Richardson 					txq = dev->data->tx_queues[i];
3925c1d14583SBruce Richardson 					if (txq && ice_txq_vec_setup(txq)) {
3926c1d14583SBruce Richardson 						ad->tx_vec_allowed = false;
3927c1d14583SBruce Richardson 						break;
3928c1d14583SBruce Richardson 					}
3929c1d14583SBruce Richardson 				}
3930c1d14583SBruce Richardson 			}
3931c1d14583SBruce Richardson 		} else {
3932c1d14583SBruce Richardson 			ad->tx_vec_allowed = false;
3933c1d14583SBruce Richardson 		}
3934c1d14583SBruce Richardson 	}
3935c1d14583SBruce Richardson 
3936c1d14583SBruce Richardson 	if (ad->tx_vec_allowed) {
3937c1d14583SBruce Richardson 		dev->tx_pkt_prepare = NULL;
3938c1d14583SBruce Richardson 		if (ad->tx_use_avx512) {
3939c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
3940c1d14583SBruce Richardson 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3941c1d14583SBruce Richardson 				PMD_DRV_LOG(NOTICE,
3942c1d14583SBruce Richardson 					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
3943c1d14583SBruce Richardson 					    dev->data->port_id);
3944c1d14583SBruce Richardson 				dev->tx_pkt_burst =
3945c1d14583SBruce Richardson 					ice_xmit_pkts_vec_avx512_offload;
3946c1d14583SBruce Richardson 				dev->tx_pkt_prepare = ice_prep_pkts;
3947c1d14583SBruce Richardson 			} else {
3948c1d14583SBruce Richardson 				PMD_DRV_LOG(NOTICE,
3949c1d14583SBruce Richardson 					    "Using AVX512 Vector Tx (port %d).",
3950c1d14583SBruce Richardson 					    dev->data->port_id);
3951c1d14583SBruce Richardson 				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3952c1d14583SBruce Richardson 			}
3953c1d14583SBruce Richardson #endif
3954c1d14583SBruce Richardson 		} else {
3955c1d14583SBruce Richardson 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3956c1d14583SBruce Richardson 				PMD_DRV_LOG(NOTICE,
3957c1d14583SBruce Richardson 					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
3958c1d14583SBruce Richardson 					    dev->data->port_id);
3959c1d14583SBruce Richardson 				dev->tx_pkt_burst =
3960c1d14583SBruce Richardson 					ice_xmit_pkts_vec_avx2_offload;
3961c1d14583SBruce Richardson 				dev->tx_pkt_prepare = ice_prep_pkts;
3962c1d14583SBruce Richardson 			} else {
3963c1d14583SBruce Richardson 				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3964c1d14583SBruce Richardson 					    ad->tx_use_avx2 ? "AVX2 " : "",
3965c1d14583SBruce Richardson 					    dev->data->port_id);
3966c1d14583SBruce Richardson 				dev->tx_pkt_burst = ad->tx_use_avx2 ?
3967c1d14583SBruce Richardson 						    ice_xmit_pkts_vec_avx2 :
3968c1d14583SBruce Richardson 						    ice_xmit_pkts_vec;
3969c1d14583SBruce Richardson 			}
3970c1d14583SBruce Richardson 		}
3971c1d14583SBruce Richardson 
3972c1d14583SBruce Richardson 		if (mbuf_check) {
3973c1d14583SBruce Richardson 			ad->tx_pkt_burst = dev->tx_pkt_burst;
3974c1d14583SBruce Richardson 			dev->tx_pkt_burst = ice_xmit_pkts_check;
3975c1d14583SBruce Richardson 		}
3976c1d14583SBruce Richardson 		return;
3977c1d14583SBruce Richardson 	}
3978c1d14583SBruce Richardson #endif
3979c1d14583SBruce Richardson 
3980c1d14583SBruce Richardson 	if (ad->tx_simple_allowed) {
3981c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3982c1d14583SBruce Richardson 		dev->tx_pkt_burst = ice_xmit_pkts_simple;
3983c1d14583SBruce Richardson 		dev->tx_pkt_prepare = NULL;
3984c1d14583SBruce Richardson 	} else {
3985c1d14583SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3986c1d14583SBruce Richardson 		dev->tx_pkt_burst = ice_xmit_pkts;
3987c1d14583SBruce Richardson 		dev->tx_pkt_prepare = ice_prep_pkts;
3988c1d14583SBruce Richardson 	}
3989c1d14583SBruce Richardson 
3990c1d14583SBruce Richardson 	if (mbuf_check) {
3991c1d14583SBruce Richardson 		ad->tx_pkt_burst = dev->tx_pkt_burst;
3992c1d14583SBruce Richardson 		dev->tx_pkt_burst = ice_xmit_pkts_check;
3993c1d14583SBruce Richardson 	}
3994c1d14583SBruce Richardson }
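
/*
 * Usage sketch (illustrative only, not part of the driver): the path
 * selection above honours the process-wide SIMD limit, so an application
 * can force the scalar/SSE Tx paths by lowering that limit before the
 * device is configured, either via the EAL option
 * --force-max-simd-bitwidth or programmatically as sketched below.
 */
#if 0 /* example, compiled out */
#include <rte_vect.h>

static int
limit_to_sse_paths(void)
{
	/* must run before dev_configure for ice_set_tx_function to see it */
	return rte_vect_set_max_simd_bitwidth(RTE_VECT_SIMD_128);
}
#endif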
3995c1d14583SBruce Richardson 
3996c1d14583SBruce Richardson static const struct {
3997c1d14583SBruce Richardson 	eth_tx_burst_t pkt_burst;
3998c1d14583SBruce Richardson 	const char *info;
3999c1d14583SBruce Richardson } ice_tx_burst_infos[] = {
4000c1d14583SBruce Richardson 	{ ice_xmit_pkts_simple,   "Scalar Simple" },
4001c1d14583SBruce Richardson 	{ ice_xmit_pkts,          "Scalar" },
4002c1d14583SBruce Richardson #ifdef RTE_ARCH_X86
4003c1d14583SBruce Richardson #ifdef CC_AVX512_SUPPORT
4004c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
4005c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
4006c1d14583SBruce Richardson #endif
4007c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx2,         "Vector AVX2" },
4008c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
4009c1d14583SBruce Richardson 	{ ice_xmit_pkts_vec,              "Vector SSE" },
4010c1d14583SBruce Richardson #endif
4011c1d14583SBruce Richardson };
4012c1d14583SBruce Richardson 
4013c1d14583SBruce Richardson int
4014c1d14583SBruce Richardson ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4015c1d14583SBruce Richardson 		      struct rte_eth_burst_mode *mode)
4016c1d14583SBruce Richardson {
4017c1d14583SBruce Richardson 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4018c1d14583SBruce Richardson 	int ret = -EINVAL;
4019c1d14583SBruce Richardson 	unsigned int i;
4020c1d14583SBruce Richardson 
4021c1d14583SBruce Richardson 	for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
4022c1d14583SBruce Richardson 		if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
4023c1d14583SBruce Richardson 			snprintf(mode->info, sizeof(mode->info), "%s",
4024c1d14583SBruce Richardson 				 ice_tx_burst_infos[i].info);
4025c1d14583SBruce Richardson 			ret = 0;
4026c1d14583SBruce Richardson 			break;
4027c1d14583SBruce Richardson 		}
4028c1d14583SBruce Richardson 	}
4029c1d14583SBruce Richardson 
4030c1d14583SBruce Richardson 	return ret;
4031c1d14583SBruce Richardson }
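
/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * can discover which of the Tx paths listed above was selected through the
 * generic ethdev burst-mode API; "port_id" is assumed to be a configured
 * port.
 */
#if 0 /* example, compiled out */
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_tx_burst_mode(uint16_t port_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
		printf("port %u Tx burst mode: %s\n", port_id, mode.info);
}
#endif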
4032c1d14583SBruce Richardson 
4033c1d14583SBruce Richardson /* The hardware datasheet explains the meaning of each value below.
4034c1d14583SBruce Richardson  *
4035c1d14583SBruce Richardson  * @note: fix ice_dev_supported_ptypes_get() if any change here.
4036c1d14583SBruce Richardson  */
4037c1d14583SBruce Richardson static inline uint32_t
4038c1d14583SBruce Richardson ice_get_default_pkt_type(uint16_t ptype)
4039c1d14583SBruce Richardson {
4040c1d14583SBruce Richardson 	static const alignas(RTE_CACHE_LINE_SIZE) uint32_t type_table[ICE_MAX_PKT_TYPE] = {
4041c1d14583SBruce Richardson 		/* L2 types */
4042c1d14583SBruce Richardson 		/* [0] reserved */
4043c1d14583SBruce Richardson 		[1] = RTE_PTYPE_L2_ETHER,
4044c1d14583SBruce Richardson 		[2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
4045c1d14583SBruce Richardson 		/* [3] - [5] reserved */
4046c1d14583SBruce Richardson 		[6] = RTE_PTYPE_L2_ETHER_LLDP,
4047c1d14583SBruce Richardson 		/* [7] - [10] reserved */
4048c1d14583SBruce Richardson 		[11] = RTE_PTYPE_L2_ETHER_ARP,
4049c1d14583SBruce Richardson 		/* [12] - [21] reserved */
4050c1d14583SBruce Richardson 
4051c1d14583SBruce Richardson 		/* Non tunneled IPv4 */
4052c1d14583SBruce Richardson 		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4053c1d14583SBruce Richardson 		       RTE_PTYPE_L4_FRAG,
4054c1d14583SBruce Richardson 		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4055c1d14583SBruce Richardson 		       RTE_PTYPE_L4_NONFRAG,
4056c1d14583SBruce Richardson 		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4057c1d14583SBruce Richardson 		       RTE_PTYPE_L4_UDP,
4058c1d14583SBruce Richardson 		/* [25] reserved */
4059c1d14583SBruce Richardson 		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4060c1d14583SBruce Richardson 		       RTE_PTYPE_L4_TCP,
4061c1d14583SBruce Richardson 		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4062c1d14583SBruce Richardson 		       RTE_PTYPE_L4_SCTP,
4063c1d14583SBruce Richardson 		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4064c1d14583SBruce Richardson 		       RTE_PTYPE_L4_ICMP,
4065c1d14583SBruce Richardson 
4066c1d14583SBruce Richardson 		/* IPv4 --> IPv4 */
4067c1d14583SBruce Richardson 		[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4068c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4069c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4070c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4071c1d14583SBruce Richardson 		[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4072c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4073c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4074c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4075c1d14583SBruce Richardson 		[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4076c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4077c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4078c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4079c1d14583SBruce Richardson 		/* [32] reserved */
4080c1d14583SBruce Richardson 		[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4081c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4082c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4083c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4084c1d14583SBruce Richardson 		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4085c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4086c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4087c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4088c1d14583SBruce Richardson 		[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4089c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4090c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4091c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4092c1d14583SBruce Richardson 
4093c1d14583SBruce Richardson 		/* IPv4 --> IPv6 */
4094c1d14583SBruce Richardson 		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4095c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4096c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4097c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4098c1d14583SBruce Richardson 		[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4099c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4100c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4101c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4102c1d14583SBruce Richardson 		[38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4103c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4104c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4105c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4106c1d14583SBruce Richardson 		/* [39] reserved */
4107c1d14583SBruce Richardson 		[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4108c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4109c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4110c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4111c1d14583SBruce Richardson 		[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4112c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4113c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4114c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4115c1d14583SBruce Richardson 		[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4116c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4117c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4118c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4119c1d14583SBruce Richardson 
4120c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN */
4121c1d14583SBruce Richardson 		[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4122c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT,
4123c1d14583SBruce Richardson 
4124c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
4125c1d14583SBruce Richardson 		[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4126c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4127c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4128c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4129c1d14583SBruce Richardson 		[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4130c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4131c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4132c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4133c1d14583SBruce Richardson 		[46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4134c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4135c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4136c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4137c1d14583SBruce Richardson 		/* [47] reserved */
4138c1d14583SBruce Richardson 		[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4139c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4140c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4141c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4142c1d14583SBruce Richardson 		[49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4143c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4144c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4145c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4146c1d14583SBruce Richardson 		[50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4147c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4148c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4149c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4150c1d14583SBruce Richardson 
4151c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
4152c1d14583SBruce Richardson 		[51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4153c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4154c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4155c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4156c1d14583SBruce Richardson 		[52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4157c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4158c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4159c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4160c1d14583SBruce Richardson 		[53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4161c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4162c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4163c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4164c1d14583SBruce Richardson 		/* [54] reserved */
4165c1d14583SBruce Richardson 		[55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4166c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4167c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4168c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4169c1d14583SBruce Richardson 		[56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4170c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4171c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4172c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4173c1d14583SBruce Richardson 		[57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4174c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT |
4175c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4176c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4177c1d14583SBruce Richardson 
4178c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
4179c1d14583SBruce Richardson 		[58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4180c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
4181c1d14583SBruce Richardson 
4182c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
4183c1d14583SBruce Richardson 		[59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4184c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4185c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4186c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4187c1d14583SBruce Richardson 		[60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4188c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4189c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4190c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4191c1d14583SBruce Richardson 		[61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4192c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4193c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4194c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4195c1d14583SBruce Richardson 		/* [62] reserved */
4196c1d14583SBruce Richardson 		[63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4197c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4198c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4199c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4200c1d14583SBruce Richardson 		[64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4201c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4202c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4203c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4204c1d14583SBruce Richardson 		[65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4205c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4206c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4207c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4208c1d14583SBruce Richardson 
4209c1d14583SBruce Richardson 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
4210c1d14583SBruce Richardson 		[66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4211c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4212c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4213c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4214c1d14583SBruce Richardson 		[67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4215c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4216c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4217c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4218c1d14583SBruce Richardson 		[68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4219c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4220c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4221c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4222c1d14583SBruce Richardson 		/* [69] reserved */
4223c1d14583SBruce Richardson 		[70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4224c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4225c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4226c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4227c1d14583SBruce Richardson 		[71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4228c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4229c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4230c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_SCTP,
4231c1d14583SBruce Richardson 		[72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4232c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4233c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4234c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_ICMP,
4235c1d14583SBruce Richardson 		/* [73] - [87] reserved */
4236c1d14583SBruce Richardson 
4237c1d14583SBruce Richardson 		/* Non tunneled IPv6 */
4238c1d14583SBruce Richardson 		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4239c1d14583SBruce Richardson 		       RTE_PTYPE_L4_FRAG,
4240c1d14583SBruce Richardson 		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4241c1d14583SBruce Richardson 		       RTE_PTYPE_L4_NONFRAG,
4242c1d14583SBruce Richardson 		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4243c1d14583SBruce Richardson 		       RTE_PTYPE_L4_UDP,
4244c1d14583SBruce Richardson 		/* [91] reserved */
4245c1d14583SBruce Richardson 		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4246c1d14583SBruce Richardson 		       RTE_PTYPE_L4_TCP,
4247c1d14583SBruce Richardson 		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4248c1d14583SBruce Richardson 		       RTE_PTYPE_L4_SCTP,
4249c1d14583SBruce Richardson 		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4250c1d14583SBruce Richardson 		       RTE_PTYPE_L4_ICMP,
4251c1d14583SBruce Richardson 
4252c1d14583SBruce Richardson 		/* IPv6 --> IPv4 */
4253c1d14583SBruce Richardson 		[95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4254c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4255c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4256c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_FRAG,
4257c1d14583SBruce Richardson 		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4258c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4259c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4260c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_NONFRAG,
4261c1d14583SBruce Richardson 		[97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4262c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4263c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4264c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_UDP,
4265c1d14583SBruce Richardson 		/* [98] reserved */
4266c1d14583SBruce Richardson 		[99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4267c1d14583SBruce Richardson 		       RTE_PTYPE_TUNNEL_IP |
4268c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4269c1d14583SBruce Richardson 		       RTE_PTYPE_INNER_L4_TCP,
4270c1d14583SBruce Richardson 		[100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4271c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4272c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4273c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4274c1d14583SBruce Richardson 		[101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4275c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4276c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4277c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4278c1d14583SBruce Richardson 
4279c1d14583SBruce Richardson 		/* IPv6 --> IPv6 */
4280c1d14583SBruce Richardson 		[102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4281c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4282c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4283c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4284c1d14583SBruce Richardson 		[103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4285c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4286c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4287c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4288c1d14583SBruce Richardson 		[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4289c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4290c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4291c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4292c1d14583SBruce Richardson 		/* [105] reserved */
4293c1d14583SBruce Richardson 		[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4294c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4295c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4296c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4297c1d14583SBruce Richardson 		[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4298c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4299c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4300c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4301c1d14583SBruce Richardson 		[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4302c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_IP |
4303c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4304c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4305c1d14583SBruce Richardson 
4306c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN */
4307c1d14583SBruce Richardson 		[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4308c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT,
4309c1d14583SBruce Richardson 
4310c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
4311c1d14583SBruce Richardson 		[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4312c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4313c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4314c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4315c1d14583SBruce Richardson 		[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4316c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4317c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4318c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4319c1d14583SBruce Richardson 		[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4320c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4321c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4322c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4323c1d14583SBruce Richardson 		/* [113] reserved */
4324c1d14583SBruce Richardson 		[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4325c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4326c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4327c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4328c1d14583SBruce Richardson 		[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4329c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4330c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4331c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4332c1d14583SBruce Richardson 		[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4333c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4334c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4335c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4336c1d14583SBruce Richardson 
4337c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
4338c1d14583SBruce Richardson 		[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4339c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4340c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4341c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4342c1d14583SBruce Richardson 		[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4343c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4344c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4345c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4346c1d14583SBruce Richardson 		[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4347c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4348c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4349c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4350c1d14583SBruce Richardson 		/* [120] reserved */
4351c1d14583SBruce Richardson 		[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4352c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4353c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4354c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4355c1d14583SBruce Richardson 		[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4356c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4357c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4358c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4359c1d14583SBruce Richardson 		[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4360c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT |
4361c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4362c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4363c1d14583SBruce Richardson 
4364c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
4365c1d14583SBruce Richardson 		[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4366c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
4367c1d14583SBruce Richardson 
4368c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
4369c1d14583SBruce Richardson 		[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4370c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4371c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4372c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4373c1d14583SBruce Richardson 		[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4374c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4375c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4376c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4377c1d14583SBruce Richardson 		[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4378c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4379c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4380c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4381c1d14583SBruce Richardson 		/* [128] reserved */
4382c1d14583SBruce Richardson 		[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4383c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4384c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4385c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4386c1d14583SBruce Richardson 		[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4387c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4388c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4389c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4390c1d14583SBruce Richardson 		[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4391c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4392c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4393c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4394c1d14583SBruce Richardson 
4395c1d14583SBruce Richardson 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
4396c1d14583SBruce Richardson 		[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4397c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4398c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4399c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4400c1d14583SBruce Richardson 		[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4401c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4402c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4403c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4404c1d14583SBruce Richardson 		[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4405c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4406c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4407c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4408c1d14583SBruce Richardson 		/* [135] reserved */
4409c1d14583SBruce Richardson 		[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4410c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4411c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4412c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4413c1d14583SBruce Richardson 		[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4414c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4415c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4416c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_SCTP,
4417c1d14583SBruce Richardson 		[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4418c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4419c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4420c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4421c1d14583SBruce Richardson 		/* [139] - [299] reserved */
4422c1d14583SBruce Richardson 
4423c1d14583SBruce Richardson 		/* PPPoE */
4424c1d14583SBruce Richardson 		[300] = RTE_PTYPE_L2_ETHER_PPPOE,
4425c1d14583SBruce Richardson 		[301] = RTE_PTYPE_L2_ETHER_PPPOE,
4426c1d14583SBruce Richardson 
4427c1d14583SBruce Richardson 		/* PPPoE --> IPv4 */
4428c1d14583SBruce Richardson 		[302] = RTE_PTYPE_L2_ETHER_PPPOE |
4429c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4430c1d14583SBruce Richardson 			RTE_PTYPE_L4_FRAG,
4431c1d14583SBruce Richardson 		[303] = RTE_PTYPE_L2_ETHER_PPPOE |
4432c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4433c1d14583SBruce Richardson 			RTE_PTYPE_L4_NONFRAG,
4434c1d14583SBruce Richardson 		[304] = RTE_PTYPE_L2_ETHER_PPPOE |
4435c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4436c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4437c1d14583SBruce Richardson 		[305] = RTE_PTYPE_L2_ETHER_PPPOE |
4438c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4439c1d14583SBruce Richardson 			RTE_PTYPE_L4_TCP,
4440c1d14583SBruce Richardson 		[306] = RTE_PTYPE_L2_ETHER_PPPOE |
4441c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4442c1d14583SBruce Richardson 			RTE_PTYPE_L4_SCTP,
4443c1d14583SBruce Richardson 		[307] = RTE_PTYPE_L2_ETHER_PPPOE |
4444c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4445c1d14583SBruce Richardson 			RTE_PTYPE_L4_ICMP,
4446c1d14583SBruce Richardson 
4447c1d14583SBruce Richardson 		/* PPPoE --> IPv6 */
4448c1d14583SBruce Richardson 		[308] = RTE_PTYPE_L2_ETHER_PPPOE |
4449c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4450c1d14583SBruce Richardson 			RTE_PTYPE_L4_FRAG,
4451c1d14583SBruce Richardson 		[309] = RTE_PTYPE_L2_ETHER_PPPOE |
4452c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4453c1d14583SBruce Richardson 			RTE_PTYPE_L4_NONFRAG,
4454c1d14583SBruce Richardson 		[310] = RTE_PTYPE_L2_ETHER_PPPOE |
4455c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4456c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4457c1d14583SBruce Richardson 		[311] = RTE_PTYPE_L2_ETHER_PPPOE |
4458c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4459c1d14583SBruce Richardson 			RTE_PTYPE_L4_TCP,
4460c1d14583SBruce Richardson 		[312] = RTE_PTYPE_L2_ETHER_PPPOE |
4461c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4462c1d14583SBruce Richardson 			RTE_PTYPE_L4_SCTP,
4463c1d14583SBruce Richardson 		[313] = RTE_PTYPE_L2_ETHER_PPPOE |
4464c1d14583SBruce Richardson 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4465c1d14583SBruce Richardson 			RTE_PTYPE_L4_ICMP,
4466c1d14583SBruce Richardson 		/* [314] - [324] reserved */
4467c1d14583SBruce Richardson 
4468c1d14583SBruce Richardson 		/* IPv4/IPv6 --> GTPC/GTPU */
4469c1d14583SBruce Richardson 		[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4470c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4471c1d14583SBruce Richardson 		[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4472c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4473c1d14583SBruce Richardson 		[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4474c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4475c1d14583SBruce Richardson 		[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4476c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPC,
4477c1d14583SBruce Richardson 		[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4478c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU,
4479c1d14583SBruce Richardson 		[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4480c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU,
4481c1d14583SBruce Richardson 
4482c1d14583SBruce Richardson 		/* IPv4 --> GTPU --> IPv4 */
4483c1d14583SBruce Richardson 		[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4484c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4485c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4486c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4487c1d14583SBruce Richardson 		[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4488c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4489c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4490c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4491c1d14583SBruce Richardson 		[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4492c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4493c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4494c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4495c1d14583SBruce Richardson 		[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4496c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4497c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4498c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4499c1d14583SBruce Richardson 		[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4500c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4501c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4502c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4503c1d14583SBruce Richardson 
4504c1d14583SBruce Richardson 		/* IPv6 --> GTPU --> IPv4 */
4505c1d14583SBruce Richardson 		[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4506c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4507c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4508c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4509c1d14583SBruce Richardson 		[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4510c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4511c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4512c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4513c1d14583SBruce Richardson 		[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4514c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4515c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4516c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4517c1d14583SBruce Richardson 		[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4518c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4519c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4520c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4521c1d14583SBruce Richardson 		[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4522c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4523c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4524c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4525c1d14583SBruce Richardson 
4526c1d14583SBruce Richardson 		/* IPv4 --> GTPU --> IPv6 */
4527c1d14583SBruce Richardson 		[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4528c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4529c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4530c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4531c1d14583SBruce Richardson 		[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4532c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4533c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4534c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4535c1d14583SBruce Richardson 		[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4536c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4537c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4538c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4539c1d14583SBruce Richardson 		[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4540c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4541c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4542c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4543c1d14583SBruce Richardson 		[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4544c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4545c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4546c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4547c1d14583SBruce Richardson 
4548c1d14583SBruce Richardson 		/* IPv6 --> GTPU --> IPv6 */
4549c1d14583SBruce Richardson 		[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4550c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4551c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4552c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_FRAG,
4553c1d14583SBruce Richardson 		[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4554c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4555c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4556c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_NONFRAG,
4557c1d14583SBruce Richardson 		[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4558c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4559c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4560c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_UDP,
4561c1d14583SBruce Richardson 		[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4562c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4563c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4564c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_TCP,
4565c1d14583SBruce Richardson 		[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4566c1d14583SBruce Richardson 			RTE_PTYPE_TUNNEL_GTPU |
4567c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4568c1d14583SBruce Richardson 			RTE_PTYPE_INNER_L4_ICMP,
4569c1d14583SBruce Richardson 
4570c1d14583SBruce Richardson 		/* IPv4 --> UDP ECPRI */
4571c1d14583SBruce Richardson 		[372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4572c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4573c1d14583SBruce Richardson 		[373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4574c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4575c1d14583SBruce Richardson 		[374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4576c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4577c1d14583SBruce Richardson 		[375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4578c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4579c1d14583SBruce Richardson 		[376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4580c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4581c1d14583SBruce Richardson 		[377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4582c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4583c1d14583SBruce Richardson 		[378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4584c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4585c1d14583SBruce Richardson 		[379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4586c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4587c1d14583SBruce Richardson 		[380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4588c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4589c1d14583SBruce Richardson 		[381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4590c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4591c1d14583SBruce Richardson 
4592c1d14583SBruce Richardson 		/* IPv6 --> UDP ECPRI */
4593c1d14583SBruce Richardson 		[382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4594c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4595c1d14583SBruce Richardson 		[383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4596c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4597c1d14583SBruce Richardson 		[384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4598c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4599c1d14583SBruce Richardson 		[385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4600c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4601c1d14583SBruce Richardson 		[386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4602c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4603c1d14583SBruce Richardson 		[387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4604c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4605c1d14583SBruce Richardson 		[388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4606c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4607c1d14583SBruce Richardson 		[389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4608c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4609c1d14583SBruce Richardson 		[390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4610c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4611c1d14583SBruce Richardson 		[391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4612c1d14583SBruce Richardson 			RTE_PTYPE_L4_UDP,
4613c1d14583SBruce Richardson 		/* All others reserved */
4614c1d14583SBruce Richardson 	};
4615c1d14583SBruce Richardson 
4616c1d14583SBruce Richardson 	return type_table[ptype];
4617c1d14583SBruce Richardson }
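
/*
 * Usage sketch (illustrative only, not part of the driver): the value
 * returned above is an OR of RTE_PTYPE_* masks, which applications can
 * test layer by layer or format as a string; "m" is an assumed received
 * mbuf.
 */
#if 0 /* example, compiled out */
#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

static void
dump_ptype(const struct rte_mbuf *m)
{
	char buf[256];

	if (RTE_ETH_IS_IPV4_HDR(m->packet_type))
		printf("outer L3 is IPv4\n");
	rte_get_ptype_name(m->packet_type, buf, sizeof(buf));
	printf("ptype: %s\n", buf);
}
#endif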
4618c1d14583SBruce Richardson 
4619c1d14583SBruce Richardson void __rte_cold
4620c1d14583SBruce Richardson ice_set_default_ptype_table(struct rte_eth_dev *dev)
4621c1d14583SBruce Richardson {
4622c1d14583SBruce Richardson 	struct ice_adapter *ad =
4623c1d14583SBruce Richardson 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4624c1d14583SBruce Richardson 	int i;
4625c1d14583SBruce Richardson 
4626c1d14583SBruce Richardson 	for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4627c1d14583SBruce Richardson 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4628c1d14583SBruce Richardson }
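
/*
 * Usage sketch (illustrative only, not part of the driver): the table
 * filled in above is kept in sync with ice_dev_supported_ptypes_get()
 * (see the @note above), so an application can query the reported packet
 * types with the generic API below; "port_id" is an assumed port.
 */
#if 0 /* example, compiled out */
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

static void
dump_supported_ptypes(uint16_t port_id)
{
	uint32_t ptypes[64];
	int i, num;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					       ptypes, RTE_DIM(ptypes));
	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
		printf("supported ptype: 0x%08x\n", ptypes[i]);
}
#endif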
4629c1d14583SBruce Richardson 
4630c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S	1
4631c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M	\
4632c1d14583SBruce Richardson 			(0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4633c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4634c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4635c1d14583SBruce Richardson 
4636c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S	4
4637c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M	\
4638c1d14583SBruce Richardson 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4639c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S	5
4640c1d14583SBruce Richardson #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M	\
4641c1d14583SBruce Richardson 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4642c1d14583SBruce Richardson 
4643c1d14583SBruce Richardson /*
4644c1d14583SBruce Richardson  * Check the programming status descriptor in the Rx queue.
4645c1d14583SBruce Richardson  * This is done after a Flow Director filter has been programmed
4646c1d14583SBruce Richardson  * on the Tx queue.
4647c1d14583SBruce Richardson  */
4648c1d14583SBruce Richardson static inline int
4649c1d14583SBruce Richardson ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4650c1d14583SBruce Richardson {
4651c1d14583SBruce Richardson 	volatile union ice_32byte_rx_desc *rxdp;
4652c1d14583SBruce Richardson 	uint64_t qword1;
4653c1d14583SBruce Richardson 	uint32_t rx_status;
4654c1d14583SBruce Richardson 	uint32_t error;
4655c1d14583SBruce Richardson 	uint32_t id;
4656c1d14583SBruce Richardson 	int ret = -EAGAIN;
4657c1d14583SBruce Richardson 
4658c1d14583SBruce Richardson 	rxdp = (volatile union ice_32byte_rx_desc *)
4659c1d14583SBruce Richardson 		(&rxq->rx_ring[rxq->rx_tail]);
4660c1d14583SBruce Richardson 	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4661c1d14583SBruce Richardson 	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4662c1d14583SBruce Richardson 			>> ICE_RXD_QW1_STATUS_S;
4663c1d14583SBruce Richardson 
4664c1d14583SBruce Richardson 	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4665c1d14583SBruce Richardson 		ret = 0;
4666c1d14583SBruce Richardson 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4667c1d14583SBruce Richardson 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4668c1d14583SBruce Richardson 		id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4669c1d14583SBruce Richardson 			ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4670c1d14583SBruce Richardson 		if (error) {
4671c1d14583SBruce Richardson 			if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4672c1d14583SBruce Richardson 				PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4673c1d14583SBruce Richardson 			else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4674c1d14583SBruce Richardson 				PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4675c1d14583SBruce Richardson 			ret = -EINVAL;
4676c1d14583SBruce Richardson 			goto err;
4677c1d14583SBruce Richardson 		}
4678c1d14583SBruce Richardson 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4679c1d14583SBruce Richardson 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4680c1d14583SBruce Richardson 		if (error) {
4681c1d14583SBruce Richardson 			PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4682c1d14583SBruce Richardson 			ret = -EINVAL;
4683c1d14583SBruce Richardson 		}
4684c1d14583SBruce Richardson err:
4685c1d14583SBruce Richardson 		rxdp->wb.qword1.status_error_len = 0;
4686c1d14583SBruce Richardson 		rxq->rx_tail++;
4687c1d14583SBruce Richardson 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4688c1d14583SBruce Richardson 			rxq->rx_tail = 0;
4689c1d14583SBruce Richardson 		if (rxq->rx_tail == 0)
4690c1d14583SBruce Richardson 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4691c1d14583SBruce Richardson 		else
4692c1d14583SBruce Richardson 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4693c1d14583SBruce Richardson 	}
4694c1d14583SBruce Richardson 
4695c1d14583SBruce Richardson 	return ret;
4696c1d14583SBruce Richardson }
4697c1d14583SBruce Richardson 
4698c1d14583SBruce Richardson #define ICE_FDIR_MAX_WAIT_US 10000
4699c1d14583SBruce Richardson 
4700c1d14583SBruce Richardson int
4701c1d14583SBruce Richardson ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4702c1d14583SBruce Richardson {
4703c038157aSBruce Richardson 	struct ci_tx_queue *txq = pf->fdir.txq;
4704c1d14583SBruce Richardson 	struct ice_rx_queue *rxq = pf->fdir.rxq;
4705c1d14583SBruce Richardson 	volatile struct ice_fltr_desc *fdirdp;
4706c1d14583SBruce Richardson 	volatile struct ice_tx_desc *txdp;
4707c1d14583SBruce Richardson 	uint32_t td_cmd;
4708c1d14583SBruce Richardson 	uint16_t i;
4709c1d14583SBruce Richardson 
4710c1d14583SBruce Richardson 	fdirdp = (volatile struct ice_fltr_desc *)
47114d0f54d9SBruce Richardson 		(&txq->ice_tx_ring[txq->tx_tail]);
4712c1d14583SBruce Richardson 	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4713c1d14583SBruce Richardson 	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4714c1d14583SBruce Richardson 
47154d0f54d9SBruce Richardson 	txdp = &txq->ice_tx_ring[txq->tx_tail + 1];
4716c1d14583SBruce Richardson 	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4717c1d14583SBruce Richardson 	td_cmd = ICE_TX_DESC_CMD_EOP |
4718c1d14583SBruce Richardson 		ICE_TX_DESC_CMD_RS  |
4719c1d14583SBruce Richardson 		ICE_TX_DESC_CMD_DUMMY;
4720c1d14583SBruce Richardson 
4721c1d14583SBruce Richardson 	txdp->cmd_type_offset_bsz =
4722c1d14583SBruce Richardson 		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4723c1d14583SBruce Richardson 
4724c1d14583SBruce Richardson 	txq->tx_tail += 2;
4725c1d14583SBruce Richardson 	if (txq->tx_tail >= txq->nb_tx_desc)
4726c1d14583SBruce Richardson 		txq->tx_tail = 0;
4727c1d14583SBruce Richardson 	/* Update the tx tail register */
4728c1d14583SBruce Richardson 	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4729c1d14583SBruce Richardson 	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4730c1d14583SBruce Richardson 		if ((txdp->cmd_type_offset_bsz &
4731c1d14583SBruce Richardson 		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4732c1d14583SBruce Richardson 		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4733c1d14583SBruce Richardson 			break;
4734c1d14583SBruce Richardson 		rte_delay_us(1);
4735c1d14583SBruce Richardson 	}
4736c1d14583SBruce Richardson 	if (i >= ICE_FDIR_MAX_WAIT_US) {
4737c1d14583SBruce Richardson 		PMD_DRV_LOG(ERR,
4738c1d14583SBruce Richardson 			    "Failed to program FDIR filter: timed out waiting for DD on the Tx queue.");
4739c1d14583SBruce Richardson 		return -ETIMEDOUT;
4740c1d14583SBruce Richardson 	}
4741c1d14583SBruce Richardson 
4742c1d14583SBruce Richardson 	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4743c1d14583SBruce Richardson 		int ret;
4744c1d14583SBruce Richardson 
4745c1d14583SBruce Richardson 		ret = ice_check_fdir_programming_status(rxq);
4746c1d14583SBruce Richardson 		if (ret == -EAGAIN)
4747c1d14583SBruce Richardson 			rte_delay_us(1);
4748c1d14583SBruce Richardson 		else
4749c1d14583SBruce Richardson 			return ret;
4750c1d14583SBruce Richardson 	}
4751c1d14583SBruce Richardson 
4752c1d14583SBruce Richardson 	PMD_DRV_LOG(ERR,
4753c1d14583SBruce Richardson 		    "Failed to program FDIR filter: timed out waiting for the programming status report.");
4754c1d14583SBruce Richardson 	return -ETIMEDOUT;
4757c1d14583SBruce Richardson }
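
/*
 * Usage sketch (illustrative only, not part of the driver): callers in the
 * FDIR flow code treat any non-zero return from ice_fdir_programming() as
 * a failed filter write; "pf" and "desc" are assumed to have been prepared
 * by that code.
 */
#if 0 /* example, compiled out */
static int
program_filter(struct ice_pf *pf, struct ice_fltr_desc *desc)
{
	int ret = ice_fdir_programming(pf, desc);

	if (ret == -ETIMEDOUT)
		PMD_DRV_LOG(ERR, "FDIR programming timed out");
	else if (ret == -EINVAL)
		PMD_DRV_LOG(ERR, "hardware rejected the FDIR rule");
	return ret;
}
#endif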