xref: /dpdk/drivers/net/intel/ice/ice_rxtx.c (revision 5cc9919fd443fbd3fce77a257601890a0ee6a247)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4 
5 #include <ethdev_driver.h>
6 #include <rte_net.h>
7 #include <rte_vect.h>
8 
9 #include "ice_rxtx.h"
10 #include "ice_rxtx_vec_common.h"
11 
12 #define ICE_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |		 \
13 		RTE_MBUF_F_TX_L4_MASK |		 \
14 		RTE_MBUF_F_TX_TCP_SEG |		 \
15 		RTE_MBUF_F_TX_UDP_SEG |		 \
16 		RTE_MBUF_F_TX_OUTER_IP_CKSUM)
17 
18 /**
19  * The mbuf dynamic field pointer for protocol extraction metadata.
20  */
21 #define ICE_DYNF_PROTO_XTR_METADATA(m, n) \
22 	RTE_MBUF_DYNFIELD((m), (n), uint32_t *)
23 
24 static int
25 ice_monitor_callback(const uint64_t value,
26 		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused)
27 {
28 	const uint64_t m = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
29 	/*
30 	 * we expect the DD bit to be set to 1 if this descriptor was already
31 	 * written to.
32 	 */
33 	return (value & m) == m ? -1 : 0;
34 }
35 
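/* Provide the address/condition pair used by the rte_power_monitor() path:
 * the core monitors the status_error0 word of the next descriptor to be
 * processed, and ice_monitor_callback() signals "stop waiting" once the DD
 * bit is set, i.e. once the descriptor has been written back.
 */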
36 int
37 ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
38 {
39 	volatile union ice_rx_flex_desc *rxdp;
40 	struct ice_rx_queue *rxq = rx_queue;
41 	uint16_t desc;
42 
43 	desc = rxq->rx_tail;
44 	rxdp = &rxq->rx_ring[desc];
45 	/* watch for changes in status bit */
46 	pmc->addr = &rxdp->wb.status_error0;
47 
48 	/* comparison callback */
49 	pmc->fn = ice_monitor_callback;
50 
51 	/* register is 16-bit */
52 	pmc->size = sizeof(uint16_t);
53 
54 	return 0;
55 }
56 
57 
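/* Map a protocol extraction type (from the "proto_xtr" devarg) to the
 * flexible Rx descriptor ID (RXDID) programmed into the queue context;
 * unknown types fall back to the OVS comms profile.
 */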
58 static inline uint8_t
59 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
60 {
61 	static uint8_t rxdid_map[] = {
62 		[PROTO_XTR_NONE]      = ICE_RXDID_COMMS_OVS,
63 		[PROTO_XTR_VLAN]      = ICE_RXDID_COMMS_AUX_VLAN,
64 		[PROTO_XTR_IPV4]      = ICE_RXDID_COMMS_AUX_IPV4,
65 		[PROTO_XTR_IPV6]      = ICE_RXDID_COMMS_AUX_IPV6,
66 		[PROTO_XTR_IPV6_FLOW] = ICE_RXDID_COMMS_AUX_IPV6_FLOW,
67 		[PROTO_XTR_TCP]       = ICE_RXDID_COMMS_AUX_TCP,
68 		[PROTO_XTR_IP_OFFSET] = ICE_RXDID_COMMS_AUX_IP_OFFSET,
69 	};
70 
71 	return xtr_type < RTE_DIM(rxdid_map) ?
72 				rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
73 }
74 
75 static inline void
76 ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
77 				       struct rte_mbuf *mb,
78 				       volatile union ice_rx_flex_desc *rxdp)
79 {
80 	volatile struct ice_32b_rx_flex_desc_comms *desc =
81 			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
82 	uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
83 
84 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
85 		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
86 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
87 	}
88 
89 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
90 	if (desc->flow_id != 0xFFFFFFFF) {
91 		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
92 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
93 	}
94 #endif
95 }
96 
97 static inline void
98 ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
99 				   struct rte_mbuf *mb,
100 				   volatile union ice_rx_flex_desc *rxdp)
101 {
102 	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
103 			(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
104 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
105 	uint16_t stat_err;
106 #endif
107 
108 	if (desc->flow_id != 0xFFFFFFFF) {
109 		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
110 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
111 	}
112 
113 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
114 	stat_err = rte_le_to_cpu_16(desc->status_error0);
115 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
116 		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
117 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
118 	}
119 #endif
120 }
121 
122 static inline void
123 ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
124 				      struct rte_mbuf *mb,
125 				      volatile union ice_rx_flex_desc *rxdp)
126 {
127 	volatile struct ice_32b_rx_flex_desc_comms *desc =
128 			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
129 	uint16_t stat_err;
130 
131 	stat_err = rte_le_to_cpu_16(desc->status_error0);
132 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
133 		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
134 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
135 	}
136 
137 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
138 	if (desc->flow_id != 0xFFFFFFFF) {
139 		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
140 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
141 	}
142 
143 	if (rxq->xtr_ol_flag) {
144 		uint32_t metadata = 0;
145 
146 		stat_err = rte_le_to_cpu_16(desc->status_error1);
147 
148 		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
149 			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
150 
151 		if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
152 			metadata |=
153 				rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
154 
155 		if (metadata) {
156 			mb->ol_flags |= rxq->xtr_ol_flag;
157 
158 			*ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata;
159 		}
160 	}
161 #else
162 	RTE_SET_USED(rxq);
163 #endif
164 }
165 
166 static inline void
167 ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
168 				      struct rte_mbuf *mb,
169 				      volatile union ice_rx_flex_desc *rxdp)
170 {
171 	volatile struct ice_32b_rx_flex_desc_comms *desc =
172 			(volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
173 	uint16_t stat_err;
174 
175 	stat_err = rte_le_to_cpu_16(desc->status_error0);
176 	if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
177 		mb->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
178 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
179 	}
180 
181 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
182 	if (desc->flow_id != 0xFFFFFFFF) {
183 		mb->ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
184 		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
185 	}
186 
187 	if (rxq->xtr_ol_flag) {
188 		uint32_t metadata = 0;
189 
190 		if (desc->flex_ts.flex.aux0 != 0xFFFF)
191 			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
192 		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
193 			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
194 
195 		if (metadata) {
196 			mb->ol_flags |= rxq->xtr_ol_flag;
197 
198 			*ICE_DYNF_PROTO_XTR_METADATA(mb, rxq->xtr_field_offs) = metadata;
199 		}
200 	}
201 #else
202 	RTE_SET_USED(rxq);
203 #endif
204 }
205 
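/* Per-RXDID handlers that copy write-back metadata (RSS hash, FDIR ID and,
 * where applicable, protocol extraction words) from the flexible descriptor
 * into the mbuf; the table is indexed by the RXDID in use on the queue.
 */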
206 static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
207 	[ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
208 	[ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
209 	[ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
210 	[ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
211 	[ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
212 	[ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
213 	[ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
214 	[ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
215 };
216 
217 void
218 ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
219 {
220 	rxq->rxdid = rxdid;
221 
222 	switch (rxdid) {
223 	case ICE_RXDID_COMMS_AUX_VLAN:
224 	case ICE_RXDID_COMMS_AUX_IPV4:
225 	case ICE_RXDID_COMMS_AUX_IPV6:
226 	case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
227 	case ICE_RXDID_COMMS_AUX_TCP:
228 	case ICE_RXDID_COMMS_AUX_IP_OFFSET:
229 		break;
230 	case ICE_RXDID_COMMS_GENERIC:
231 		/* fallthrough */
232 	case ICE_RXDID_COMMS_OVS:
233 		break;
234 
235 	default:
236 		/* update this according to the RXDID for PROTO_XTR_NONE */
237 		rxq->rxdid = ICE_RXDID_COMMS_OVS;
238 		break;
239 	}
240 
241 	if (rxq->xtr_field_offs == -1)
242 		rxq->xtr_ol_flag = 0;
243 }
244 
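/* Program one Rx queue in hardware: derive the buffer sizes and maximum
 * packet length, configure protocol-based buffer split when requested, fill
 * the RLAN queue context, select the flexible descriptor format through
 * QRXFLXP_CNTXT, then write the context and initialize the tail register.
 */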
245 static int
246 ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
247 {
248 	struct ice_vsi *vsi = rxq->vsi;
249 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
250 	struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
251 	struct rte_eth_dev_data *dev_data = rxq->vsi->adapter->pf.dev_data;
252 	struct ice_rlan_ctx rx_ctx;
253 	uint16_t buf_size;
254 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
255 	uint32_t regval;
256 	struct ice_adapter *ad = rxq->vsi->adapter;
257 	uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
258 	int err;
259 
260 	/* Set buffer size as the head split is disabled. */
261 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
262 			      RTE_PKTMBUF_HEADROOM);
263 	rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
264 	rxq->rx_buf_len = RTE_MIN(rxq->rx_buf_len, ICE_RX_MAX_DATA_BUF_SIZE);
265 	rxq->max_pkt_len =
266 		RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
267 			frame_size);
268 
269 	if (rxq->max_pkt_len <= RTE_ETHER_MIN_LEN ||
270 	    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
271 		PMD_DRV_LOG(ERR, "maximum packet length must "
272 			    "be larger than %u and not exceed %u",
273 			    (uint32_t)RTE_ETHER_MIN_LEN,
274 			    (uint32_t)ICE_FRAME_SIZE_MAX);
275 		return -EINVAL;
276 	}
277 
278 	if (!rxq->ts_enable && (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
279 		/* Register mbuf field and flag for Rx timestamp */
280 		err = rte_mbuf_dyn_rx_timestamp_register(
281 				&ice_timestamp_dynfield_offset,
282 				&ice_timestamp_dynflag);
283 		if (err) {
284 			PMD_DRV_LOG(ERR,
285 				"Cannot register mbuf field/flag for timestamp");
286 			return -EINVAL;
287 		}
288 		rxq->ts_enable = true;
289 	}
290 
291 	memset(&rx_ctx, 0, sizeof(rx_ctx));
292 
293 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
294 		uint32_t proto_hdr;
295 		proto_hdr = rxq->rxseg[0].proto_hdr;
296 
297 		if (proto_hdr == RTE_PTYPE_UNKNOWN) {
298 			PMD_DRV_LOG(ERR, "Buffer split protocol must be configured");
299 			return -EINVAL;
300 		}
301 
302 		switch (proto_hdr & RTE_PTYPE_L4_MASK) {
303 		case RTE_PTYPE_L4_TCP:
304 		case RTE_PTYPE_L4_UDP:
305 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
306 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
307 			goto set_hsplit_finish;
308 		case RTE_PTYPE_L4_SCTP:
309 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
310 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
311 			goto set_hsplit_finish;
312 		}
313 
314 		switch (proto_hdr & RTE_PTYPE_L3_MASK) {
315 		case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
316 		case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
317 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
318 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
319 			goto set_hsplit_finish;
320 		}
321 
322 		switch (proto_hdr & RTE_PTYPE_L2_MASK) {
323 		case RTE_PTYPE_L2_ETHER:
324 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
325 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
326 			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_L2;
327 			goto set_hsplit_finish;
328 		}
329 
330 		switch (proto_hdr & RTE_PTYPE_INNER_L4_MASK) {
331 		case RTE_PTYPE_INNER_L4_TCP:
332 		case RTE_PTYPE_INNER_L4_UDP:
333 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
334 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP;
335 			goto set_hsplit_finish;
336 		case RTE_PTYPE_INNER_L4_SCTP:
337 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
338 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
339 			goto set_hsplit_finish;
340 		}
341 
342 		switch (proto_hdr & RTE_PTYPE_INNER_L3_MASK) {
343 		case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
344 		case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
345 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
346 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_IP;
347 			goto set_hsplit_finish;
348 		}
349 
350 		switch (proto_hdr & RTE_PTYPE_INNER_L2_MASK) {
351 		case RTE_PTYPE_INNER_L2_ETHER:
352 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
353 			rx_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2;
354 			goto set_hsplit_finish;
355 		}
356 
357 		switch (proto_hdr & RTE_PTYPE_TUNNEL_MASK) {
358 		case RTE_PTYPE_TUNNEL_GRENAT:
359 			rx_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
360 			rx_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS;
361 			goto set_hsplit_finish;
362 		}
363 
364 		PMD_DRV_LOG(ERR, "Buffer split protocol is not supported");
365 		return -EINVAL;
366 
367 set_hsplit_finish:
368 		rxq->rx_hdr_len = ICE_RX_HDR_BUF_SIZE;
369 	} else {
370 		rxq->rx_hdr_len = 0;
371 		rx_ctx.dtype = 0; /* No Protocol Based Buffer Split mode */
372 	}
373 
374 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
375 	rx_ctx.qlen = rxq->nb_rx_desc;
376 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
377 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
378 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
379 	rx_ctx.dsize = 1; /* 32B descriptors */
380 #endif
381 	rx_ctx.rxmax = rxq->max_pkt_len;
382 	/* TPH: Transaction Layer Packet (TLP) processing hints */
383 	rx_ctx.tphrdesc_ena = 1;
384 	rx_ctx.tphwdesc_ena = 1;
385 	rx_ctx.tphdata_ena = 1;
386 	rx_ctx.tphhead_ena = 1;
387 	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
388 	 * When the number of free descriptors goes below the lrxqthresh,
389 	 * an immediate interrupt is triggered.
390 	 */
391 	rx_ctx.lrxqthresh = 2;
392 	/* Default: use 32-byte descriptors; extract VLAN tag to L2TAG2 (1st) */
393 	rx_ctx.l2tsel = 1;
394 	rx_ctx.showiv = 0;
395 	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
396 
397 	rxdid = ice_proto_xtr_type_to_rxdid(rxq->proto_xtr);
398 
399 	PMD_DRV_LOG(DEBUG, "Port (%u) - Rx queue (%u) is set with RXDID : %u",
400 		    rxq->port_id, rxq->queue_id, rxdid);
401 
402 	if (!(pf->supported_rxdid & BIT(rxdid))) {
403 		PMD_DRV_LOG(ERR, "currently package doesn't support RXDID (%u)",
404 			    rxdid);
405 		return -EINVAL;
406 	}
407 
408 	rxq->rxdid = rxdid;
409 
410 	/* Enable Flexible Descriptors in the queue context which
411 	 * allows this driver to select a specific receive descriptor format
412 	 */
413 	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
414 		QRXFLXP_CNTXT_RXDID_IDX_M;
415 
416 	/* Increase the context priority to pick up the profile ID;
417 	 * the default is 0x01; setting it to 0x03 ensures the profile
418 	 * is programmed even if the previous context has the same priority.
419 	 */
420 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
421 		QRXFLXP_CNTXT_RXDID_PRIO_M;
422 
423 	if (ad->ptp_ena || rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
424 		regval |= QRXFLXP_CNTXT_TS_M;
425 
426 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
427 
428 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
429 	if (err) {
430 		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
431 			    rxq->queue_id);
432 		return -EINVAL;
433 	}
434 	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
435 	if (err) {
436 		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
437 			    rxq->queue_id);
438 		return -EINVAL;
439 	}
440 
441 	/* Check if scattered RX needs to be used. */
442 	if (frame_size > buf_size)
443 		dev_data->scattered_rx = 1;
444 
445 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
446 
447 	/* Init the Rx tail register*/
448 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
449 
450 	return 0;
451 }
452 
453 /* Allocate mbufs for all descriptors in rx queue */
454 static int
455 ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq)
456 {
457 	struct ice_rx_entry *rxe = rxq->sw_ring;
458 	uint64_t dma_addr;
459 	uint16_t i;
460 
461 	for (i = 0; i < rxq->nb_rx_desc; i++) {
462 		volatile union ice_rx_flex_desc *rxd;
463 		rxd = &rxq->rx_ring[i];
464 		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
465 
466 		if (unlikely(!mbuf)) {
467 			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
468 			return -ENOMEM;
469 		}
470 
471 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
472 		mbuf->nb_segs = 1;
473 		mbuf->port = rxq->port_id;
474 
475 		dma_addr =
476 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
477 
478 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
479 			rte_mbuf_refcnt_set(mbuf, 1);
480 			mbuf->next = NULL;
481 			rxd->read.hdr_addr = 0;
482 			rxd->read.pkt_addr = dma_addr;
483 		} else {
484 			struct rte_mbuf *mbuf_pay;
485 			mbuf_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
486 			if (unlikely(!mbuf_pay)) {
487 				rte_pktmbuf_free(mbuf);
488 				PMD_DRV_LOG(ERR, "Failed to allocate payload mbuf for RX");
489 				return -ENOMEM;
490 			}
491 
492 			mbuf_pay->next = NULL;
493 			mbuf_pay->data_off = RTE_PKTMBUF_HEADROOM;
494 			mbuf_pay->nb_segs = 1;
495 			mbuf_pay->port = rxq->port_id;
496 			mbuf->next = mbuf_pay;
497 
498 			rxd->read.hdr_addr = dma_addr;
499 			/* The LS bit should be set to zero regardless of
500 			 * buffer split enablement.
501 			 */
502 			rxd->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf_pay));
503 		}
504 
505 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
506 		rxd->read.rsvd1 = 0;
507 		rxd->read.rsvd2 = 0;
508 #endif
509 		rxe[i].mbuf = mbuf;
510 	}
511 
512 	return 0;
513 }
514 
515 /* Free all mbufs for descriptors in rx queue */
516 static void
517 _ice_rx_queue_release_mbufs(struct ice_rx_queue *rxq)
518 {
519 	uint16_t i;
520 
521 	if (!rxq || !rxq->sw_ring) {
522 		PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
523 		return;
524 	}
525 
526 	for (i = 0; i < rxq->nb_rx_desc; i++) {
527 		if (rxq->sw_ring[i].mbuf) {
528 			rte_pktmbuf_free(rxq->sw_ring[i].mbuf);
529 			rxq->sw_ring[i].mbuf = NULL;
530 		}
531 	}
532 	if (rxq->rx_nb_avail == 0)
533 		return;
534 	for (i = 0; i < rxq->rx_nb_avail; i++)
535 		rte_pktmbuf_free(rxq->rx_stage[rxq->rx_next_avail + i]);
536 
537 	rxq->rx_nb_avail = 0;
538 }
539 
540 /* Turn an Rx queue on or off
541  * @q_idx: queue index in pf scope
542  * @on: turn on or off the queue
543  */
544 static int
545 ice_switch_rx_queue(struct ice_hw *hw, uint16_t q_idx, bool on)
546 {
547 	uint32_t reg;
548 	uint16_t j;
549 
550 	/* QRX_CTRL = QRX_ENA */
551 	reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
552 
553 	if (on) {
554 		if (reg & QRX_CTRL_QENA_STAT_M)
555 			return 0; /* Already on, skip */
556 		reg |= QRX_CTRL_QENA_REQ_M;
557 	} else {
558 		if (!(reg & QRX_CTRL_QENA_STAT_M))
559 			return 0; /* Already off, skip */
560 		reg &= ~QRX_CTRL_QENA_REQ_M;
561 	}
562 
563 	/* Write the register */
564 	ICE_WRITE_REG(hw, QRX_CTRL(q_idx), reg);
565 	/* Check the result. QENA_STAT is expected to follow
566 	 * QENA_REQ within no more than 10 us.
567 	 * TODO: the wait counter may need tuning later.
568 	 */
569 	for (j = 0; j < ICE_CHK_Q_ENA_COUNT; j++) {
570 		rte_delay_us(ICE_CHK_Q_ENA_INTERVAL_US);
571 		reg = ICE_READ_REG(hw, QRX_CTRL(q_idx));
572 		if (on) {
573 			if ((reg & QRX_CTRL_QENA_REQ_M) &&
574 			    (reg & QRX_CTRL_QENA_STAT_M))
575 				break;
576 		} else {
577 			if (!(reg & QRX_CTRL_QENA_REQ_M) &&
578 			    !(reg & QRX_CTRL_QENA_STAT_M))
579 				break;
580 		}
581 	}
582 
583 	/* Check if it is timeout */
584 	if (j >= ICE_CHK_Q_ENA_COUNT) {
585 		PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]",
586 			    (on ? "enable" : "disable"), q_idx);
587 		return -ETIMEDOUT;
588 	}
589 
590 	return 0;
591 }
592 
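/* The bulk-allocation Rx path is only usable when rx_free_thresh is at least
 * ICE_RX_MAX_BURST, strictly smaller than the ring size and an exact divisor
 * of it; when any check fails the caller disables bulk allocation.
 */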
593 static inline int
594 ice_check_rx_burst_bulk_alloc_preconditions(struct ice_rx_queue *rxq)
595 {
596 	int ret = 0;
597 
598 	if (!(rxq->rx_free_thresh >= ICE_RX_MAX_BURST)) {
599 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
600 			     "rxq->rx_free_thresh=%d, "
601 			     "ICE_RX_MAX_BURST=%d",
602 			     rxq->rx_free_thresh, ICE_RX_MAX_BURST);
603 		ret = -EINVAL;
604 	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
605 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
606 			     "rxq->rx_free_thresh=%d, "
607 			     "rxq->nb_rx_desc=%d",
608 			     rxq->rx_free_thresh, rxq->nb_rx_desc);
609 		ret = -EINVAL;
610 	} else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
611 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
612 			     "rxq->nb_rx_desc=%d, "
613 			     "rxq->rx_free_thresh=%d",
614 			     rxq->nb_rx_desc, rxq->rx_free_thresh);
615 		ret = -EINVAL;
616 	}
617 
618 	return ret;
619 }
620 
621 /* reset fields in ice_rx_queue back to default */
622 static void
623 ice_reset_rx_queue(struct ice_rx_queue *rxq)
624 {
625 	unsigned int i;
626 	uint16_t len;
627 
628 	if (!rxq) {
629 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
630 		return;
631 	}
632 
633 	len = (uint16_t)(rxq->nb_rx_desc + ICE_RX_MAX_BURST);
634 
635 	for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++)
636 		((volatile char *)rxq->rx_ring)[i] = 0;
637 
638 	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
639 	for (i = 0; i < ICE_RX_MAX_BURST; ++i)
640 		rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
641 
642 	rxq->rx_nb_avail = 0;
643 	rxq->rx_next_avail = 0;
644 	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
645 
646 	rxq->rx_tail = 0;
647 	rxq->nb_rx_hold = 0;
648 	rxq->pkt_first_seg = NULL;
649 	rxq->pkt_last_seg = NULL;
650 
651 	rxq->rxrearm_start = 0;
652 	rxq->rxrearm_nb = 0;
653 }
654 
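/* Start an Rx queue at runtime: program the queue context, fill the ring
 * with freshly allocated mbufs, set the tail register and request queue
 * enable through QRX_CTRL.
 */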
655 int
656 ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
657 {
658 	struct ice_rx_queue *rxq;
659 	int err;
660 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
661 
662 	PMD_INIT_FUNC_TRACE();
663 
664 	if (rx_queue_id >= dev->data->nb_rx_queues) {
665 		PMD_DRV_LOG(ERR, "RX queue %u is out of range %u",
666 			    rx_queue_id, dev->data->nb_rx_queues);
667 		return -EINVAL;
668 	}
669 
670 	rxq = dev->data->rx_queues[rx_queue_id];
671 	if (!rxq || !rxq->q_set) {
672 		PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
673 			    rx_queue_id);
674 		return -EINVAL;
675 	}
676 
677 	if (dev->data->rx_queue_state[rx_queue_id] ==
678 		RTE_ETH_QUEUE_STATE_STARTED)
679 		return 0;
680 
681 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
682 		rxq->ts_enable = true;
683 	err = ice_program_hw_rx_queue(rxq);
684 	if (err) {
685 		PMD_DRV_LOG(ERR, "fail to program RX queue %u",
686 			    rx_queue_id);
687 		return -EIO;
688 	}
689 
690 	err = ice_alloc_rx_queue_mbufs(rxq);
691 	if (err) {
692 		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
693 		return -ENOMEM;
694 	}
695 
696 	/* Init the RX tail register. */
697 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
698 
699 	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
700 	if (err) {
701 		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
702 			    rx_queue_id);
703 
704 		rxq->rx_rel_mbufs(rxq);
705 		ice_reset_rx_queue(rxq);
706 		return -EINVAL;
707 	}
708 
709 	dev->data->rx_queue_state[rx_queue_id] =
710 		RTE_ETH_QUEUE_STATE_STARTED;
711 
712 	return 0;
713 }
714 
715 int
716 ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
717 {
718 	struct ice_rx_queue *rxq;
719 	int err;
720 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
721 
722 	if (rx_queue_id < dev->data->nb_rx_queues) {
723 		rxq = dev->data->rx_queues[rx_queue_id];
724 
725 		if (dev->data->rx_queue_state[rx_queue_id] ==
726 			RTE_ETH_QUEUE_STATE_STOPPED)
727 			return 0;
728 
729 		err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
730 		if (err) {
731 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
732 				    rx_queue_id);
733 			return -EINVAL;
734 		}
735 		rxq->rx_rel_mbufs(rxq);
736 		ice_reset_rx_queue(rxq);
737 		dev->data->rx_queue_state[rx_queue_id] =
738 			RTE_ETH_QUEUE_STATE_STOPPED;
739 	}
740 
741 	return 0;
742 }
743 
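/* Start a Tx queue: build the TLAN queue context, hand it to firmware via
 * ice_ena_vsi_txq() (which returns the scheduler node TEID), and, when a TM
 * hierarchy has been committed, move the queue under its configured node.
 */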
744 int
745 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
746 {
747 	struct ice_tx_queue *txq;
748 	int err;
749 	struct ice_vsi *vsi;
750 	struct ice_hw *hw;
751 	struct ice_pf *pf;
752 	struct ice_aqc_add_tx_qgrp *txq_elem;
753 	struct ice_tlan_ctx tx_ctx;
754 	int buf_len;
755 
756 	PMD_INIT_FUNC_TRACE();
757 
758 	if (tx_queue_id >= dev->data->nb_tx_queues) {
759 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
760 			    tx_queue_id, dev->data->nb_tx_queues);
761 		return -EINVAL;
762 	}
763 
764 	txq = dev->data->tx_queues[tx_queue_id];
765 	if (!txq || !txq->q_set) {
766 		PMD_DRV_LOG(ERR, "TX queue %u is not available or setup",
767 			    tx_queue_id);
768 		return -EINVAL;
769 	}
770 
771 	if (dev->data->tx_queue_state[tx_queue_id] ==
772 		RTE_ETH_QUEUE_STATE_STARTED)
773 		return 0;
774 
775 	buf_len = ice_struct_size(txq_elem, txqs, 1);
776 	txq_elem = ice_malloc(hw, buf_len);
777 	if (!txq_elem)
778 		return -ENOMEM;
779 
780 	vsi = txq->vsi;
781 	hw = ICE_VSI_TO_HW(vsi);
782 	pf = ICE_VSI_TO_PF(vsi);
783 
784 	memset(&tx_ctx, 0, sizeof(tx_ctx));
785 	txq_elem->num_txqs = 1;
786 	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
787 
788 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
789 	tx_ctx.qlen = txq->nb_tx_desc;
790 	tx_ctx.pf_num = hw->pf_id;
791 	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
792 	tx_ctx.src_vsi = vsi->vsi_id;
793 	tx_ctx.port_num = hw->port_info->lport;
794 	tx_ctx.tso_ena = 1; /* tso enable */
795 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
796 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
797 	tx_ctx.tsyn_ena = 1;
798 
799 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
800 		    ice_tlan_ctx_info);
801 
802 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
803 
804 	/* Init the Tx tail register*/
805 	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
806 
807 	/* Fix me, we assume TC always 0 here */
808 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
809 			txq_elem, buf_len, NULL);
810 	if (err) {
811 		PMD_DRV_LOG(ERR, "Failed to add lan txq");
812 		rte_free(txq_elem);
813 		return -EIO;
814 	}
815 	/* store the schedule node id */
816 	txq->q_teid = txq_elem->txqs[0].q_teid;
817 
818 	/* move the queue to the correct position in the hierarchy, if an explicit hierarchy is configured */
819 	if (pf->tm_conf.committed)
820 		if (ice_tm_setup_txq_node(pf, hw, tx_queue_id, txq->q_teid) != 0) {
821 			PMD_DRV_LOG(ERR, "Failed to set up txq traffic management node");
822 			rte_free(txq_elem);
823 			return -EIO;
824 		}
825 
826 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
827 
828 	rte_free(txq_elem);
829 	return 0;
830 }
831 
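/* The flow director queue uses a fixed 1 KB buffer and the legacy-1
 * descriptor format (ICE_RXDID_LEGACY_1); otherwise programming follows the
 * same steps as ice_program_hw_rx_queue() without buffer split support.
 */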
832 static int
833 ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
834 {
835 	struct ice_vsi *vsi = rxq->vsi;
836 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
837 	uint32_t rxdid = ICE_RXDID_LEGACY_1;
838 	struct ice_rlan_ctx rx_ctx;
839 	uint32_t regval;
840 	int err;
841 
842 	rxq->rx_hdr_len = 0;
843 	rxq->rx_buf_len = 1024;
844 
845 	memset(&rx_ctx, 0, sizeof(rx_ctx));
846 
847 	rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
848 	rx_ctx.qlen = rxq->nb_rx_desc;
849 	rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
850 	rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
851 	rx_ctx.dtype = 0; /* No Buffer Split mode */
852 	rx_ctx.dsize = 1; /* 32B descriptors */
853 	rx_ctx.rxmax = ICE_ETH_MAX_LEN;
854 	/* TPH: Transaction Layer Packet (TLP) processing hints */
855 	rx_ctx.tphrdesc_ena = 1;
856 	rx_ctx.tphwdesc_ena = 1;
857 	rx_ctx.tphdata_ena = 1;
858 	rx_ctx.tphhead_ena = 1;
859 	/* Low Receive Queue Threshold, defined in units of 64 descriptors.
860 	 * When the number of free descriptors goes below the lrxqthresh,
861 	 * an immediate interrupt is triggered.
862 	 */
863 	rx_ctx.lrxqthresh = 2;
864 	/* Default: use 32-byte descriptors; extract VLAN tag to L2TAG2 (1st) */
865 	rx_ctx.l2tsel = 1;
866 	rx_ctx.showiv = 0;
867 	rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
868 
869 	/* Enable Flexible Descriptors in the queue context which
870 	 * allows this driver to select a specific receive descriptor format
871 	 */
872 	regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
873 		QRXFLXP_CNTXT_RXDID_IDX_M;
874 
875 	/* Increase the context priority to pick up the profile ID;
876 	 * the default is 0x01; setting it to 0x03 ensures the profile
877 	 * is programmed even if the previous context has the same priority.
878 	 */
879 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
880 		QRXFLXP_CNTXT_RXDID_PRIO_M;
881 
882 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
883 
884 	err = ice_clear_rxq_ctx(hw, rxq->reg_idx);
885 	if (err) {
886 		PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context",
887 			    rxq->queue_id);
888 		return -EINVAL;
889 	}
890 	err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx);
891 	if (err) {
892 		PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context",
893 			    rxq->queue_id);
894 		return -EINVAL;
895 	}
896 
897 	rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
898 
899 	/* Init the Rx tail register*/
900 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
901 
902 	return 0;
903 }
904 
905 int
906 ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
907 {
908 	struct ice_rx_queue *rxq;
909 	int err;
910 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
911 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
912 
913 	PMD_INIT_FUNC_TRACE();
914 
915 	rxq = pf->fdir.rxq;
916 	if (!rxq || !rxq->q_set) {
917 		PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup",
918 			    rx_queue_id);
919 		return -EINVAL;
920 	}
921 
922 	err = ice_fdir_program_hw_rx_queue(rxq);
923 	if (err) {
924 		PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u",
925 			    rx_queue_id);
926 		return -EIO;
927 	}
928 
929 	/* Init the RX tail register. */
930 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
931 
932 	err = ice_switch_rx_queue(hw, rxq->reg_idx, true);
933 	if (err) {
934 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on",
935 			    rx_queue_id);
936 
937 		ice_reset_rx_queue(rxq);
938 		return -EINVAL;
939 	}
940 
941 	return 0;
942 }
943 
944 int
945 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
946 {
947 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
948 	struct ice_tx_queue *txq;
949 	int err;
950 	struct ice_vsi *vsi;
951 	struct ice_hw *hw;
952 	struct ice_aqc_add_tx_qgrp *txq_elem;
953 	struct ice_tlan_ctx tx_ctx;
954 	int buf_len;
955 
956 	PMD_INIT_FUNC_TRACE();
957 
958 	txq = pf->fdir.txq;
959 	if (!txq || !txq->q_set) {
960 		PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup",
961 			    tx_queue_id);
962 		return -EINVAL;
963 	}
964 
965 	buf_len = ice_struct_size(txq_elem, txqs, 1);
966 	txq_elem = ice_malloc(hw, buf_len);
967 	if (!txq_elem)
968 		return -ENOMEM;
969 
970 	vsi = txq->vsi;
971 	hw = ICE_VSI_TO_HW(vsi);
972 
973 	memset(&tx_ctx, 0, sizeof(tx_ctx));
974 	txq_elem->num_txqs = 1;
975 	txq_elem->txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx);
976 
977 	tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT;
978 	tx_ctx.qlen = txq->nb_tx_desc;
979 	tx_ctx.pf_num = hw->pf_id;
980 	tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
981 	tx_ctx.src_vsi = vsi->vsi_id;
982 	tx_ctx.port_num = hw->port_info->lport;
983 	tx_ctx.tso_ena = 1; /* tso enable */
984 	tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */
985 	tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */
986 
987 	ice_set_ctx(hw, (uint8_t *)&tx_ctx, txq_elem->txqs[0].txq_ctx,
988 		    ice_tlan_ctx_info);
989 
990 	txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx);
991 
992 	/* Init the Tx tail register*/
993 	ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
994 
995 	/* Fix me, we assume TC always 0 here */
996 	err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
997 			      txq_elem, buf_len, NULL);
998 	if (err) {
999 		PMD_DRV_LOG(ERR, "Failed to add FDIR txq");
1000 		rte_free(txq_elem);
1001 		return -EIO;
1002 	}
1003 	/* store the schedule node id */
1004 	txq->q_teid = txq_elem->txqs[0].q_teid;
1005 
1006 	rte_free(txq_elem);
1007 	return 0;
1008 }
1009 
1010 /* Free all mbufs for descriptors in tx queue */
1011 static void
1012 _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
1013 {
1014 	uint16_t i;
1015 
1016 	if (!txq || !txq->sw_ring) {
1017 		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
1018 		return;
1019 	}
1020 
1021 	for (i = 0; i < txq->nb_tx_desc; i++) {
1022 		if (txq->sw_ring[i].mbuf) {
1023 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1024 			txq->sw_ring[i].mbuf = NULL;
1025 		}
1026 	}
1027 }
1028 
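/* Return the Tx ring and software ring to their post-init state: mark every
 * descriptor as done, relink the sw_ring entries and reset the tail and
 * cleanup bookkeeping.
 */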
1029 static void
1030 ice_reset_tx_queue(struct ice_tx_queue *txq)
1031 {
1032 	struct ci_tx_entry *txe;
1033 	uint16_t i, prev, size;
1034 
1035 	if (!txq) {
1036 		PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
1037 		return;
1038 	}
1039 
1040 	txe = txq->sw_ring;
1041 	size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc;
1042 	for (i = 0; i < size; i++)
1043 		((volatile char *)txq->tx_ring)[i] = 0;
1044 
1045 	prev = (uint16_t)(txq->nb_tx_desc - 1);
1046 	for (i = 0; i < txq->nb_tx_desc; i++) {
1047 		volatile struct ice_tx_desc *txd = &txq->tx_ring[i];
1048 
1049 		txd->cmd_type_offset_bsz =
1050 			rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
1051 		txe[i].mbuf = NULL;
1052 		txe[i].last_id = i;
1053 		txe[prev].next_id = i;
1054 		prev = i;
1055 	}
1056 
1057 	txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
1058 	txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
1059 
1060 	txq->tx_tail = 0;
1061 	txq->nb_tx_used = 0;
1062 
1063 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
1064 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
1065 }
1066 
1067 int
1068 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1069 {
1070 	struct ice_tx_queue *txq;
1071 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1072 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1073 	struct ice_vsi *vsi = pf->main_vsi;
1074 	uint16_t q_ids[1];
1075 	uint32_t q_teids[1];
1076 	uint16_t q_handle = tx_queue_id;
1077 	int status;
1078 
1079 	if (tx_queue_id >= dev->data->nb_tx_queues) {
1080 		PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
1081 			    tx_queue_id, dev->data->nb_tx_queues);
1082 		return -EINVAL;
1083 	}
1084 
1085 	txq = dev->data->tx_queues[tx_queue_id];
1086 	if (!txq) {
1087 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
1088 			    tx_queue_id);
1089 		return -EINVAL;
1090 	}
1091 
1092 	if (dev->data->tx_queue_state[tx_queue_id] ==
1093 		RTE_ETH_QUEUE_STATE_STOPPED)
1094 		return 0;
1095 
1096 	q_ids[0] = txq->reg_idx;
1097 	q_teids[0] = txq->q_teid;
1098 
1099 	/* Fix me, we assume TC always 0 here */
1100 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1101 				q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1102 	if (status != ICE_SUCCESS) {
1103 		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1104 		return -EINVAL;
1105 	}
1106 
1107 	txq->tx_rel_mbufs(txq);
1108 	ice_reset_tx_queue(txq);
1109 	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1110 
1111 	return 0;
1112 }
1113 
1114 int
1115 ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1116 {
1117 	struct ice_rx_queue *rxq;
1118 	int err;
1119 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1120 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1121 
1122 	rxq = pf->fdir.rxq;
1123 
1124 	err = ice_switch_rx_queue(hw, rxq->reg_idx, false);
1125 	if (err) {
1126 		PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off",
1127 			    rx_queue_id);
1128 		return -EINVAL;
1129 	}
1130 	rxq->rx_rel_mbufs(rxq);
1131 
1132 	return 0;
1133 }
1134 
1135 int
1136 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1137 {
1138 	struct ice_tx_queue *txq;
1139 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1140 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1141 	struct ice_vsi *vsi = pf->main_vsi;
1142 	uint16_t q_ids[1];
1143 	uint32_t q_teids[1];
1144 	uint16_t q_handle = tx_queue_id;
1145 	int status;
1146 
1147 	txq = pf->fdir.txq;
1148 	if (!txq) {
1149 		PMD_DRV_LOG(ERR, "TX queue %u is not available",
1150 			    tx_queue_id);
1151 		return -EINVAL;
1152 	}
1153 	if (txq->qtx_tail == NULL) {
1154 		PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id);
1155 		return 0;
1156 	}
1157 	vsi = txq->vsi;
1158 
1159 	q_ids[0] = txq->reg_idx;
1160 	q_teids[0] = txq->q_teid;
1161 
1162 	/* Fix me, we assume TC always 0 here */
1163 	status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
1164 				 q_ids, q_teids, ICE_NO_RESET, 0, NULL);
1165 	if (status != ICE_SUCCESS) {
1166 		PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
1167 		return -EINVAL;
1168 	}
1169 
1170 	txq->tx_rel_mbufs(txq);
1171 	txq->qtx_tail = NULL;
1172 
1173 	return 0;
1174 }
1175 
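/* Set up one Rx queue. When rx_nseg > 1 and RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT
 * is enabled, the per-segment configuration is copied into rxq->rxseg and
 * the mempool of segment 0 becomes rxq->mp.
 *
 * A minimal application-side sketch of protocol-based buffer split on this
 * driver (illustrative only; hdr_pool/pay_pool are placeholder mempools,
 * segment 0 receives headers, segment 1 the remaining payload):
 *
 *	union rte_eth_rxseg rx_useg[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	memset(rx_useg, 0, sizeof(rx_useg));
 *	rx_useg[0].split.mp = hdr_pool;
 *	rx_useg[0].split.proto_hdr = RTE_PTYPE_L4_UDP;
 *	rx_useg[1].split.mp = pay_pool;
 *	rxconf.rx_seg = rx_useg;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
 *			       &rxconf, NULL);
 */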
1176 int
1177 ice_rx_queue_setup(struct rte_eth_dev *dev,
1178 		   uint16_t queue_idx,
1179 		   uint16_t nb_desc,
1180 		   unsigned int socket_id,
1181 		   const struct rte_eth_rxconf *rx_conf,
1182 		   struct rte_mempool *mp)
1183 {
1184 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1185 	struct ice_adapter *ad =
1186 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1187 	struct ice_vsi *vsi = pf->main_vsi;
1188 	struct ice_rx_queue *rxq;
1189 	const struct rte_memzone *rz;
1190 	uint32_t ring_size;
1191 	uint16_t len;
1192 	int use_def_burst_func = 1;
1193 	uint64_t offloads;
1194 	uint16_t n_seg = rx_conf->rx_nseg;
1195 	uint16_t i;
1196 
1197 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1198 	    nb_desc > ICE_MAX_RING_DESC ||
1199 	    nb_desc < ICE_MIN_RING_DESC) {
1200 		PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
1201 			     "invalid", nb_desc);
1202 		return -EINVAL;
1203 	}
1204 
1205 	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1206 
1207 	if (mp)
1208 		n_seg = 1;
1209 
1210 	if (n_seg > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1211 		PMD_INIT_LOG(ERR, "port %u queue index %u split offload not configured",
1212 				dev->data->port_id, queue_idx);
1213 		return -EINVAL;
1214 	}
1215 
1216 	/* Free memory if needed */
1217 	if (dev->data->rx_queues[queue_idx]) {
1218 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
1219 		dev->data->rx_queues[queue_idx] = NULL;
1220 	}
1221 
1222 	/* Allocate the rx queue data structure */
1223 	rxq = rte_zmalloc_socket(NULL,
1224 				 sizeof(struct ice_rx_queue),
1225 				 RTE_CACHE_LINE_SIZE,
1226 				 socket_id);
1227 
1228 	if (!rxq) {
1229 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1230 			     "rx queue data structure");
1231 		return -ENOMEM;
1232 	}
1233 
1234 	rxq->rxseg_nb = n_seg;
1235 	if (n_seg > 1) {
1236 		for (i = 0; i < n_seg; i++)
1237 			memcpy(&rxq->rxseg[i], &rx_conf->rx_seg[i].split,
1238 				sizeof(struct rte_eth_rxseg_split));
1239 
1240 		rxq->mp = rxq->rxseg[0].mp;
1241 	} else {
1242 		rxq->mp = mp;
1243 	}
1244 
1245 	rxq->nb_rx_desc = nb_desc;
1246 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1247 	rxq->queue_id = queue_idx;
1248 	rxq->offloads = offloads;
1249 
1250 	rxq->reg_idx = vsi->base_queue + queue_idx;
1251 	rxq->port_id = dev->data->port_id;
1252 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1253 		rxq->crc_len = RTE_ETHER_CRC_LEN;
1254 	else
1255 		rxq->crc_len = 0;
1256 
1257 	rxq->drop_en = rx_conf->rx_drop_en;
1258 	rxq->vsi = vsi;
1259 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1260 	rxq->proto_xtr = pf->proto_xtr != NULL ?
1261 			 pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
1262 	if (rxq->proto_xtr != PROTO_XTR_NONE &&
1263 			ad->devargs.xtr_flag_offs[rxq->proto_xtr] != 0xff)
1264 		rxq->xtr_ol_flag = 1ULL << ad->devargs.xtr_flag_offs[rxq->proto_xtr];
1265 	rxq->xtr_field_offs = ad->devargs.xtr_field_offs;
1266 
1267 	/* Allocate the maximum number of RX ring hardware descriptor. */
1268 	len = ICE_MAX_RING_DESC;
1269 
1270 	/**
1271 	 * Allocate a little more memory because the vectorized/bulk_alloc Rx
1272 	 * functions don't check boundaries on every access.
1273 	 */
1274 	len += ICE_RX_MAX_BURST;
1275 
1276 	/* Reserve DMA memory sized for the maximum ring plus the extra burst entries. */
1277 	ring_size = sizeof(union ice_rx_flex_desc) * len;
1278 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1279 	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1280 				      ring_size, ICE_RING_BASE_ALIGN,
1281 				      socket_id);
1282 	if (!rz) {
1283 		ice_rx_queue_release(rxq);
1284 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
1285 		return -ENOMEM;
1286 	}
1287 
1288 	rxq->mz = rz;
1289 	/* Zero all the descriptors in the ring. */
1290 	memset(rz->addr, 0, ring_size);
1291 
1292 	rxq->rx_ring_dma = rz->iova;
1293 	rxq->rx_ring = rz->addr;
1294 
1295 	/* always reserve more for bulk alloc */
1296 	len = (uint16_t)(nb_desc + ICE_RX_MAX_BURST);
1297 
1298 	/* Allocate the software ring. */
1299 	rxq->sw_ring = rte_zmalloc_socket(NULL,
1300 					  sizeof(struct ice_rx_entry) * len,
1301 					  RTE_CACHE_LINE_SIZE,
1302 					  socket_id);
1303 	if (!rxq->sw_ring) {
1304 		ice_rx_queue_release(rxq);
1305 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
1306 		return -ENOMEM;
1307 	}
1308 
1309 	ice_reset_rx_queue(rxq);
1310 	rxq->q_set = true;
1311 	dev->data->rx_queues[queue_idx] = rxq;
1312 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
1313 
1314 	use_def_burst_func = ice_check_rx_burst_bulk_alloc_preconditions(rxq);
1315 
1316 	if (!use_def_burst_func) {
1317 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1318 			     "satisfied. Rx Burst Bulk Alloc function will be "
1319 			     "used on port=%d, queue=%d.",
1320 			     rxq->port_id, rxq->queue_id);
1321 	} else {
1322 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1323 			     "not satisfied or Scattered Rx is requested "
1324 			     "on port=%d, queue=%d.",
1325 			     rxq->port_id, rxq->queue_id);
1326 		ad->rx_bulk_alloc_allowed = false;
1327 	}
1328 
1329 	return 0;
1330 }
1331 
1332 void
1333 ice_rx_queue_release(void *rxq)
1334 {
1335 	struct ice_rx_queue *q = (struct ice_rx_queue *)rxq;
1336 
1337 	if (!q) {
1338 		PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
1339 		return;
1340 	}
1341 
1342 	if (q->rx_rel_mbufs != NULL)
1343 		q->rx_rel_mbufs(q);
1344 	rte_free(q->sw_ring);
1345 	rte_memzone_free(q->mz);
1346 	rte_free(q);
1347 }
1348 
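/* Set up one Tx queue. tx_rs_thresh controls how often descriptors are
 * marked with the RS bit (completion reporting), tx_free_thresh controls
 * when transmitted mbufs are reclaimed; zero for either selects the driver
 * default, subject to the constraints documented inside the function.
 */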
1349 int
1350 ice_tx_queue_setup(struct rte_eth_dev *dev,
1351 		   uint16_t queue_idx,
1352 		   uint16_t nb_desc,
1353 		   unsigned int socket_id,
1354 		   const struct rte_eth_txconf *tx_conf)
1355 {
1356 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1357 	struct ice_vsi *vsi = pf->main_vsi;
1358 	struct ice_tx_queue *txq;
1359 	const struct rte_memzone *tz;
1360 	uint32_t ring_size;
1361 	uint16_t tx_rs_thresh, tx_free_thresh;
1362 	uint64_t offloads;
1363 
1364 	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1365 
1366 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
1367 	    nb_desc > ICE_MAX_RING_DESC ||
1368 	    nb_desc < ICE_MIN_RING_DESC) {
1369 		PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
1370 			     "invalid", nb_desc);
1371 		return -EINVAL;
1372 	}
1373 
1374 	/**
1375 	 * The following two parameters control the setting of the RS bit on
1376 	 * transmit descriptors. TX descriptors will have their RS bit set
1377 	 * after txq->tx_rs_thresh descriptors have been used. The TX
1378 	 * descriptor ring will be cleaned after txq->tx_free_thresh
1379 	 * descriptors are used or if the number of descriptors required to
1380 	 * transmit a packet is greater than the number of free TX descriptors.
1381 	 *
1382 	 * The following constraints must be satisfied:
1383 	 *  - tx_rs_thresh must be greater than 0.
1384 	 *  - tx_rs_thresh must be less than the size of the ring minus 2.
1385 	 *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
1386 	 *  - tx_rs_thresh must be a divisor of the ring size.
1387 	 *  - tx_free_thresh must be greater than 0.
1388 	 *  - tx_free_thresh must be less than the size of the ring minus 3.
1389 	 *  - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
1390 	 *
1391 	 * One descriptor in the TX ring is used as a sentinel to avoid a H/W
1392 	 * race condition, hence the maximum threshold constraints. When either
1393 	 * threshold is set to zero, its default value is used.
1394 	 */
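	/* For example, with nb_desc = 512, tx_rs_thresh = 32 and
	 * tx_free_thresh = 64 satisfy every constraint above:
	 * 32 divides 512, 32 <= 64, and 32 + 64 <= 512.
	 */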
1395 	tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
1396 				    tx_conf->tx_free_thresh :
1397 				    ICE_DEFAULT_TX_FREE_THRESH);
1398 	/* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
1399 	tx_rs_thresh =
1400 		(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
1401 			nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
1402 	if (tx_conf->tx_rs_thresh)
1403 		tx_rs_thresh = tx_conf->tx_rs_thresh;
1404 	if (tx_rs_thresh + tx_free_thresh > nb_desc) {
1405 		PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
1406 				"exceed nb_desc. (tx_rs_thresh=%u "
1407 				"tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
1408 				(unsigned int)tx_rs_thresh,
1409 				(unsigned int)tx_free_thresh,
1410 				(unsigned int)nb_desc,
1411 				(int)dev->data->port_id,
1412 				(int)queue_idx);
1413 		return -EINVAL;
1414 	}
1415 	if (tx_rs_thresh >= (nb_desc - 2)) {
1416 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
1417 			     "number of TX descriptors minus 2. "
1418 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1419 			     (unsigned int)tx_rs_thresh,
1420 			     (int)dev->data->port_id,
1421 			     (int)queue_idx);
1422 		return -EINVAL;
1423 	}
1424 	if (tx_free_thresh >= (nb_desc - 3)) {
1425 		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1427 			     "number of TX descriptors minus 3. "
1428 			     "(tx_free_thresh=%u port=%d queue=%d)",
1429 			     (unsigned int)tx_free_thresh,
1430 			     (int)dev->data->port_id,
1431 			     (int)queue_idx);
1432 		return -EINVAL;
1433 	}
1434 	if (tx_rs_thresh > tx_free_thresh) {
1435 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or "
1436 			     "equal to tx_free_thresh. (tx_free_thresh=%u"
1437 			     " tx_rs_thresh=%u port=%d queue=%d)",
1438 			     (unsigned int)tx_free_thresh,
1439 			     (unsigned int)tx_rs_thresh,
1440 			     (int)dev->data->port_id,
1441 			     (int)queue_idx);
1442 		return -EINVAL;
1443 	}
1444 	if ((nb_desc % tx_rs_thresh) != 0) {
1445 		PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
1446 			     "number of TX descriptors. (tx_rs_thresh=%u"
1447 			     " port=%d queue=%d)",
1448 			     (unsigned int)tx_rs_thresh,
1449 			     (int)dev->data->port_id,
1450 			     (int)queue_idx);
1451 		return -EINVAL;
1452 	}
1453 	if (tx_rs_thresh > 1 && tx_conf->tx_thresh.wthresh != 0) {
1454 		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1455 			     "tx_rs_thresh is greater than 1. "
1456 			     "(tx_rs_thresh=%u port=%d queue=%d)",
1457 			     (unsigned int)tx_rs_thresh,
1458 			     (int)dev->data->port_id,
1459 			     (int)queue_idx);
1460 		return -EINVAL;
1461 	}
1462 
1463 	/* Free memory if needed. */
1464 	if (dev->data->tx_queues[queue_idx]) {
1465 		ice_tx_queue_release(dev->data->tx_queues[queue_idx]);
1466 		dev->data->tx_queues[queue_idx] = NULL;
1467 	}
1468 
1469 	/* Allocate the TX queue data structure. */
1470 	txq = rte_zmalloc_socket(NULL,
1471 				 sizeof(struct ice_tx_queue),
1472 				 RTE_CACHE_LINE_SIZE,
1473 				 socket_id);
1474 	if (!txq) {
1475 		PMD_INIT_LOG(ERR, "Failed to allocate memory for "
1476 			     "tx queue structure");
1477 		return -ENOMEM;
1478 	}
1479 
1480 	/* Allocate TX hardware ring descriptors. */
1481 	ring_size = sizeof(struct ice_tx_desc) * ICE_MAX_RING_DESC;
1482 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
1483 	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1484 				      ring_size, ICE_RING_BASE_ALIGN,
1485 				      socket_id);
1486 	if (!tz) {
1487 		ice_tx_queue_release(txq);
1488 		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
1489 		return -ENOMEM;
1490 	}
1491 
1492 	txq->mz = tz;
1493 	txq->nb_tx_desc = nb_desc;
1494 	txq->tx_rs_thresh = tx_rs_thresh;
1495 	txq->tx_free_thresh = tx_free_thresh;
1496 	txq->pthresh = tx_conf->tx_thresh.pthresh;
1497 	txq->hthresh = tx_conf->tx_thresh.hthresh;
1498 	txq->wthresh = tx_conf->tx_thresh.wthresh;
1499 	txq->queue_id = queue_idx;
1500 
1501 	txq->reg_idx = vsi->base_queue + queue_idx;
1502 	txq->port_id = dev->data->port_id;
1503 	txq->offloads = offloads;
1504 	txq->vsi = vsi;
1505 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
1506 
1507 	txq->tx_ring_dma = tz->iova;
1508 	txq->tx_ring = tz->addr;
1509 
1510 	/* Allocate software ring */
1511 	txq->sw_ring =
1512 		rte_zmalloc_socket(NULL,
1513 				   sizeof(struct ci_tx_entry) * nb_desc,
1514 				   RTE_CACHE_LINE_SIZE,
1515 				   socket_id);
1516 	if (!txq->sw_ring) {
1517 		ice_tx_queue_release(txq);
1518 		PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
1519 		return -ENOMEM;
1520 	}
1521 
1522 	ice_reset_tx_queue(txq);
1523 	txq->q_set = true;
1524 	dev->data->tx_queues[queue_idx] = txq;
1525 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
1526 	ice_set_tx_function_flag(dev, txq);
1527 
1528 	return 0;
1529 }
1530 
1531 void
1532 ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1533 {
1534 	ice_rx_queue_release(dev->data->rx_queues[qid]);
1535 }
1536 
1537 void
1538 ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1539 {
1540 	ice_tx_queue_release(dev->data->tx_queues[qid]);
1541 }
1542 
1543 void
1544 ice_tx_queue_release(void *txq)
1545 {
1546 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
1547 
1548 	if (!q) {
1549 		PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
1550 		return;
1551 	}
1552 
1553 	if (q->tx_rel_mbufs != NULL)
1554 		q->tx_rel_mbufs(q);
1555 	rte_free(q->sw_ring);
1556 	rte_memzone_free(q->mz);
1557 	rte_free(q);
1558 }
1559 
1560 void
1561 ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1562 		 struct rte_eth_rxq_info *qinfo)
1563 {
1564 	struct ice_rx_queue *rxq;
1565 
1566 	rxq = dev->data->rx_queues[queue_id];
1567 
1568 	qinfo->mp = rxq->mp;
1569 	qinfo->scattered_rx = dev->data->scattered_rx;
1570 	qinfo->nb_desc = rxq->nb_rx_desc;
1571 
1572 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1573 	qinfo->conf.rx_drop_en = rxq->drop_en;
1574 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
1575 }
1576 
1577 void
1578 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1579 		 struct rte_eth_txq_info *qinfo)
1580 {
1581 	struct ice_tx_queue *txq;
1582 
1583 	txq = dev->data->tx_queues[queue_id];
1584 
1585 	qinfo->nb_desc = txq->nb_tx_desc;
1586 
1587 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1588 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1589 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1590 
1591 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1592 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
1593 	qinfo->conf.offloads = txq->offloads;
1594 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1595 }
1596 
1597 uint32_t
1598 ice_rx_queue_count(void *rx_queue)
1599 {
1600 #define ICE_RXQ_SCAN_INTERVAL 4
1601 	volatile union ice_rx_flex_desc *rxdp;
1602 	struct ice_rx_queue *rxq;
1603 	uint16_t desc = 0;
1604 
1605 	rxq = rx_queue;
1606 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1607 	while ((desc < rxq->nb_rx_desc) &&
1608 	       rte_le_to_cpu_16(rxdp->wb.status_error0) &
1609 	       (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) {
1610 		/**
1611 		 * Check the DD bit of a rx descriptor of each 4 in a group,
1612 		 * to avoid checking too frequently and downgrading performance
1613 		 * too much.
1614 		 */
1615 		desc += ICE_RXQ_SCAN_INTERVAL;
1616 		rxdp += ICE_RXQ_SCAN_INTERVAL;
1617 		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1618 			rxdp = &(rxq->rx_ring[rxq->rx_tail +
1619 				 desc - rxq->nb_rx_desc]);
1620 	}
1621 
1622 	return desc;
1623 }
1624 
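/* Any of these bits set in status_error0 means the hardware checksum result
 * cannot be reported as good wholesale and must be decoded bit by bit.
 */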
1625 #define ICE_RX_FLEX_ERR0_BITS	\
1626 	((1 << ICE_RX_FLEX_DESC_STATUS0_HBO_S) |	\
1627 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
1628 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
1629 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
1630 	 (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
1631 	 (1 << ICE_RX_FLEX_DESC_STATUS0_RXE_S))
1632 
1633 /* Rx L3/L4 checksum */
1634 static inline uint64_t
1635 ice_rxd_error_to_pkt_flags(uint16_t stat_err0)
1636 {
1637 	uint64_t flags = 0;
1638 
1639 	/* check if HW has decoded the packet and checksum */
1640 	if (unlikely(!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))))
1641 		return 0;
1642 
1643 	if (likely(!(stat_err0 & ICE_RX_FLEX_ERR0_BITS))) {
1644 		flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
1645 			  RTE_MBUF_F_RX_L4_CKSUM_GOOD |
1646 			  RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD);
1647 		return flags;
1648 	}
1649 
1650 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
1651 		flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
1652 	else
1653 		flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
1654 
1655 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
1656 		flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
1657 	else
1658 		flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
1659 
1660 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
1661 		flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1662 
1663 	if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
1664 		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
1665 	else
1666 		flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
1667 
1668 	return flags;
1669 }
1670 
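/* Extract stripped VLAN tags from the descriptor write-back: L2TAG1 is
 * reported in vlan_tci and, when a second tag is present (QinQ), that value
 * moves to vlan_tci_outer while L2TAG2 becomes the inner vlan_tci.
 */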
1671 static inline void
1672 ice_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union ice_rx_flex_desc *rxdp)
1673 {
1674 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
1675 	    (1 << ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
1676 		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
1677 		mb->vlan_tci =
1678 			rte_le_to_cpu_16(rxdp->wb.l2tag1);
1679 		PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
1680 			   rte_le_to_cpu_16(rxdp->wb.l2tag1));
1681 	} else {
1682 		mb->vlan_tci = 0;
1683 	}
1684 
1685 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1686 	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
1687 	    (1 << ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
1688 		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
1689 				RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
1690 		mb->vlan_tci_outer = mb->vlan_tci;
1691 		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
1692 		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
1693 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
1694 			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
1695 	} else {
1696 		mb->vlan_tci_outer = 0;
1697 	}
1698 #endif
1699 	PMD_RX_LOG(DEBUG, "Mbuf vlan_tci: %u, vlan_tci_outer: %u",
1700 		   mb->vlan_tci, mb->vlan_tci_outer);
1701 }
1702 
1703 #define ICE_LOOK_AHEAD 8
1704 #if (ICE_LOOK_AHEAD != 8)
1705 #error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
1706 #endif
1707 
1708 #define ICE_PTP_TS_VALID 0x1
1709 
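/* Scan up to ICE_RX_MAX_BURST descriptors in groups of ICE_LOOK_AHEAD,
 * turn every completed descriptor into a fully populated mbuf and park the
 * mbufs in rxq->rx_stage; ice_rx_fill_from_stage() later hands the staged
 * packets to the application.
 */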
1710 static inline int
1711 ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
1712 {
1713 	volatile union ice_rx_flex_desc *rxdp;
1714 	struct ice_rx_entry *rxep;
1715 	struct rte_mbuf *mb;
1716 	uint16_t stat_err0;
1717 	uint16_t pkt_len, hdr_len;
1718 	int32_t s[ICE_LOOK_AHEAD], nb_dd;
1719 	int32_t i, j, nb_rx = 0;
1720 	uint64_t pkt_flags = 0;
1721 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
1722 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1723 	bool is_tsinit = false;
1724 	uint64_t ts_ns;
1725 	struct ice_vsi *vsi = rxq->vsi;
1726 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
1727 	struct ice_adapter *ad = rxq->vsi->adapter;
1728 #endif
1729 	rxdp = &rxq->rx_ring[rxq->rx_tail];
1730 	rxep = &rxq->sw_ring[rxq->rx_tail];
1731 
1732 	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
1733 
1734 	/* Make sure there is at least 1 packet to receive */
1735 	if (!(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
1736 		return 0;
1737 
1738 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1739 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1740 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
1741 
1742 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
1743 			is_tsinit = 1;
1744 	}
1745 #endif
1746 
1747 	/**
1748 	 * Scan LOOK_AHEAD descriptors at a time to determine which
1749 	 * descriptors reference packets that are ready to be received.
1750 	 */
1751 	for (i = 0; i < ICE_RX_MAX_BURST; i += ICE_LOOK_AHEAD,
1752 	     rxdp += ICE_LOOK_AHEAD, rxep += ICE_LOOK_AHEAD) {
1753 		/* Read desc statuses backwards to avoid race condition */
1754 		for (j = ICE_LOOK_AHEAD - 1; j >= 0; j--)
1755 			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1756 
1757 		rte_smp_rmb();
1758 
1759 		/* Compute how many status bits were set */
1760 		for (j = 0, nb_dd = 0; j < ICE_LOOK_AHEAD; j++)
1761 			nb_dd += s[j] & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
1762 
1763 		nb_rx += nb_dd;
1764 
1765 		/* Translate descriptor info to mbuf parameters */
1766 		for (j = 0; j < nb_dd; j++) {
1767 			mb = rxep[j].mbuf;
1768 			pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1769 				   ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1770 			mb->data_len = pkt_len;
1771 			mb->pkt_len = pkt_len;
1772 
1773 			if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1774 				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1775 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1776 				mb->data_len = pkt_len;
1777 				mb->pkt_len = pkt_len;
1778 			} else {
1779 				mb->nb_segs = (uint16_t)(mb->nb_segs + mb->next->nb_segs);
1780 				mb->next->next = NULL;
1781 				hdr_len = rte_le_to_cpu_16(rxdp[j].wb.hdr_len_sph_flex_flags1) &
1782 						ICE_RX_FLEX_DESC_HEADER_LEN_M;
1783 				pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
1784 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
1785 				mb->data_len = hdr_len;
1786 				mb->pkt_len = hdr_len + pkt_len;
1787 				mb->next->data_len = pkt_len;
1788 #ifdef RTE_ETHDEV_DEBUG_RX
1789 				rte_pktmbuf_dump(stdout, mb, rte_pktmbuf_pkt_len(mb));
1790 #endif
1791 			}
1792 
1793 			mb->ol_flags = 0;
1794 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
1795 			pkt_flags = ice_rxd_error_to_pkt_flags(stat_err0);
1796 			mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
1797 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
1798 			ice_rxd_to_vlan_tci(mb, &rxdp[j]);
1799 			rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
1800 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
1801 			if (ice_timestamp_dynflag > 0 &&
1802 			    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
1803 				rxq->time_high =
1804 				rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1805 				if (unlikely(is_tsinit)) {
1806 					ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
1807 									   rxq->time_high);
1808 					rxq->hw_time_low = (uint32_t)ts_ns;
1809 					rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
1810 					is_tsinit = false;
1811 				} else {
1812 					if (rxq->time_high < rxq->hw_time_low)
1813 						rxq->hw_time_high += 1;
1814 					ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
1815 					rxq->hw_time_low = rxq->time_high;
1816 				}
1817 				rxq->hw_time_update = rte_get_timer_cycles() /
1818 						     (rte_get_timer_hz() / 1000);
1819 				*RTE_MBUF_DYNFIELD(mb,
1820 						   ice_timestamp_dynfield_offset,
1821 						   rte_mbuf_timestamp_t *) = ts_ns;
1822 				pkt_flags |= ice_timestamp_dynflag;
1823 			}
1824 
1825 			if (ad->ptp_ena && ((mb->packet_type &
1826 			    RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
1827 				rxq->time_high =
1828 				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
1829 				mb->timesync = rxq->queue_id;
1830 				pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
1831 				if (rxdp[j].wb.time_stamp_low &
1832 				    ICE_PTP_TS_VALID)
1833 					pkt_flags |=
1834 						RTE_MBUF_F_RX_IEEE1588_TMST;
1835 			}
1836 #endif
1837 			mb->ol_flags |= pkt_flags;
1838 		}
1839 
1840 		for (j = 0; j < ICE_LOOK_AHEAD; j++)
1841 			rxq->rx_stage[i + j] = rxep[j].mbuf;
1842 
1843 		if (nb_dd != ICE_LOOK_AHEAD)
1844 			break;
1845 	}
1846 
1847 	/* Clear software ring entries */
1848 	for (i = 0; i < nb_rx; i++)
1849 		rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1850 
1851 	PMD_RX_LOG(DEBUG, "ice_rx_scan_hw_ring: "
1852 		   "port_id=%u, queue_id=%u, nb_rx=%d",
1853 		   rxq->port_id, rxq->queue_id, nb_rx);
1854 
1855 	return nb_rx;
1856 }
1857 
1858 static inline uint16_t
1859 ice_rx_fill_from_stage(struct ice_rx_queue *rxq,
1860 		       struct rte_mbuf **rx_pkts,
1861 		       uint16_t nb_pkts)
1862 {
1863 	uint16_t i;
1864 	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1865 
1866 	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1867 
1868 	for (i = 0; i < nb_pkts; i++)
1869 		rx_pkts[i] = stage[i];
1870 
1871 	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1872 	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1873 
1874 	return nb_pkts;
1875 }
1876 
1877 static inline int
1878 ice_rx_alloc_bufs(struct ice_rx_queue *rxq)
1879 {
1880 	volatile union ice_rx_flex_desc *rxdp;
1881 	struct ice_rx_entry *rxep;
1882 	struct rte_mbuf *mb;
1883 	uint16_t alloc_idx, i;
1884 	uint64_t dma_addr;
1885 	int diag, diag_pay;
1886 	uint64_t pay_addr;
1887 	struct rte_mbuf *mbufs_pay[rxq->rx_free_thresh];
1888 
1889 	/* Allocate buffers in bulk */
1890 	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
1891 			       (rxq->rx_free_thresh - 1));
1892 	rxep = &rxq->sw_ring[alloc_idx];
1893 	diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep,
1894 				    rxq->rx_free_thresh);
1895 	if (unlikely(diag != 0)) {
1896 		PMD_RX_LOG(ERR, "Failed to get mbufs in bulk");
1897 		return -ENOMEM;
1898 	}
1899 
1900 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1901 		diag_pay = rte_mempool_get_bulk(rxq->rxseg[1].mp,
1902 				(void *)mbufs_pay, rxq->rx_free_thresh);
1903 		if (unlikely(diag_pay != 0)) {
1904 			rte_mempool_put_bulk(rxq->mp, (void *)rxep,
1905 				    rxq->rx_free_thresh);
1906 			PMD_RX_LOG(ERR, "Failed to get payload mbufs in bulk");
1907 			return -ENOMEM;
1908 		}
1909 	}
1910 
1911 	rxdp = &rxq->rx_ring[alloc_idx];
1912 	for (i = 0; i < rxq->rx_free_thresh; i++) {
1913 		if (likely(i < (rxq->rx_free_thresh - 1)))
1914 			/* Prefetch next mbuf */
1915 			rte_prefetch0(rxep[i + 1].mbuf);
1916 
1917 		mb = rxep[i].mbuf;
1918 		rte_mbuf_refcnt_set(mb, 1);
1919 		mb->data_off = RTE_PKTMBUF_HEADROOM;
1920 		mb->nb_segs = 1;
1921 		mb->port = rxq->port_id;
1922 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1923 
1924 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
1925 			mb->next = NULL;
1926 			rxdp[i].read.hdr_addr = 0;
1927 			rxdp[i].read.pkt_addr = dma_addr;
1928 		} else {
1929 			mb->next = mbufs_pay[i];
1930 			pay_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs_pay[i]));
1931 			rxdp[i].read.hdr_addr = dma_addr;
1932 			rxdp[i].read.pkt_addr = pay_addr;
1933 		}
1934 	}
1935 
1936 	/* Update Rx tail register */
1937 	ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
1938 
1939 	rxq->rx_free_trigger =
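	/* Advance the free trigger by one threshold's worth of descriptors,
	 * wrapping back to (rx_free_thresh - 1) once it passes the end of
	 * the ring.
	 */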
1940 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
1941 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1942 		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1943 
1944 	return 0;
1945 }
1946 
1947 static inline uint16_t
1948 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1949 {
1950 	struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
1951 	uint16_t nb_rx = 0;
1952 
1953 	if (!nb_pkts)
1954 		return 0;
1955 
1956 	if (rxq->rx_nb_avail)
1957 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1958 
1959 	nb_rx = (uint16_t)ice_rx_scan_hw_ring(rxq);
1960 	rxq->rx_next_avail = 0;
1961 	rxq->rx_nb_avail = nb_rx;
1962 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1963 
1964 	if (rxq->rx_tail > rxq->rx_free_trigger) {
1965 		if (ice_rx_alloc_bufs(rxq) != 0) {
1966 			uint16_t i, j;
1967 
1968 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
1969 				rxq->rx_free_thresh;
1970 			PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
1971 				   "port_id=%u, queue_id=%u",
1972 				   rxq->port_id, rxq->queue_id);
1973 			rxq->rx_nb_avail = 0;
1974 			rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1975 			for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
1976 				rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1977 
1978 			return 0;
1979 		}
1980 	}
1981 
1982 	if (rxq->rx_tail >= rxq->nb_rx_desc)
1983 		rxq->rx_tail = 0;
1984 
1985 	if (rxq->rx_nb_avail)
1986 		return ice_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1987 
1988 	return 0;
1989 }
1990 
1991 static uint16_t
1992 ice_recv_pkts_bulk_alloc(void *rx_queue,
1993 			 struct rte_mbuf **rx_pkts,
1994 			 uint16_t nb_pkts)
1995 {
1996 	uint16_t nb_rx = 0;
1997 	uint16_t n;
1998 	uint16_t count;
1999 
2000 	if (unlikely(nb_pkts == 0))
2001 		return nb_rx;
2002 
2003 	if (likely(nb_pkts <= ICE_RX_MAX_BURST))
2004 		return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
2005 
2006 	while (nb_pkts) {
2007 		n = RTE_MIN(nb_pkts, ICE_RX_MAX_BURST);
2008 		count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
2009 		nb_rx = (uint16_t)(nb_rx + count);
2010 		nb_pkts = (uint16_t)(nb_pkts - count);
2011 		if (count < n)
2012 			break;
2013 	}
2014 
2015 	return nb_rx;
2016 }
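/*
 * Example (assuming ICE_RX_MAX_BURST is 32): a request for 100 packets is
 * handled as chunks of 32, 32, 32 and 4, and the loop stops early as soon
 * as a chunk returns fewer packets than were requested from it.
 */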
2017 
2018 static uint16_t
2019 ice_recv_scattered_pkts(void *rx_queue,
2020 			struct rte_mbuf **rx_pkts,
2021 			uint16_t nb_pkts)
2022 {
2023 	struct ice_rx_queue *rxq = rx_queue;
2024 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2025 	volatile union ice_rx_flex_desc *rxdp;
2026 	union ice_rx_flex_desc rxd;
2027 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2028 	struct ice_rx_entry *rxe;
2029 	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
2030 	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
2031 	struct rte_mbuf *nmb; /* newly allocated mbuf */
2032 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2033 	uint16_t rx_id = rxq->rx_tail;
2034 	uint16_t nb_rx = 0;
2035 	uint16_t nb_hold = 0;
2036 	uint16_t rx_packet_len;
2037 	uint16_t rx_stat_err0;
2038 	uint64_t dma_addr;
2039 	uint64_t pkt_flags;
2040 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2041 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2042 	bool is_tsinit = false;
2043 	uint64_t ts_ns;
2044 	struct ice_vsi *vsi = rxq->vsi;
2045 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2046 	struct ice_adapter *ad = rxq->vsi->adapter;
2047 
2048 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2049 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2050 
2051 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
2052 			is_tsinit = true;
2053 	}
2054 #endif
2055 
2056 	while (nb_rx < nb_pkts) {
2057 		rxdp = &rx_ring[rx_id];
2058 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2059 
2060 		/* Check the DD bit first */
2061 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2062 			break;
2063 
2064 		/* allocate mbuf */
2065 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2066 		if (unlikely(!nmb)) {
2067 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2068 			break;
2069 		}
2070 		rxd = *rxdp; /* copy the ring descriptor to a temp variable */
2071 
2072 		nb_hold++;
2073 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2074 		rx_id++;
2075 		if (unlikely(rx_id == rxq->nb_rx_desc))
2076 			rx_id = 0;
2077 
2078 		/* Prefetch next mbuf */
2079 		rte_prefetch0(sw_ring[rx_id].mbuf);
2080 
2081 		/**
2082 		 * When the next RX descriptor is on a cache line boundary,
2083 		 * prefetch the next 4 RX descriptors and next 8 pointers
2084 		 * to mbufs.
2085 		 */
2086 		if ((rx_id & 0x3) == 0) {
2087 			rte_prefetch0(&rx_ring[rx_id]);
2088 			rte_prefetch0(&sw_ring[rx_id]);
2089 		}
2090 
2091 		rxm = rxe->mbuf;
2092 		rxe->mbuf = nmb;
2093 		dma_addr =
2094 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2095 
2096 		/* Set data buffer address and data length of the mbuf */
2097 		rxdp->read.hdr_addr = 0;
2098 		rxdp->read.pkt_addr = dma_addr;
2099 		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
2100 				ICE_RX_FLX_DESC_PKT_LEN_M;
2101 		rxm->data_len = rx_packet_len;
2102 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2103 
2104 		/**
2105 		 * If this is the first buffer of the received packet, set the
2106 		 * pointer to the first mbuf of the packet and initialize its
2107 		 * context. Otherwise, update the total length and the number
2108 		 * of segments of the current scattered packet, and update the
2109 		 * pointer to the last mbuf of the current packet.
2110 		 */
2111 		if (!first_seg) {
2112 			first_seg = rxm;
2113 			first_seg->nb_segs = 1;
2114 			first_seg->pkt_len = rx_packet_len;
2115 		} else {
2116 			first_seg->pkt_len =
2117 				(uint16_t)(first_seg->pkt_len +
2118 					   rx_packet_len);
2119 			first_seg->nb_segs++;
2120 			last_seg->next = rxm;
2121 		}
2122 
2123 		/**
2124 		 * If this is not the last buffer of the received packet,
2125 		 * update the pointer to the last mbuf of the current scattered
2126 		 * packet and continue to parse the RX ring.
2127 		 */
2128 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_EOF_S))) {
2129 			last_seg = rxm;
2130 			continue;
2131 		}
2132 
2133 		/**
2134 		 * This is the last buffer of the received packet. If the CRC
2135 		 * is not stripped by the hardware:
2136 		 *  - Subtract the CRC length from the total packet length.
2137 		 *  - If the last buffer only contains the whole CRC or a part
2138 		 *  of it, free the mbuf associated to the last buffer. If part
2139 		 *  of the CRC is also contained in the previous mbuf, subtract
2140 		 *  the length of that CRC part from the data length of the
2141 		 *  previous mbuf.
2142 		 */
2143 		rxm->next = NULL;
2144 		if (unlikely(rxq->crc_len > 0)) {
2145 			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
2146 			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
2147 				rte_pktmbuf_free_seg(rxm);
2148 				first_seg->nb_segs--;
2149 				last_seg->data_len =
2150 					(uint16_t)(last_seg->data_len -
2151 					(RTE_ETHER_CRC_LEN - rx_packet_len));
2152 				last_seg->next = NULL;
2153 			} else
2154 				rxm->data_len = (uint16_t)(rx_packet_len -
2155 							   RTE_ETHER_CRC_LEN);
2156 		} else if (rx_packet_len == 0) {
2157 			rte_pktmbuf_free_seg(rxm);
2158 			first_seg->nb_segs--;
2159 			last_seg->next = NULL;
2160 		}
2161 
2162 		first_seg->port = rxq->port_id;
2163 		first_seg->ol_flags = 0;
2164 		first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2165 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2166 		ice_rxd_to_vlan_tci(first_seg, &rxd);
2167 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
2168 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2169 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2170 		if (ice_timestamp_dynflag > 0 &&
2171 		    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
2172 			rxq->time_high =
2173 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2174 			if (unlikely(is_tsinit)) {
2175 				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2176 				rxq->hw_time_low = (uint32_t)ts_ns;
2177 				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
2178 				is_tsinit = false;
2179 			} else {
2180 				if (rxq->time_high < rxq->hw_time_low)
2181 					rxq->hw_time_high += 1;
2182 				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
2183 				rxq->hw_time_low = rxq->time_high;
2184 			}
2185 			rxq->hw_time_update = rte_get_timer_cycles() /
2186 					     (rte_get_timer_hz() / 1000);
2187 			*RTE_MBUF_DYNFIELD(first_seg,
2188 					   (ice_timestamp_dynfield_offset),
2189 					   rte_mbuf_timestamp_t *) = ts_ns;
2190 			pkt_flags |= ice_timestamp_dynflag;
2191 		}
2192 
2193 		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
2194 		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2195 			rxq->time_high =
2196 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2197 			first_seg->timesync = rxq->queue_id;
2198 			pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2199 		}
2200 #endif
2201 		first_seg->ol_flags |= pkt_flags;
2202 		/* Prefetch data of the first segment. */
2203 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
2204 					  first_seg->data_off));
2205 		rx_pkts[nb_rx++] = first_seg;
2206 		first_seg = NULL;
2207 	}
2208 
2209 	/* Record index of the next RX descriptor to probe. */
2210 	rxq->rx_tail = rx_id;
2211 	rxq->pkt_first_seg = first_seg;
2212 	rxq->pkt_last_seg = last_seg;
2213 
2214 	/**
2215 	 * If the number of free RX descriptors is greater than the RX free
2216 	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
2217 	 * register. Update the RDT with the value of the last processed RX
2218 	 * descriptor minus 1, to guarantee that the RDT register is never
2219 	 * equal to the RDH register, which creates a "full" ring situation
2220 	 * from the hardware point of view.
2221 	 */
2222 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2223 	if (nb_hold > rxq->rx_free_thresh) {
2224 		rx_id = (uint16_t)(rx_id == 0 ?
2225 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2226 		/* write TAIL register */
2227 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2228 		nb_hold = 0;
2229 	}
2230 	rxq->nb_rx_hold = nb_hold;
2231 
2232 	/* return received packet in the burst */
2233 	return nb_rx;
2234 }
2235 
2236 const uint32_t *
2237 ice_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
2238 {
2239 	struct ice_adapter *ad =
2240 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
2241 	const uint32_t *ptypes;
2242 
2243 	static const uint32_t ptypes_os[] = {
2244 		/* refers to ice_get_default_pkt_type() */
2245 		RTE_PTYPE_L2_ETHER,
2246 		RTE_PTYPE_L2_ETHER_TIMESYNC,
2247 		RTE_PTYPE_L2_ETHER_LLDP,
2248 		RTE_PTYPE_L2_ETHER_ARP,
2249 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2250 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2251 		RTE_PTYPE_L4_FRAG,
2252 		RTE_PTYPE_L4_ICMP,
2253 		RTE_PTYPE_L4_NONFRAG,
2254 		RTE_PTYPE_L4_SCTP,
2255 		RTE_PTYPE_L4_TCP,
2256 		RTE_PTYPE_L4_UDP,
2257 		RTE_PTYPE_TUNNEL_GRENAT,
2258 		RTE_PTYPE_TUNNEL_IP,
2259 		RTE_PTYPE_INNER_L2_ETHER,
2260 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2261 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2262 		RTE_PTYPE_INNER_L4_FRAG,
2263 		RTE_PTYPE_INNER_L4_ICMP,
2264 		RTE_PTYPE_INNER_L4_NONFRAG,
2265 		RTE_PTYPE_INNER_L4_SCTP,
2266 		RTE_PTYPE_INNER_L4_TCP,
2267 		RTE_PTYPE_INNER_L4_UDP,
2268 	};
2269 
2270 	static const uint32_t ptypes_comms[] = {
2271 		/* refers to ice_get_default_pkt_type() */
2272 		RTE_PTYPE_L2_ETHER,
2273 		RTE_PTYPE_L2_ETHER_TIMESYNC,
2274 		RTE_PTYPE_L2_ETHER_LLDP,
2275 		RTE_PTYPE_L2_ETHER_ARP,
2276 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
2277 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
2278 		RTE_PTYPE_L4_FRAG,
2279 		RTE_PTYPE_L4_ICMP,
2280 		RTE_PTYPE_L4_NONFRAG,
2281 		RTE_PTYPE_L4_SCTP,
2282 		RTE_PTYPE_L4_TCP,
2283 		RTE_PTYPE_L4_UDP,
2284 		RTE_PTYPE_TUNNEL_GRENAT,
2285 		RTE_PTYPE_TUNNEL_IP,
2286 		RTE_PTYPE_INNER_L2_ETHER,
2287 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
2288 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
2289 		RTE_PTYPE_INNER_L4_FRAG,
2290 		RTE_PTYPE_INNER_L4_ICMP,
2291 		RTE_PTYPE_INNER_L4_NONFRAG,
2292 		RTE_PTYPE_INNER_L4_SCTP,
2293 		RTE_PTYPE_INNER_L4_TCP,
2294 		RTE_PTYPE_INNER_L4_UDP,
2295 		RTE_PTYPE_TUNNEL_GTPC,
2296 		RTE_PTYPE_TUNNEL_GTPU,
2297 		RTE_PTYPE_L2_ETHER_PPPOE,
2298 	};
2299 
2300 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) {
2301 		*no_of_elements = RTE_DIM(ptypes_comms);
2302 		ptypes = ptypes_comms;
2303 	} else {
2304 		*no_of_elements = RTE_DIM(ptypes_os);
2305 		ptypes = ptypes_os;
2306 	}
2307 
2308 	if (dev->rx_pkt_burst == ice_recv_pkts ||
2309 	    dev->rx_pkt_burst == ice_recv_pkts_bulk_alloc ||
2310 	    dev->rx_pkt_burst == ice_recv_scattered_pkts)
2311 		return ptypes;
2312 
2313 #ifdef RTE_ARCH_X86
2314 	if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
2315 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
2316 #ifdef CC_AVX512_SUPPORT
2317 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
2318 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
2319 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
2320 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
2321 #endif
2322 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
2323 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
2324 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
2325 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
2326 		return ptypes;
2327 #endif
2328 
2329 	return NULL;
2330 }
2331 
2332 int
2333 ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
2334 {
2335 	volatile union ice_rx_flex_desc *rxdp;
2336 	struct ice_rx_queue *rxq = rx_queue;
2337 	uint32_t desc;
2338 
2339 	if (unlikely(offset >= rxq->nb_rx_desc))
2340 		return -EINVAL;
2341 
2342 	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
2343 		return RTE_ETH_RX_DESC_UNAVAIL;
2344 
2345 	desc = rxq->rx_tail + offset;
2346 	if (desc >= rxq->nb_rx_desc)
2347 		desc -= rxq->nb_rx_desc;
2348 
2349 	rxdp = &rxq->rx_ring[desc];
2350 	if (rte_le_to_cpu_16(rxdp->wb.status_error0) &
2351 	    (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))
2352 		return RTE_ETH_RX_DESC_DONE;
2353 
2354 	return RTE_ETH_RX_DESC_AVAIL;
2355 }
2356 
2357 int
2358 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
2359 {
2360 	struct ice_tx_queue *txq = tx_queue;
2361 	volatile uint64_t *status;
2362 	uint64_t mask, expect;
2363 	uint32_t desc;
2364 
2365 	if (unlikely(offset >= txq->nb_tx_desc))
2366 		return -EINVAL;
2367 
2368 	desc = txq->tx_tail + offset;
2369 	/* go to next desc that has the RS bit */
2370 	desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
2371 		txq->tx_rs_thresh;
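	/* Round up to the granularity at which the RS bit is requested,
	 * e.g. with tx_rs_thresh = 32, descriptor indexes 33..64 all map
	 * to 64.
	 */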
2372 	if (desc >= txq->nb_tx_desc) {
2373 		desc -= txq->nb_tx_desc;
2374 		if (desc >= txq->nb_tx_desc)
2375 			desc -= txq->nb_tx_desc;
2376 	}
2377 
2378 	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
2379 	mask = rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M);
2380 	expect = rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE <<
2381 				  ICE_TXD_QW1_DTYPE_S);
2382 	if ((*status & mask) == expect)
2383 		return RTE_ETH_TX_DESC_DONE;
2384 
2385 	return RTE_ETH_TX_DESC_FULL;
2386 }
2387 
2388 void
2389 ice_free_queues(struct rte_eth_dev *dev)
2390 {
2391 	uint16_t i;
2392 
2393 	PMD_INIT_FUNC_TRACE();
2394 
2395 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2396 		if (!dev->data->rx_queues[i])
2397 			continue;
2398 		ice_rx_queue_release(dev->data->rx_queues[i]);
2399 		dev->data->rx_queues[i] = NULL;
2400 	}
2401 	dev->data->nb_rx_queues = 0;
2402 
2403 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2404 		if (!dev->data->tx_queues[i])
2405 			continue;
2406 		ice_tx_queue_release(dev->data->tx_queues[i]);
2407 		dev->data->tx_queues[i] = NULL;
2408 	}
2409 	dev->data->nb_tx_queues = 0;
2410 }
2411 
2412 #define ICE_FDIR_NUM_TX_DESC  ICE_MIN_RING_DESC
2413 #define ICE_FDIR_NUM_RX_DESC  ICE_MIN_RING_DESC
2414 
2415 int
2416 ice_fdir_setup_tx_resources(struct ice_pf *pf)
2417 {
2418 	struct ice_tx_queue *txq;
2419 	const struct rte_memzone *tz = NULL;
2420 	uint32_t ring_size;
2421 	struct rte_eth_dev *dev;
2422 
2423 	if (!pf) {
2424 		PMD_DRV_LOG(ERR, "PF is not available");
2425 		return -EINVAL;
2426 	}
2427 
2428 	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2429 
2430 	/* Allocate the TX queue data structure. */
2431 	txq = rte_zmalloc_socket("ice fdir tx queue",
2432 				 sizeof(struct ice_tx_queue),
2433 				 RTE_CACHE_LINE_SIZE,
2434 				 SOCKET_ID_ANY);
2435 	if (!txq) {
2436 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2437 			    "tx queue structure.");
2438 		return -ENOMEM;
2439 	}
2440 
2441 	/* Allocate TX hardware ring descriptors. */
2442 	ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC;
2443 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2444 
2445 	tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
2446 				      ICE_FDIR_QUEUE_ID, ring_size,
2447 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2448 	if (!tz) {
2449 		ice_tx_queue_release(txq);
2450 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
2451 		return -ENOMEM;
2452 	}
2453 
2454 	txq->mz = tz;
2455 	txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC;
2456 	txq->queue_id = ICE_FDIR_QUEUE_ID;
2457 	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2458 	txq->vsi = pf->fdir.fdir_vsi;
2459 
2460 	txq->tx_ring_dma = tz->iova;
2461 	txq->tx_ring = (struct ice_tx_desc *)tz->addr;
2462 	/*
2463 	 * No need to allocate a software ring or to reset the FDIR
2464 	 * program queue; just mark the queue as configured.
2465 	 */
2466 	txq->q_set = true;
2467 	pf->fdir.txq = txq;
2468 
2469 	txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
2470 
2471 	return ICE_SUCCESS;
2472 }
2473 
2474 int
2475 ice_fdir_setup_rx_resources(struct ice_pf *pf)
2476 {
2477 	struct ice_rx_queue *rxq;
2478 	const struct rte_memzone *rz = NULL;
2479 	uint32_t ring_size;
2480 	struct rte_eth_dev *dev;
2481 
2482 	if (!pf) {
2483 		PMD_DRV_LOG(ERR, "PF is not available");
2484 		return -EINVAL;
2485 	}
2486 
2487 	dev = &rte_eth_devices[pf->adapter->pf.dev_data->port_id];
2488 
2489 	/* Allocate the RX queue data structure. */
2490 	rxq = rte_zmalloc_socket("ice fdir rx queue",
2491 				 sizeof(struct ice_rx_queue),
2492 				 RTE_CACHE_LINE_SIZE,
2493 				 SOCKET_ID_ANY);
2494 	if (!rxq) {
2495 		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
2496 			    "rx queue structure.");
2497 		return -ENOMEM;
2498 	}
2499 
2500 	/* Allocate RX hardware ring descriptors. */
2501 	ring_size = sizeof(union ice_32byte_rx_desc) * ICE_FDIR_NUM_RX_DESC;
2502 	ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN);
2503 
2504 	rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
2505 				      ICE_FDIR_QUEUE_ID, ring_size,
2506 				      ICE_RING_BASE_ALIGN, SOCKET_ID_ANY);
2507 	if (!rz) {
2508 		ice_rx_queue_release(rxq);
2509 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
2510 		return -ENOMEM;
2511 	}
2512 
2513 	rxq->mz = rz;
2514 	rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC;
2515 	rxq->queue_id = ICE_FDIR_QUEUE_ID;
2516 	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
2517 	rxq->vsi = pf->fdir.fdir_vsi;
2518 
2519 	rxq->rx_ring_dma = rz->iova;
2520 	memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC *
2521 	       sizeof(union ice_32byte_rx_desc));
2522 	rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr;
2523 
2524 	/*
2525 	 * No need to allocate a software ring or to reset the FDIR
2526 	 * Rx queue; just mark the queue as configured.
2527 	 */
2528 	rxq->q_set = true;
2529 	pf->fdir.rxq = rxq;
2530 
2531 	rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs;
2532 
2533 	return ICE_SUCCESS;
2534 }
2535 
2536 uint16_t
2537 ice_recv_pkts(void *rx_queue,
2538 	      struct rte_mbuf **rx_pkts,
2539 	      uint16_t nb_pkts)
2540 {
2541 	struct ice_rx_queue *rxq = rx_queue;
2542 	volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring;
2543 	volatile union ice_rx_flex_desc *rxdp;
2544 	union ice_rx_flex_desc rxd;
2545 	struct ice_rx_entry *sw_ring = rxq->sw_ring;
2546 	struct ice_rx_entry *rxe;
2547 	struct rte_mbuf *nmb; /* newly allocated mbuf */
2548 	struct rte_mbuf *nmb_pay; /* newly allocated payload mbuf */
2549 	struct rte_mbuf *rxm; /* pointer to store old mbuf in SW ring */
2550 	uint16_t rx_id = rxq->rx_tail;
2551 	uint16_t nb_rx = 0;
2552 	uint16_t nb_hold = 0;
2553 	uint16_t rx_packet_len;
2554 	uint16_t rx_header_len;
2555 	uint16_t rx_stat_err0;
2556 	uint64_t dma_addr;
2557 	uint64_t pkt_flags;
2558 	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
2559 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2560 	bool is_tsinit = false;
2561 	uint64_t ts_ns;
2562 	struct ice_vsi *vsi = rxq->vsi;
2563 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
2564 	struct ice_adapter *ad = rxq->vsi->adapter;
2565 
2566 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
2567 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
2568 
2569 		if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
2570 			is_tsinit = true;
2571 	}
2572 #endif
2573 
2574 	while (nb_rx < nb_pkts) {
2575 		rxdp = &rx_ring[rx_id];
2576 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
2577 
2578 		/* Check the DD bit first */
2579 		if (!(rx_stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
2580 			break;
2581 
2582 		/* allocate header mbuf */
2583 		nmb = rte_mbuf_raw_alloc(rxq->mp);
2584 		if (unlikely(!nmb)) {
2585 			rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2586 			break;
2587 		}
2588 
2589 		rxd = *rxdp; /* copy descriptor in ring to temp variable*/
2590 		rxd = *rxdp; /* copy the ring descriptor to a temp variable */
2591 		nb_hold++;
2592 		rxe = &sw_ring[rx_id]; /* get corresponding mbuf in SW ring */
2593 		rx_id++;
2594 		if (unlikely(rx_id == rxq->nb_rx_desc))
2595 			rx_id = 0;
2596 		rxm = rxe->mbuf;
2597 		rxe->mbuf = nmb;
2598 		dma_addr =
2599 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
2600 
2601 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
2602 			/**
2603 			 * Fill the read format of the descriptor with the physical
2604 			 * address of the newly allocated mbuf: nmb
2605 			 */
2606 			rxdp->read.hdr_addr = 0;
2607 			rxdp->read.pkt_addr = dma_addr;
2608 		} else {
2609 			/* allocate payload mbuf */
2610 			nmb_pay = rte_mbuf_raw_alloc(rxq->rxseg[1].mp);
2611 			if (unlikely(!nmb_pay)) {
2612 				rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
2613 				rxe->mbuf = NULL;
2614 				nb_hold--;
2615 				if (unlikely(rx_id == 0))
2616 					rx_id = rxq->nb_rx_desc;
2617 
2618 				rx_id--;
2619 				rte_pktmbuf_free(nmb);
2620 				break;
2621 			}
2622 
2623 			nmb->next = nmb_pay;
2624 			nmb_pay->next = NULL;
2625 
2626 			/**
2627 			 * Fill the read format of the descriptor with the physical
2628 			 * address of the newly allocated mbuf: nmb
2629 			 */
2630 			rxdp->read.hdr_addr = dma_addr;
2631 			rxdp->read.pkt_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb_pay));
2632 		}
2633 
2634 		/* fill the old mbuf (rxm) using the received descriptor copy: rxd */
2635 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
2636 		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
2637 		if (!(rxq->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
2638 			rxm->nb_segs = 1;
2639 			rxm->next = NULL;
2640 			/* calculate rx_packet_len of the received pkt */
2641 			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2642 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2643 			rxm->data_len = rx_packet_len;
2644 			rxm->pkt_len = rx_packet_len;
2645 		} else {
2646 			rxm->nb_segs = (uint16_t)(rxm->nb_segs + rxm->next->nb_segs);
2647 			rxm->next->next = NULL;
2648 			/* calculate rx_packet_len of the received pkt */
2649 			rx_header_len = rte_le_to_cpu_16(rxd.wb.hdr_len_sph_flex_flags1) &
2650 					ICE_RX_FLEX_DESC_HEADER_LEN_M;
2651 			rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
2652 					ICE_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
2653 			rxm->data_len = rx_header_len;
2654 			rxm->pkt_len = rx_header_len + rx_packet_len;
2655 			rxm->next->data_len = rx_packet_len;
2656 
2657 #ifdef RTE_ETHDEV_DEBUG_RX
2658 			rte_pktmbuf_dump(stdout, rxm, rte_pktmbuf_pkt_len(rxm));
2659 #endif
2660 		}
2661 
2662 		rxm->port = rxq->port_id;
2663 		rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
2664 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
2665 		ice_rxd_to_vlan_tci(rxm, &rxd);
2666 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
2667 		pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
2668 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
2669 		if (ice_timestamp_dynflag > 0 &&
2670 		    (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
2671 			rxq->time_high =
2672 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2673 			if (unlikely(is_tsinit)) {
2674 				ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
2675 				rxq->hw_time_low = (uint32_t)ts_ns;
2676 				rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
2677 				is_tsinit = false;
2678 			} else {
2679 				if (rxq->time_high < rxq->hw_time_low)
2680 					rxq->hw_time_high += 1;
2681 				ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
2682 				rxq->hw_time_low = rxq->time_high;
2683 			}
2684 			rxq->hw_time_update = rte_get_timer_cycles() /
2685 					     (rte_get_timer_hz() / 1000);
2686 			*RTE_MBUF_DYNFIELD(rxm,
2687 					   (ice_timestamp_dynfield_offset),
2688 					   rte_mbuf_timestamp_t *) = ts_ns;
2689 			pkt_flags |= ice_timestamp_dynflag;
2690 		}
2691 
2692 		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
2693 		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
2694 			rxq->time_high =
2695 			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
2696 			rxm->timesync = rxq->queue_id;
2697 			pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
2698 		}
2699 #endif
2700 		rxm->ol_flags |= pkt_flags;
2701 		/* copy old mbuf to rx_pkts */
2702 		rx_pkts[nb_rx++] = rxm;
2703 	}
2704 
2705 	rxq->rx_tail = rx_id;
2706 	/**
2707 	 * If the number of free RX descriptors is greater than the RX free
2708 	 * threshold of the queue, advance the receive tail register of the queue.
2709 	 * Update that register with the value of the last processed RX
2710 	 * descriptor minus 1, so that the tail never catches up with the head.
2711 	 */
2712 	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
2713 	if (nb_hold > rxq->rx_free_thresh) {
2714 		rx_id = (uint16_t)(rx_id == 0 ?
2715 				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
2716 		/* write TAIL register */
2717 		ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
2718 		nb_hold = 0;
2719 	}
2720 	rxq->nb_rx_hold = nb_hold;
2721 
2722 	/* return received packet in the burst */
2723 	return nb_rx;
2724 }
2725 
2726 static inline void
2727 ice_parse_tunneling_params(uint64_t ol_flags,
2728 			    union ice_tx_offload tx_offload,
2729 			    uint32_t *cd_tunneling)
2730 {
2731 	/* EIPT: External (outer) IP header type */
2732 	if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)
2733 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4;
2734 	else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)
2735 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
2736 	else if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)
2737 		*cd_tunneling |= ICE_TX_CTX_EIPT_IPV6;
2738 
2739 	/* EIPLEN: External (outer) IP header length, in DWords */
2740 	*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
2741 		ICE_TXD_CTX_QW0_EIPLEN_S;
2742 
2743 	/* L4TUNT: L4 Tunneling Type */
2744 	switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
2745 	case RTE_MBUF_F_TX_TUNNEL_IPIP:
2746 		/* for non UDP / GRE tunneling, set to 00b */
2747 		break;
2748 	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
2749 	case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
2750 	case RTE_MBUF_F_TX_TUNNEL_GTP:
2751 	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
2752 		*cd_tunneling |= ICE_TXD_CTX_UDP_TUNNELING;
2753 		break;
2754 	case RTE_MBUF_F_TX_TUNNEL_GRE:
2755 		*cd_tunneling |= ICE_TXD_CTX_GRE_TUNNELING;
2756 		break;
2757 	default:
2758 		PMD_TX_LOG(ERR, "Tunnel type not supported");
2759 		return;
2760 	}
2761 
2762 	/* L4TUNLEN: L4 Tunneling Length, in Words
2763 	 *
2764 	 * We depend on the application to set rte_mbuf.l2_len correctly.
2765 	 * For IP in GRE it should be set to the length of the GRE
2766 	 * header;
2767 	 * For MAC in GRE or MAC in UDP it should be set to the length
2768 	 * of the GRE or UDP headers plus the inner MAC, up to and
2769 	 * including its last Ethertype.
2770 	 * If MPLS labels exist, they should be included as well.
2771 	 */
2772 	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
2773 		ICE_TXD_CTX_QW0_NATLEN_S;
2774 
2775 	/**
2776 	 * Request calculation of the tunneling UDP checksum.
2777 	 * The L4T_CS bit shall be set only if L4TUNT = 01b and EIPT is not zero.
2778 	 */
2779 	if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) &&
2780 			(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) &&
2781 			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
2782 		*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
2783 }
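/*
 * Illustrative sketch only: for a VXLAN-in-IPv4 packet the application is
 * expected to provide roughly
 *
 *	m->outer_l2_len = RTE_ETHER_HDR_LEN;
 *	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->l2_len = sizeof(struct rte_udp_hdr) +
 *		    sizeof(struct rte_vxlan_hdr) + RTE_ETHER_HDR_LEN;
 *	m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN | RTE_MBUF_F_TX_OUTER_IPV4;
 *
 * so that the EIPLEN and L4TUNLEN fields above are derived correctly.
 */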
2784 
2785 static inline void
2786 ice_txd_enable_checksum(uint64_t ol_flags,
2787 			uint32_t *td_cmd,
2788 			uint32_t *td_offset,
2789 			union ice_tx_offload tx_offload)
2790 {
2791 	/* Set MACLEN */
2792 	if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK))
2793 		*td_offset |= (tx_offload.l2_len >> 1)
2794 			<< ICE_TX_DESC_LEN_MACLEN_S;
2795 
2796 	/* Enable L3 checksum offloads */
2797 	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
2798 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2799 		*td_offset |= (tx_offload.l3_len >> 2) <<
2800 			ICE_TX_DESC_LEN_IPLEN_S;
2801 	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
2802 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2803 		*td_offset |= (tx_offload.l3_len >> 2) <<
2804 			ICE_TX_DESC_LEN_IPLEN_S;
2805 	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
2806 		*td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2807 		*td_offset |= (tx_offload.l3_len >> 2) <<
2808 			ICE_TX_DESC_LEN_IPLEN_S;
2809 	}
2810 
2811 	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
2812 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2813 		*td_offset |= (tx_offload.l4_len >> 2) <<
2814 			      ICE_TX_DESC_LEN_L4_LEN_S;
2815 		return;
2816 	}
2817 
2818 	if (ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
2819 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2820 		*td_offset |= (tx_offload.l4_len >> 2) <<
2821 			      ICE_TX_DESC_LEN_L4_LEN_S;
2822 		return;
2823 	}
2824 
2825 	/* Enable L4 checksum offloads */
2826 	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
2827 	case RTE_MBUF_F_TX_TCP_CKSUM:
2828 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2829 		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
2830 			      ICE_TX_DESC_LEN_L4_LEN_S;
2831 		break;
2832 	case RTE_MBUF_F_TX_SCTP_CKSUM:
2833 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2834 		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
2835 			      ICE_TX_DESC_LEN_L4_LEN_S;
2836 		break;
2837 	case RTE_MBUF_F_TX_UDP_CKSUM:
2838 		*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2839 		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
2840 			      ICE_TX_DESC_LEN_L4_LEN_S;
2841 		break;
2842 	default:
2843 		break;
2844 	}
2845 }
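/*
 * Illustrative sketch only: to request IPv4 and TCP checksum offload on a
 * plain (non-tunneled) packet an application would typically set
 *
 *	m->l2_len = RTE_ETHER_HDR_LEN;
 *	m->l3_len = sizeof(struct rte_ipv4_hdr);
 *	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
 *		       RTE_MBUF_F_TX_TCP_CKSUM;
 *
 * which drives the MACLEN/IPLEN/L4LEN fields computed above.
 */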
2846 
2847 static inline int
2848 ice_xmit_cleanup(struct ice_tx_queue *txq)
2849 {
2850 	struct ci_tx_entry *sw_ring = txq->sw_ring;
2851 	volatile struct ice_tx_desc *txd = txq->tx_ring;
2852 	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
2853 	uint16_t nb_tx_desc = txq->nb_tx_desc;
2854 	uint16_t desc_to_clean_to;
2855 	uint16_t nb_tx_to_clean;
2856 
2857 	/* Determine the last descriptor needing to be cleaned */
2858 	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
2859 	if (desc_to_clean_to >= nb_tx_desc)
2860 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
2861 
2862 	/* Check to make sure the last descriptor to clean is done */
2863 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
2864 	if (!(txd[desc_to_clean_to].cmd_type_offset_bsz &
2865 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
2866 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
2867 			   "(port=%d queue=%d) value=0x%"PRIx64,
2868 			   desc_to_clean_to,
2869 			   txq->port_id, txq->queue_id,
2870 			   txd[desc_to_clean_to].cmd_type_offset_bsz);
2871 		/* Failed to clean any descriptors */
2872 		return -1;
2873 	}
2874 
2875 	/* Figure out how many descriptors will be cleaned */
2876 	if (last_desc_cleaned > desc_to_clean_to)
2877 		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
2878 					    desc_to_clean_to);
2879 	else
2880 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
2881 					    last_desc_cleaned);
2882 
2883 	/* The last descriptor to clean is done, so that means all the
2884 	 * descriptors from the last descriptor that was cleaned
2885 	 * up to the last descriptor with the RS bit set
2886 	 * are done. Only reset the threshold descriptor.
2887 	 */
2888 	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
2889 
2890 	/* Update the txq to reflect the last descriptor that was cleaned */
2891 	txq->last_desc_cleaned = desc_to_clean_to;
2892 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
2893 
2894 	return 0;
2895 }
2896 
2897 /* Construct the cmd_type_offset_bsz field of a Tx data descriptor */
2898 static inline uint64_t
2899 ice_build_ctob(uint32_t td_cmd,
2900 	       uint32_t td_offset,
2901 	       uint16_t size,
2902 	       uint32_t td_tag)
2903 {
2904 	return rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
2905 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
2906 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
2907 				((uint64_t)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
2908 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
2909 }
2910 
2911 /* Check if the context descriptor is needed for TX offloading */
2912 static inline uint16_t
2913 ice_calc_context_desc(uint64_t flags)
2914 {
2915 	static uint64_t mask = RTE_MBUF_F_TX_TCP_SEG |
2916 		RTE_MBUF_F_TX_UDP_SEG |
2917 		RTE_MBUF_F_TX_QINQ |
2918 		RTE_MBUF_F_TX_OUTER_IP_CKSUM |
2919 		RTE_MBUF_F_TX_TUNNEL_MASK |
2920 		RTE_MBUF_F_TX_IEEE1588_TMST;
2921 
2922 	return (flags & mask) ? 1 : 0;
2923 }
2924 
2925 /* set ice TSO context descriptor */
2926 static inline uint64_t
2927 ice_set_tso_ctx(struct rte_mbuf *mbuf, union ice_tx_offload tx_offload)
2928 {
2929 	uint64_t ctx_desc = 0;
2930 	uint32_t cd_cmd, hdr_len, cd_tso_len;
2931 
2932 	if (!tx_offload.l4_len) {
2933 		PMD_TX_LOG(DEBUG, "L4 length set to 0");
2934 		return ctx_desc;
2935 	}
2936 
2937 	hdr_len = tx_offload.l2_len + tx_offload.l3_len + tx_offload.l4_len;
2938 	hdr_len += (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
2939 		   tx_offload.outer_l2_len + tx_offload.outer_l3_len : 0;
2940 
2941 	cd_cmd = ICE_TX_CTX_DESC_TSO;
2942 	cd_tso_len = mbuf->pkt_len - hdr_len;
2943 	ctx_desc |= ((uint64_t)cd_cmd << ICE_TXD_CTX_QW1_CMD_S) |
2944 		    ((uint64_t)cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2945 		    ((uint64_t)mbuf->tso_segsz << ICE_TXD_CTX_QW1_MSS_S);
2946 
2947 	return ctx_desc;
2948 }
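/*
 * Worked example: for a 9014-byte TSO packet with 14 B Ethernet, 20 B IPv4
 * and 20 B TCP headers (hdr_len = 54) and tso_segsz = 1460, cd_tso_len is
 * 9014 - 54 = 8960, i.e. the payload the HW will slice into MSS-sized
 * segments.
 */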
2949 
2950 /* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
2951 #define ICE_MAX_DATA_PER_TXD \
2952 	(ICE_TXD_QW1_TX_BUF_SZ_M >> ICE_TXD_QW1_TX_BUF_SZ_S)
2953 /* Calculate the number of TX descriptors needed for each pkt */
2954 static inline uint16_t
2955 ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
2956 {
2957 	struct rte_mbuf *txd = tx_pkt;
2958 	uint16_t count = 0;
2959 
2960 	while (txd != NULL) {
2961 		count += DIV_ROUND_UP(txd->data_len, ICE_MAX_DATA_PER_TXD);
2962 		txd = txd->next;
2963 	}
2964 
2965 	return count;
2966 }
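/*
 * Worked example: a TSO packet made of a single 40000-byte segment needs
 * DIV_ROUND_UP(40000, 16383) = 3 data descriptors, while a packet of three
 * 1500-byte segments needs exactly 3.
 */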
2967 
2968 uint16_t
2969 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2970 {
2971 	struct ice_tx_queue *txq;
2972 	volatile struct ice_tx_desc *tx_ring;
2973 	volatile struct ice_tx_desc *txd;
2974 	struct ci_tx_entry *sw_ring;
2975 	struct ci_tx_entry *txe, *txn;
2976 	struct rte_mbuf *tx_pkt;
2977 	struct rte_mbuf *m_seg;
2978 	uint32_t cd_tunneling_params;
2979 	uint16_t tx_id;
2980 	uint16_t nb_tx;
2981 	uint16_t nb_used;
2982 	uint16_t nb_ctx;
2983 	uint32_t td_cmd = 0;
2984 	uint32_t td_offset = 0;
2985 	uint32_t td_tag = 0;
2986 	uint16_t tx_last;
2987 	uint16_t slen;
2988 	uint64_t buf_dma_addr;
2989 	uint64_t ol_flags;
2990 	union ice_tx_offload tx_offload = {0};
2991 
2992 	txq = tx_queue;
2993 	sw_ring = txq->sw_ring;
2994 	tx_ring = txq->tx_ring;
2995 	tx_id = txq->tx_tail;
2996 	txe = &sw_ring[tx_id];
2997 
2998 	/* Check if the descriptor ring needs to be cleaned. */
2999 	if (txq->nb_tx_free < txq->tx_free_thresh)
3000 		(void)ice_xmit_cleanup(txq);
3001 
3002 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
3003 		tx_pkt = *tx_pkts++;
3004 
3005 		td_cmd = 0;
3006 		td_tag = 0;
3007 		td_offset = 0;
3008 		ol_flags = tx_pkt->ol_flags;
3009 		tx_offload.l2_len = tx_pkt->l2_len;
3010 		tx_offload.l3_len = tx_pkt->l3_len;
3011 		tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
3012 		tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
3013 		tx_offload.l4_len = tx_pkt->l4_len;
3014 		tx_offload.tso_segsz = tx_pkt->tso_segsz;
3015 		/* Calculate the number of context descriptors needed. */
3016 		nb_ctx = ice_calc_context_desc(ol_flags);
3017 
3018 		/* The number of descriptors that must be allocated for
3019 		 * a packet equals the number of segments of that packet,
3020 		 * plus one context descriptor if needed.
3021 		 * Recalculate the needed Tx descriptors when TSO is enabled,
3022 		 * in case a single mbuf segment exceeds the maximum data size
3023 		 * the hardware allows per Tx descriptor.
3024 		 */
3025 		if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
3026 			nb_used = (uint16_t)(ice_calc_pkt_desc(tx_pkt) +
3027 					     nb_ctx);
3028 		else
3029 			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
3030 		tx_last = (uint16_t)(tx_id + nb_used - 1);
3031 
3032 		/* Circular ring */
3033 		if (tx_last >= txq->nb_tx_desc)
3034 			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
3035 
3036 		if (nb_used > txq->nb_tx_free) {
3037 			if (ice_xmit_cleanup(txq) != 0) {
3038 				if (nb_tx == 0)
3039 					return 0;
3040 				goto end_of_tx;
3041 			}
3042 			if (unlikely(nb_used > txq->tx_rs_thresh)) {
3043 				while (nb_used > txq->nb_tx_free) {
3044 					if (ice_xmit_cleanup(txq) != 0) {
3045 						if (nb_tx == 0)
3046 							return 0;
3047 						goto end_of_tx;
3048 					}
3049 				}
3050 			}
3051 		}
3052 
3053 		/* Descriptor based VLAN insertion */
3054 		if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
3055 			td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
3056 			td_tag = tx_pkt->vlan_tci;
3057 		}
3058 
3059 		/* Fill in tunneling parameters if necessary */
3060 		cd_tunneling_params = 0;
3061 		if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
3062 			td_offset |= (tx_offload.outer_l2_len >> 1)
3063 				<< ICE_TX_DESC_LEN_MACLEN_S;
3064 			ice_parse_tunneling_params(ol_flags, tx_offload,
3065 						   &cd_tunneling_params);
3066 		}
3067 
3068 		/* Enable checksum offloading */
3069 		if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK)
3070 			ice_txd_enable_checksum(ol_flags, &td_cmd,
3071 						&td_offset, tx_offload);
3072 
3073 		if (nb_ctx) {
3074 			/* Setup TX context descriptor if required */
3075 			volatile struct ice_tx_ctx_desc *ctx_txd =
3076 				(volatile struct ice_tx_ctx_desc *)
3077 					&tx_ring[tx_id];
3078 			uint16_t cd_l2tag2 = 0;
3079 			uint64_t cd_type_cmd_tso_mss = ICE_TX_DESC_DTYPE_CTX;
3080 
3081 			txn = &sw_ring[txe->next_id];
3082 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
3083 			if (txe->mbuf) {
3084 				rte_pktmbuf_free_seg(txe->mbuf);
3085 				txe->mbuf = NULL;
3086 			}
3087 
3088 			if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
3089 				cd_type_cmd_tso_mss |=
3090 					ice_set_tso_ctx(tx_pkt, tx_offload);
3091 			else if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
3092 				cd_type_cmd_tso_mss |=
3093 					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
3094 					ICE_TXD_CTX_QW1_CMD_S) |
3095 					 (((uint64_t)txq->vsi->adapter->ptp_tx_index <<
3096 					 ICE_TXD_CTX_QW1_TSYN_S) & ICE_TXD_CTX_QW1_TSYN_M);
3097 
3098 			ctx_txd->tunneling_params =
3099 				rte_cpu_to_le_32(cd_tunneling_params);
3100 
3101 			/* TX context descriptor based double VLAN insert */
3102 			if (ol_flags & RTE_MBUF_F_TX_QINQ) {
3103 				cd_l2tag2 = tx_pkt->vlan_tci_outer;
3104 				cd_type_cmd_tso_mss |=
3105 					((uint64_t)ICE_TX_CTX_DESC_IL2TAG2 <<
3106 					 ICE_TXD_CTX_QW1_CMD_S);
3107 			}
3108 			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
3109 			ctx_txd->qw1 =
3110 				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
3111 
3112 			txe->last_id = tx_last;
3113 			tx_id = txe->next_id;
3114 			txe = txn;
3115 		}
3116 		m_seg = tx_pkt;
3117 
3118 		do {
3119 			txd = &tx_ring[tx_id];
3120 			txn = &sw_ring[txe->next_id];
3121 
3122 			if (txe->mbuf)
3123 				rte_pktmbuf_free_seg(txe->mbuf);
3124 			txe->mbuf = m_seg;
3125 
3126 			/* Setup TX Descriptor */
3127 			slen = m_seg->data_len;
3128 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
3129 
3130 			while ((ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) &&
3131 				unlikely(slen > ICE_MAX_DATA_PER_TXD)) {
3132 				txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
3133 				txd->cmd_type_offset_bsz =
3134 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
3135 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
3136 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
3137 				((uint64_t)ICE_MAX_DATA_PER_TXD <<
3138 				 ICE_TXD_QW1_TX_BUF_SZ_S) |
3139 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
3140 
3141 				buf_dma_addr += ICE_MAX_DATA_PER_TXD;
3142 				slen -= ICE_MAX_DATA_PER_TXD;
3143 
3144 				txe->last_id = tx_last;
3145 				tx_id = txe->next_id;
3146 				txe = txn;
3147 				txd = &tx_ring[tx_id];
3148 				txn = &sw_ring[txe->next_id];
3149 			}
3150 
3151 			txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
3152 			txd->cmd_type_offset_bsz =
3153 				rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DATA |
3154 				((uint64_t)td_cmd << ICE_TXD_QW1_CMD_S) |
3155 				((uint64_t)td_offset << ICE_TXD_QW1_OFFSET_S) |
3156 				((uint64_t)slen << ICE_TXD_QW1_TX_BUF_SZ_S) |
3157 				((uint64_t)td_tag << ICE_TXD_QW1_L2TAG1_S));
3158 
3159 			txe->last_id = tx_last;
3160 			tx_id = txe->next_id;
3161 			txe = txn;
3162 			m_seg = m_seg->next;
3163 		} while (m_seg);
3164 
3165 		/* set the End of Packet (EOP) bit on the last descriptor */
3166 		td_cmd |= ICE_TX_DESC_CMD_EOP;
3167 		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
3168 		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
3169 
3170 		/* set RS bit on the last descriptor of one packet */
3171 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
3172 			PMD_TX_LOG(DEBUG,
3173 				   "Setting RS bit on TXD id="
3174 				   "%4u (port=%d queue=%d)",
3175 				   tx_last, txq->port_id, txq->queue_id);
3176 
3177 			td_cmd |= ICE_TX_DESC_CMD_RS;
3178 
3179 			/* Update txq RS bit counters */
3180 			txq->nb_tx_used = 0;
3181 		}
3182 		txd->cmd_type_offset_bsz |=
3183 			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
3184 					 ICE_TXD_QW1_CMD_S);
3185 	}
3186 end_of_tx:
3187 	/* update Tail register */
3188 	ICE_PCI_REG_WRITE(txq->qtx_tail, tx_id);
3189 	txq->tx_tail = tx_id;
3190 
3191 	return nb_tx;
3192 }
3193 
3194 static __rte_always_inline int
3195 ice_tx_free_bufs(struct ice_tx_queue *txq)
3196 {
3197 	struct ci_tx_entry *txep;
3198 	uint16_t i;
3199 
3200 	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
3201 	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
3202 	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
3203 		return 0;
3204 
3205 	txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_rs_thresh - 1)];
3206 
3207 	for (i = 0; i < txq->tx_rs_thresh; i++)
3208 		rte_prefetch0((txep + i)->mbuf);
3209 
3210 	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) {
3211 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
3212 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
3213 			txep->mbuf = NULL;
3214 		}
3215 	} else {
3216 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
3217 			rte_pktmbuf_free_seg(txep->mbuf);
3218 			txep->mbuf = NULL;
3219 		}
3220 	}
3221 
3222 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
3223 	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
3224 	if (txq->tx_next_dd >= txq->nb_tx_desc)
3225 		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
3226 
3227 	return txq->tx_rs_thresh;
3228 }
3229 
3230 static int
3231 ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
3232 			uint32_t free_cnt)
3233 {
3234 	struct ci_tx_entry *swr_ring = txq->sw_ring;
3235 	uint16_t i, tx_last, tx_id;
3236 	uint16_t nb_tx_free_last;
3237 	uint16_t nb_tx_to_clean;
3238 	uint32_t pkt_cnt;
3239 
3240 	/* Start freeing mbufs from the descriptor following tx_tail */
3241 	tx_last = txq->tx_tail;
3242 	tx_id  = swr_ring[tx_last].next_id;
3243 
3244 	if (txq->nb_tx_free == 0 && ice_xmit_cleanup(txq))
3245 		return 0;
3246 
3247 	nb_tx_to_clean = txq->nb_tx_free;
3248 	nb_tx_free_last = txq->nb_tx_free;
3249 	if (!free_cnt)
3250 		free_cnt = txq->nb_tx_desc;
3251 
3252 	/* Loop through swr_ring to count the number of
3253 	 * freeable mbufs and packets.
3254 	 */
3255 	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
3256 		for (i = 0; i < nb_tx_to_clean &&
3257 			pkt_cnt < free_cnt &&
3258 			tx_id != tx_last; i++) {
3259 			if (swr_ring[tx_id].mbuf != NULL) {
3260 				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
3261 				swr_ring[tx_id].mbuf = NULL;
3262 
3263 				/*
3264 				 * last segment in the packet,
3265 				 * increment packet count
3266 				 */
3267 				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
3268 			}
3269 
3270 			tx_id = swr_ring[tx_id].next_id;
3271 		}
3272 
3273 		if (txq->tx_rs_thresh > txq->nb_tx_desc -
3274 			txq->nb_tx_free || tx_id == tx_last)
3275 			break;
3276 
3277 		if (pkt_cnt < free_cnt) {
3278 			if (ice_xmit_cleanup(txq))
3279 				break;
3280 
3281 			nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
3282 			nb_tx_free_last = txq->nb_tx_free;
3283 		}
3284 	}
3285 
3286 	return (int)pkt_cnt;
3287 }
3288 
3289 #ifdef RTE_ARCH_X86
3290 static int
3291 ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
3292 			uint32_t free_cnt __rte_unused)
3293 {
3294 	return -ENOTSUP;
3295 }
3296 #endif
3297 
3298 static int
3299 ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
3300 			uint32_t free_cnt)
3301 {
3302 	int i, n, cnt;
3303 
3304 	if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
3305 		free_cnt = txq->nb_tx_desc;
3306 
3307 	cnt = free_cnt - free_cnt % txq->tx_rs_thresh;
3308 
3309 	for (i = 0; i < cnt; i += n) {
3310 		if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
3311 			break;
3312 
3313 		n = ice_tx_free_bufs(txq);
3314 
3315 		if (n == 0)
3316 			break;
3317 	}
3318 
3319 	return i;
3320 }
3321 
3322 int
3323 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
3324 {
3325 	struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
3326 	struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
3327 	struct ice_adapter *ad =
3328 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3329 
3330 #ifdef RTE_ARCH_X86
3331 	if (ad->tx_vec_allowed)
3332 		return ice_tx_done_cleanup_vec(q, free_cnt);
3333 #endif
3334 	if (ad->tx_simple_allowed)
3335 		return ice_tx_done_cleanup_simple(q, free_cnt);
3336 	else
3337 		return ice_tx_done_cleanup_full(q, free_cnt);
3338 }
3339 
3340 /* Populate 4 descriptors with data from 4 mbufs */
3341 static inline void
3342 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3343 {
3344 	uint64_t dma_addr;
3345 	uint32_t i;
3346 
3347 	for (i = 0; i < 4; i++, txdp++, pkts++) {
3348 		dma_addr = rte_mbuf_data_iova(*pkts);
3349 		txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3350 		txdp->cmd_type_offset_bsz =
3351 			ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3352 				       (*pkts)->data_len, 0);
3353 	}
3354 }
3355 
3356 /* Populate 1 descriptor with data from 1 mbuf */
3357 static inline void
3358 tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
3359 {
3360 	uint64_t dma_addr;
3361 
3362 	dma_addr = rte_mbuf_data_iova(*pkts);
3363 	txdp->buf_addr = rte_cpu_to_le_64(dma_addr);
3364 	txdp->cmd_type_offset_bsz =
3365 		ice_build_ctob((uint32_t)ICE_TD_CMD, 0,
3366 			       (*pkts)->data_len, 0);
3367 }
3368 
3369 static inline void
3370 ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
3371 		    uint16_t nb_pkts)
3372 {
3373 	volatile struct ice_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
3374 	struct ci_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
3375 	const int N_PER_LOOP = 4;
3376 	const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
3377 	int mainpart, leftover;
3378 	int i, j;
3379 
3380 	/**
3381 	 * Process most of the packets in chunks of N pkts.  Any
3382 	 * leftover packets will get processed one at a time.
3383 	 */
3384 	mainpart = nb_pkts & ((uint32_t)~N_PER_LOOP_MASK);
3385 	leftover = nb_pkts & ((uint32_t)N_PER_LOOP_MASK);
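	/* e.g. nb_pkts = 13 -> mainpart = 12 (three groups of 4), leftover = 1 */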
3386 	for (i = 0; i < mainpart; i += N_PER_LOOP) {
3387 		/* Copy N mbuf pointers to the S/W ring */
3388 		for (j = 0; j < N_PER_LOOP; ++j)
3389 			(txep + i + j)->mbuf = *(pkts + i + j);
3390 		tx4(txdp + i, pkts + i);
3391 	}
3392 
3393 	if (unlikely(leftover > 0)) {
3394 		for (i = 0; i < leftover; ++i) {
3395 			(txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
3396 			tx1(txdp + mainpart + i, pkts + mainpart + i);
3397 		}
3398 	}
3399 }
3400 
3401 static inline uint16_t
3402 tx_xmit_pkts(struct ice_tx_queue *txq,
3403 	     struct rte_mbuf **tx_pkts,
3404 	     uint16_t nb_pkts)
3405 {
3406 	volatile struct ice_tx_desc *txr = txq->tx_ring;
3407 	uint16_t n = 0;
3408 
3409 	/**
3410 	 * Begin scanning the H/W ring for done descriptors when the number
3411 	 * of available descriptors drops below tx_free_thresh. For each done
3412 	 * descriptor, free the associated buffer.
3413 	 */
3414 	if (txq->nb_tx_free < txq->tx_free_thresh)
3415 		ice_tx_free_bufs(txq);
3416 
3417 	/* Use available descriptor only */
3418 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
3419 	if (unlikely(!nb_pkts))
3420 		return 0;
3421 
3422 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
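	/*
	 * Wrap-around case, e.g. nb_tx_desc = 512, tx_tail = 500 and
	 * nb_pkts = 20: the first 12 descriptors are filled at the end of
	 * the ring, the tail wraps to 0, and the remaining 8 are filled
	 * from the start of the ring below.
	 */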
3423 	if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
3424 		n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
3425 		ice_tx_fill_hw_ring(txq, tx_pkts, n);
3426 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3427 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3428 					 ICE_TXD_QW1_CMD_S);
3429 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3430 		txq->tx_tail = 0;
3431 	}
3432 
3433 	/* Fill hardware descriptor ring with mbuf data */
3434 	ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
3435 	txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
3436 
3437 	/* Determine if RS bit needs to be set */
3438 	if (txq->tx_tail > txq->tx_next_rs) {
3439 		txr[txq->tx_next_rs].cmd_type_offset_bsz |=
3440 			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
3441 					 ICE_TXD_QW1_CMD_S);
3442 		txq->tx_next_rs =
3443 			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
3444 		if (txq->tx_next_rs >= txq->nb_tx_desc)
3445 			txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
3446 	}
3447 
3448 	if (txq->tx_tail >= txq->nb_tx_desc)
3449 		txq->tx_tail = 0;
3450 
3451 	/* Update the tx tail register */
3452 	ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
3453 
3454 	return nb_pkts;
3455 }
3456 
3457 static uint16_t
3458 ice_xmit_pkts_simple(void *tx_queue,
3459 		     struct rte_mbuf **tx_pkts,
3460 		     uint16_t nb_pkts)
3461 {
3462 	uint16_t nb_tx = 0;
3463 
3464 	if (likely(nb_pkts <= ICE_TX_MAX_BURST))
3465 		return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3466 				    tx_pkts, nb_pkts);
3467 
3468 	while (nb_pkts) {
3469 		uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
3470 						      ICE_TX_MAX_BURST);
3471 
3472 		ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
3473 				   &tx_pkts[nb_tx], num);
3474 		nb_tx = (uint16_t)(nb_tx + ret);
3475 		nb_pkts = (uint16_t)(nb_pkts - ret);
3476 		if (ret < num)
3477 			break;
3478 	}
3479 
3480 	return nb_tx;
3481 }
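/*
 * Editor's note: bursts larger than ICE_TX_MAX_BURST are simply split into
 * ICE_TX_MAX_BURST-sized chunks above; if a chunk is accepted only partially
 * (ring full), the loop stops and the caller is told how many packets were
 * actually queued.
 */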
3482 
3483 void __rte_cold
3484 ice_set_rx_function(struct rte_eth_dev *dev)
3485 {
3486 	PMD_INIT_FUNC_TRACE();
3487 	struct ice_adapter *ad =
3488 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3489 #ifdef RTE_ARCH_X86
3490 	struct ice_rx_queue *rxq;
3491 	int i;
3492 	int rx_check_ret = -1;
3493 
3494 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3495 		ad->rx_use_avx512 = false;
3496 		ad->rx_use_avx2 = false;
3497 		rx_check_ret = ice_rx_vec_dev_check(dev);
3498 		if (ad->ptp_ena)
3499 			rx_check_ret = -1;
3500 		ad->rx_vec_offload_support =
3501 				(rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
3502 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
3503 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3504 			ad->rx_vec_allowed = true;
3505 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
3506 				rxq = dev->data->rx_queues[i];
3507 				if (rxq && ice_rxq_vec_setup(rxq)) {
3508 					ad->rx_vec_allowed = false;
3509 					break;
3510 				}
3511 			}
3512 
3513 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3514 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3515 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3516 #ifdef CC_AVX512_SUPPORT
3517 				ad->rx_use_avx512 = true;
3518 #else
3519 			PMD_DRV_LOG(NOTICE,
3520 				"AVX512 is not supported in build env");
3521 #endif
3522 			if (!ad->rx_use_avx512 &&
3523 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3524 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3525 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3526 				ad->rx_use_avx2 = true;
3527 
3528 		} else {
3529 			ad->rx_vec_allowed = false;
3530 		}
3531 	}
3532 
3533 	if (ad->rx_vec_allowed) {
3534 		if (dev->data->scattered_rx) {
3535 			if (ad->rx_use_avx512) {
3536 #ifdef CC_AVX512_SUPPORT
3537 				if (ad->rx_vec_offload_support) {
3538 					PMD_DRV_LOG(NOTICE,
3539 						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
3540 						dev->data->port_id);
3541 					dev->rx_pkt_burst =
3542 						ice_recv_scattered_pkts_vec_avx512_offload;
3543 				} else {
3544 					PMD_DRV_LOG(NOTICE,
3545 						"Using AVX512 Vector Scattered Rx (port %d).",
3546 						dev->data->port_id);
3547 					dev->rx_pkt_burst =
3548 						ice_recv_scattered_pkts_vec_avx512;
3549 				}
3550 #endif
3551 			} else if (ad->rx_use_avx2) {
3552 				if (ad->rx_vec_offload_support) {
3553 					PMD_DRV_LOG(NOTICE,
3554 						    "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
3555 						    dev->data->port_id);
3556 					dev->rx_pkt_burst =
3557 						ice_recv_scattered_pkts_vec_avx2_offload;
3558 				} else {
3559 					PMD_DRV_LOG(NOTICE,
3560 						    "Using AVX2 Vector Scattered Rx (port %d).",
3561 						    dev->data->port_id);
3562 					dev->rx_pkt_burst =
3563 						ice_recv_scattered_pkts_vec_avx2;
3564 				}
3565 			} else {
3566 				PMD_DRV_LOG(DEBUG,
3567 					"Using Vector Scattered Rx (port %d).",
3568 					dev->data->port_id);
3569 				dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
3570 			}
3571 		} else {
3572 			if (ad->rx_use_avx512) {
3573 #ifdef CC_AVX512_SUPPORT
3574 				if (ad->rx_vec_offload_support) {
3575 					PMD_DRV_LOG(NOTICE,
3576 						"Using AVX512 OFFLOAD Vector Rx (port %d).",
3577 						dev->data->port_id);
3578 					dev->rx_pkt_burst =
3579 						ice_recv_pkts_vec_avx512_offload;
3580 				} else {
3581 					PMD_DRV_LOG(NOTICE,
3582 						"Using AVX512 Vector Rx (port %d).",
3583 						dev->data->port_id);
3584 					dev->rx_pkt_burst =
3585 						ice_recv_pkts_vec_avx512;
3586 				}
3587 #endif
3588 			} else if (ad->rx_use_avx2) {
3589 				if (ad->rx_vec_offload_support) {
3590 					PMD_DRV_LOG(NOTICE,
3591 						    "Using AVX2 OFFLOAD Vector Rx (port %d).",
3592 						    dev->data->port_id);
3593 					dev->rx_pkt_burst =
3594 						ice_recv_pkts_vec_avx2_offload;
3595 				} else {
3596 					PMD_DRV_LOG(NOTICE,
3597 						    "Using AVX2 Vector Rx (port %d).",
3598 						    dev->data->port_id);
3599 					dev->rx_pkt_burst =
3600 						ice_recv_pkts_vec_avx2;
3601 				}
3602 			} else {
3603 				PMD_DRV_LOG(DEBUG,
3604 					"Using Vector Rx (port %d).",
3605 					dev->data->port_id);
3606 				dev->rx_pkt_burst = ice_recv_pkts_vec;
3607 			}
3608 		}
3609 		return;
3610 	}
3611 
3612 #endif
3613 
3614 	if (dev->data->scattered_rx) {
3615 		/* Set the non-LRO scattered Rx function */
3616 		PMD_INIT_LOG(DEBUG,
3617 			     "Using Scattered Rx function on port %d.",
3618 			     dev->data->port_id);
3619 		dev->rx_pkt_burst = ice_recv_scattered_pkts;
3620 	} else if (ad->rx_bulk_alloc_allowed) {
3621 		PMD_INIT_LOG(DEBUG,
3622 			     "Rx Burst Bulk Alloc Preconditions are "
3623 			     "satisfied. Rx Burst Bulk Alloc function "
3624 			     "will be used on port %d.",
3625 			     dev->data->port_id);
3626 		dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
3627 	} else {
3628 		PMD_INIT_LOG(DEBUG,
3629 			     "Rx Burst Bulk Alloc Preconditions are not "
3630 			     "satisfied, Normal Rx will be used on port %d.",
3631 			     dev->data->port_id);
3632 		dev->rx_pkt_burst = ice_recv_pkts;
3633 	}
3634 }
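/*
 * Editor's summary of the Rx path selection above: on x86, when the vector
 * path is allowed, the burst function is chosen in the order
 *   AVX512 offload > AVX512 > AVX2 offload > AVX2 > SSE,
 * each in a scattered or non-scattered flavour. Otherwise the scalar
 * scattered, bulk-alloc or normal Rx callback is installed.
 */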
3635 
3636 static const struct {
3637 	eth_rx_burst_t pkt_burst;
3638 	const char *info;
3639 } ice_rx_burst_infos[] = {
3640 	{ ice_recv_scattered_pkts,          "Scalar Scattered" },
3641 	{ ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
3642 	{ ice_recv_pkts,                    "Scalar" },
3643 #ifdef RTE_ARCH_X86
3644 #ifdef CC_AVX512_SUPPORT
3645 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
3646 	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
3647 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
3648 	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
3649 #endif
3650 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
3651 	{ ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
3652 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
3653 	{ ice_recv_pkts_vec_avx2_offload,   "Offload Vector AVX2" },
3654 	{ ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
3655 	{ ice_recv_pkts_vec,                "Vector SSE" },
3656 #endif
3657 };
3658 
3659 int
3660 ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
3661 		      struct rte_eth_burst_mode *mode)
3662 {
3663 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
3664 	int ret = -EINVAL;
3665 	unsigned int i;
3666 
3667 	for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
3668 		if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
3669 			snprintf(mode->info, sizeof(mode->info), "%s",
3670 				 ice_rx_burst_infos[i].info);
3671 			ret = 0;
3672 			break;
3673 		}
3674 	}
3675 
3676 	return ret;
3677 }
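/*
 * Hypothetical application-side usage (editor's sketch, not driver code):
 * the table above is what this callback reports through
 * rte_eth_rx_burst_mode_get(), e.g.
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */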
3678 
3679 void __rte_cold
3680 ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
3681 {
3682 	struct ice_adapter *ad =
3683 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3684 
3685 	/* Use a simple Tx queue if possible (only fast free is allowed) */
3686 	ad->tx_simple_allowed =
3687 		(txq->offloads ==
3688 		(txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) &&
3689 		txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
3690 
3691 	if (ad->tx_simple_allowed)
3692 		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
3693 			     txq->queue_id);
3694 	else
3695 		PMD_INIT_LOG(DEBUG,
3696 			     "Simple Tx can NOT be enabled on Tx queue %u.",
3697 			     txq->queue_id);
3698 }
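/*
 * Editor's note: the check above means the simple Tx path is only considered
 * when the queue requests no offload other than
 * RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE and its tx_rs_thresh is at least
 * ICE_TX_MAX_BURST, which are the assumptions tx_xmit_pkts() above is built on.
 */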
3699 
3700 /*********************************************************************
3701  *
3702  *  TX prep functions
3703  *
3704  **********************************************************************/
3705 /* Limits for TSO MSS and TSO frame size */
3706 #define ICE_MIN_TSO_MSS            64
3707 #define ICE_MAX_TSO_MSS            9728
3708 #define ICE_MAX_TSO_FRAME_SIZE     262144
3709 
3710 /* Check for an empty mbuf (any segment with zero data length) */
3711 static inline uint16_t
3712 ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
3713 {
3714 	struct rte_mbuf *txd = tx_pkt;
3715 
3716 	while (txd != NULL) {
3717 		if (txd->data_len == 0)
3718 			return -1;
3719 		txd = txd->next;
3720 	}
3721 
3722 	return 0;
3723 }
3724 
3725 /* Tx mbuf check */
3726 static uint16_t
3727 ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
3728 {
3729 	struct ice_tx_queue *txq = tx_queue;
3730 	uint16_t idx;
3731 	struct rte_mbuf *mb;
3732 	bool pkt_error = false;
3733 	uint16_t good_pkts = nb_pkts;
3734 	const char *reason = NULL;
3735 	struct ice_adapter *adapter = txq->vsi->adapter;
3736 	uint64_t ol_flags;
3737 
3738 	for (idx = 0; idx < nb_pkts; idx++) {
3739 		mb = tx_pkts[idx];
3740 		ol_flags = mb->ol_flags;
3741 
3742 		if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_MBUF) &&
3743 		    (rte_mbuf_check(mb, 1, &reason) != 0)) {
3744 			PMD_TX_LOG(ERR, "INVALID mbuf: %s", reason);
3745 			pkt_error = true;
3746 			break;
3747 		}
3748 
3749 		if ((adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SIZE) &&
3750 		    (mb->data_len > mb->pkt_len ||
3751 		     mb->data_len < ICE_TX_MIN_PKT_LEN ||
3752 		     mb->data_len > ICE_FRAME_SIZE_MAX)) {
3753 			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out of range, reasonable range (%d - %d)",
3754 				mb->data_len, ICE_TX_MIN_PKT_LEN, ICE_FRAME_SIZE_MAX);
3755 			pkt_error = true;
3756 			break;
3757 		}
3758 
3759 		if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_SEGMENT) {
3760 			if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
3761 				/**
3762 				 * No TSO case: nb_segs and pkt_len must not
3763 				 * exceed the limits.
3764 				 */
3765 				if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) {
3766 					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds HW limit, maximum allowed value is %d",
3767 						mb->nb_segs, ICE_TX_MTU_SEG_MAX);
3768 					pkt_error = true;
3769 					break;
3770 				}
3771 				if (mb->pkt_len > ICE_FRAME_SIZE_MAX) {
3772 					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds HW limit, maximum allowed value is %d",
3773 						mb->pkt_len, ICE_FRAME_SIZE_MAX);
3774 					pkt_error = true;
3775 					break;
3776 				}
3777 			} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
3778 				/** TSO case: tso_segsz, nb_segs and pkt_len
3779 				 * must not exceed the limits.
3780 				 */
3781 				if (mb->tso_segsz < ICE_MIN_TSO_MSS ||
3782 				    mb->tso_segsz > ICE_MAX_TSO_MSS) {
3783 					/**
3784 					 * An MSS outside this range is considered malicious
3785 					 */
3786 					PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out of range, reasonable range (%d - %u)",
3787 						mb->tso_segsz, ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS);
3788 					pkt_error = true;
3789 					break;
3790 				}
3791 				if (mb->nb_segs > txq->nb_tx_desc) {
3792 					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length");
3793 					pkt_error = true;
3794 					break;
3795 				}
3796 			}
3797 		}
3798 
3799 		if (adapter->devargs.mbuf_check & ICE_MBUF_CHECK_F_TX_OFFLOAD) {
3800 			if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) {
3801 				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload is not supported");
3802 				pkt_error = true;
3803 				break;
3804 			}
3805 
3806 			if (rte_validate_tx_offload(mb) != 0) {
3807 				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload setup error");
3808 				pkt_error = true;
3809 				break;
3810 			}
3811 		}
3812 	}
3813 
3814 	if (pkt_error) {
3815 		txq->mbuf_errors++;
3816 		good_pkts = idx;
3817 		if (good_pkts == 0)
3818 			return 0;
3819 	}
3820 
3821 	return adapter->tx_pkt_burst(tx_queue, tx_pkts, good_pkts);
3822 }
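/*
 * Editor's sketch of how this wrapper is wired in (see ice_set_tx_function()
 * below): when the mbuf-check devargs is enabled, the real burst function is
 * saved in ad->tx_pkt_burst and ice_xmit_pkts_check() is installed as
 * dev->tx_pkt_burst, so every burst is validated, mbuf_errors is bumped on
 * the first bad packet, and only the leading good packets are passed on.
 */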
3823 
3824 uint16_t
3825 ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
3826 	      uint16_t nb_pkts)
3827 {
3828 	int i, ret;
3829 	uint64_t ol_flags;
3830 	struct rte_mbuf *m;
3831 
3832 	for (i = 0; i < nb_pkts; i++) {
3833 		m = tx_pkts[i];
3834 		ol_flags = m->ol_flags;
3835 
3836 		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
3837 		    /**
3838 		     * No TSO case: nb_segs and pkt_len must not exceed
3839 		     * the limits.
3840 		     */
3841 		    (m->nb_segs > ICE_TX_MTU_SEG_MAX ||
3842 		     m->pkt_len > ICE_FRAME_SIZE_MAX)) {
3843 			rte_errno = EINVAL;
3844 			return i;
3845 		} else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
3846 		    /** TSO case: tso_segsz, nb_segs and pkt_len must not
3847 		     * exceed the limits.
3848 		     */
3849 		    (m->tso_segsz < ICE_MIN_TSO_MSS ||
3850 		     m->tso_segsz > ICE_MAX_TSO_MSS ||
3851 		     m->nb_segs >
3852 			((struct ice_tx_queue *)tx_queue)->nb_tx_desc ||
3853 		     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
3854 			/**
3855 			 * An MSS outside this range is considered malicious
3856 			 */
3857 			rte_errno = EINVAL;
3858 			return i;
3859 		}
3860 
3861 		if (m->pkt_len < ICE_TX_MIN_PKT_LEN) {
3862 			rte_errno = EINVAL;
3863 			return i;
3864 		}
3865 
3866 #ifdef RTE_ETHDEV_DEBUG_TX
3867 		ret = rte_validate_tx_offload(m);
3868 		if (ret != 0) {
3869 			rte_errno = -ret;
3870 			return i;
3871 		}
3872 #endif
3873 		ret = rte_net_intel_cksum_prepare(m);
3874 		if (ret != 0) {
3875 			rte_errno = -ret;
3876 			return i;
3877 		}
3878 
3879 		if (ice_check_empty_mbuf(m) != 0) {
3880 			rte_errno = EINVAL;
3881 			return i;
3882 		}
3883 	}
3884 	return i;
3885 }
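/*
 * Hypothetical application-side usage (editor's sketch, not driver code):
 * ice_prep_pkts() is exposed through dev->tx_pkt_prepare, so a caller would
 * normally validate a burst before transmitting it, e.g.
 *
 *	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
 *
 * A rejected packet sets rte_errno and is reflected in the count returned by
 * rte_eth_tx_prepare().
 */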
3886 
3887 void __rte_cold
3888 ice_set_tx_function(struct rte_eth_dev *dev)
3889 {
3890 	struct ice_adapter *ad =
3891 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
3892 	int mbuf_check = ad->devargs.mbuf_check;
3893 #ifdef RTE_ARCH_X86
3894 	struct ice_tx_queue *txq;
3895 	int i;
3896 	int tx_check_ret = -1;
3897 
3898 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3899 		ad->tx_use_avx2 = false;
3900 		ad->tx_use_avx512 = false;
3901 		tx_check_ret = ice_tx_vec_dev_check(dev);
3902 		if (tx_check_ret >= 0 &&
3903 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
3904 			ad->tx_vec_allowed = true;
3905 
3906 			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
3907 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
3908 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
3909 #ifdef CC_AVX512_SUPPORT
3910 				ad->tx_use_avx512 = true;
3911 #else
3912 			PMD_DRV_LOG(NOTICE,
3913 				"AVX512 is not supported in build env");
3914 #endif
3915 			if (!ad->tx_use_avx512 &&
3916 				(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
3917 				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
3918 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
3919 				ad->tx_use_avx2 = true;
3920 
3921 			if (!ad->tx_use_avx2 && !ad->tx_use_avx512 &&
3922 				tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
3923 				ad->tx_vec_allowed = false;
3924 
3925 			if (ad->tx_vec_allowed) {
3926 				for (i = 0; i < dev->data->nb_tx_queues; i++) {
3927 					txq = dev->data->tx_queues[i];
3928 					if (txq && ice_txq_vec_setup(txq)) {
3929 						ad->tx_vec_allowed = false;
3930 						break;
3931 					}
3932 				}
3933 			}
3934 		} else {
3935 			ad->tx_vec_allowed = false;
3936 		}
3937 	}
3938 
3939 	if (ad->tx_vec_allowed) {
3940 		dev->tx_pkt_prepare = NULL;
3941 		if (ad->tx_use_avx512) {
3942 #ifdef CC_AVX512_SUPPORT
3943 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3944 				PMD_DRV_LOG(NOTICE,
3945 					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
3946 					    dev->data->port_id);
3947 				dev->tx_pkt_burst =
3948 					ice_xmit_pkts_vec_avx512_offload;
3949 				dev->tx_pkt_prepare = ice_prep_pkts;
3950 			} else {
3951 				PMD_DRV_LOG(NOTICE,
3952 					    "Using AVX512 Vector Tx (port %d).",
3953 					    dev->data->port_id);
3954 				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
3955 			}
3956 #endif
3957 		} else {
3958 			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
3959 				PMD_DRV_LOG(NOTICE,
3960 					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
3961 					    dev->data->port_id);
3962 				dev->tx_pkt_burst =
3963 					ice_xmit_pkts_vec_avx2_offload;
3964 				dev->tx_pkt_prepare = ice_prep_pkts;
3965 			} else {
3966 				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
3967 					    ad->tx_use_avx2 ? "avx2 " : "",
3968 					    dev->data->port_id);
3969 				dev->tx_pkt_burst = ad->tx_use_avx2 ?
3970 						    ice_xmit_pkts_vec_avx2 :
3971 						    ice_xmit_pkts_vec;
3972 			}
3973 		}
3974 
3975 		if (mbuf_check) {
3976 			ad->tx_pkt_burst = dev->tx_pkt_burst;
3977 			dev->tx_pkt_burst = ice_xmit_pkts_check;
3978 		}
3979 		return;
3980 	}
3981 #endif
3982 
3983 	if (ad->tx_simple_allowed) {
3984 		PMD_INIT_LOG(DEBUG, "Simple Tx path will be used.");
3985 		dev->tx_pkt_burst = ice_xmit_pkts_simple;
3986 		dev->tx_pkt_prepare = NULL;
3987 	} else {
3988 		PMD_INIT_LOG(DEBUG, "Normal Tx path will be used.");
3989 		dev->tx_pkt_burst = ice_xmit_pkts;
3990 		dev->tx_pkt_prepare = ice_prep_pkts;
3991 	}
3992 
3993 	if (mbuf_check) {
3994 		ad->tx_pkt_burst = dev->tx_pkt_burst;
3995 		dev->tx_pkt_burst = ice_xmit_pkts_check;
3996 	}
3997 }
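/*
 * Editor's summary of the Tx path selection above: on x86 the vector paths
 * are preferred (AVX512 offload > AVX512 > AVX2 offload > AVX2 > SSE);
 * otherwise the simple path (ice_xmit_pkts_simple) or the full-featured
 * scalar ice_xmit_pkts() is used. ice_prep_pkts() is installed as the Tx
 * prepare callback only on the offload-capable paths, and the mbuf-check
 * wrapper, when enabled, is layered on top of whichever burst function was
 * chosen.
 */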
3998 
3999 static const struct {
4000 	eth_tx_burst_t pkt_burst;
4001 	const char *info;
4002 } ice_tx_burst_infos[] = {
4003 	{ ice_xmit_pkts_simple,   "Scalar Simple" },
4004 	{ ice_xmit_pkts,          "Scalar" },
4005 #ifdef RTE_ARCH_X86
4006 #ifdef CC_AVX512_SUPPORT
4007 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
4008 	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
4009 #endif
4010 	{ ice_xmit_pkts_vec_avx2,         "Vector AVX2" },
4011 	{ ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
4012 	{ ice_xmit_pkts_vec,              "Vector SSE" },
4013 #endif
4014 };
4015 
4016 int
4017 ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
4018 		      struct rte_eth_burst_mode *mode)
4019 {
4020 	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
4021 	int ret = -EINVAL;
4022 	unsigned int i;
4023 
4024 	for (i = 0; i < RTE_DIM(ice_tx_burst_infos); ++i) {
4025 		if (pkt_burst == ice_tx_burst_infos[i].pkt_burst) {
4026 			snprintf(mode->info, sizeof(mode->info), "%s",
4027 				 ice_tx_burst_infos[i].info);
4028 			ret = 0;
4029 			break;
4030 		}
4031 	}
4032 
4033 	return ret;
4034 }
4035 
4036 /* Map each hardware ptype value to an mbuf packet type; the hardware
4037  * datasheet describes what each value means in detail.
4038  * @note: update ice_dev_supported_ptypes_get() if anything changes here.
4039  */
4040 static inline uint32_t
4041 ice_get_default_pkt_type(uint16_t ptype)
4042 {
4043 	static const alignas(RTE_CACHE_LINE_SIZE) uint32_t type_table[ICE_MAX_PKT_TYPE] = {
4044 		/* L2 types */
4045 		/* [0] reserved */
4046 		[1] = RTE_PTYPE_L2_ETHER,
4047 		[2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
4048 		/* [3] - [5] reserved */
4049 		[6] = RTE_PTYPE_L2_ETHER_LLDP,
4050 		/* [7] - [10] reserved */
4051 		[11] = RTE_PTYPE_L2_ETHER_ARP,
4052 		/* [12] - [21] reserved */
4053 
4054 		/* Non tunneled IPv4 */
4055 		[22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4056 		       RTE_PTYPE_L4_FRAG,
4057 		[23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4058 		       RTE_PTYPE_L4_NONFRAG,
4059 		[24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4060 		       RTE_PTYPE_L4_UDP,
4061 		/* [25] reserved */
4062 		[26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4063 		       RTE_PTYPE_L4_TCP,
4064 		[27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4065 		       RTE_PTYPE_L4_SCTP,
4066 		[28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4067 		       RTE_PTYPE_L4_ICMP,
4068 
4069 		/* IPv4 --> IPv4 */
4070 		[29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4071 		       RTE_PTYPE_TUNNEL_IP |
4072 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4073 		       RTE_PTYPE_INNER_L4_FRAG,
4074 		[30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4075 		       RTE_PTYPE_TUNNEL_IP |
4076 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4077 		       RTE_PTYPE_INNER_L4_NONFRAG,
4078 		[31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4079 		       RTE_PTYPE_TUNNEL_IP |
4080 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4081 		       RTE_PTYPE_INNER_L4_UDP,
4082 		/* [32] reserved */
4083 		[33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4084 		       RTE_PTYPE_TUNNEL_IP |
4085 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4086 		       RTE_PTYPE_INNER_L4_TCP,
4087 		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4088 		       RTE_PTYPE_TUNNEL_IP |
4089 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4090 		       RTE_PTYPE_INNER_L4_SCTP,
4091 		[35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4092 		       RTE_PTYPE_TUNNEL_IP |
4093 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4094 		       RTE_PTYPE_INNER_L4_ICMP,
4095 
4096 		/* IPv4 --> IPv6 */
4097 		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4098 		       RTE_PTYPE_TUNNEL_IP |
4099 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4100 		       RTE_PTYPE_INNER_L4_FRAG,
4101 		[37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4102 		       RTE_PTYPE_TUNNEL_IP |
4103 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4104 		       RTE_PTYPE_INNER_L4_NONFRAG,
4105 		[38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4106 		       RTE_PTYPE_TUNNEL_IP |
4107 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4108 		       RTE_PTYPE_INNER_L4_UDP,
4109 		/* [39] reserved */
4110 		[40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4111 		       RTE_PTYPE_TUNNEL_IP |
4112 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4113 		       RTE_PTYPE_INNER_L4_TCP,
4114 		[41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4115 		       RTE_PTYPE_TUNNEL_IP |
4116 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4117 		       RTE_PTYPE_INNER_L4_SCTP,
4118 		[42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4119 		       RTE_PTYPE_TUNNEL_IP |
4120 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4121 		       RTE_PTYPE_INNER_L4_ICMP,
4122 
4123 		/* IPv4 --> GRE/Teredo/VXLAN */
4124 		[43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4125 		       RTE_PTYPE_TUNNEL_GRENAT,
4126 
4127 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
4128 		[44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4129 		       RTE_PTYPE_TUNNEL_GRENAT |
4130 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4131 		       RTE_PTYPE_INNER_L4_FRAG,
4132 		[45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4133 		       RTE_PTYPE_TUNNEL_GRENAT |
4134 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4135 		       RTE_PTYPE_INNER_L4_NONFRAG,
4136 		[46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4137 		       RTE_PTYPE_TUNNEL_GRENAT |
4138 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4139 		       RTE_PTYPE_INNER_L4_UDP,
4140 		/* [47] reserved */
4141 		[48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4142 		       RTE_PTYPE_TUNNEL_GRENAT |
4143 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4144 		       RTE_PTYPE_INNER_L4_TCP,
4145 		[49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4146 		       RTE_PTYPE_TUNNEL_GRENAT |
4147 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4148 		       RTE_PTYPE_INNER_L4_SCTP,
4149 		[50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4150 		       RTE_PTYPE_TUNNEL_GRENAT |
4151 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4152 		       RTE_PTYPE_INNER_L4_ICMP,
4153 
4154 		/* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
4155 		[51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4156 		       RTE_PTYPE_TUNNEL_GRENAT |
4157 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4158 		       RTE_PTYPE_INNER_L4_FRAG,
4159 		[52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4160 		       RTE_PTYPE_TUNNEL_GRENAT |
4161 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4162 		       RTE_PTYPE_INNER_L4_NONFRAG,
4163 		[53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4164 		       RTE_PTYPE_TUNNEL_GRENAT |
4165 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4166 		       RTE_PTYPE_INNER_L4_UDP,
4167 		/* [54] reserved */
4168 		[55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4169 		       RTE_PTYPE_TUNNEL_GRENAT |
4170 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4171 		       RTE_PTYPE_INNER_L4_TCP,
4172 		[56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4173 		       RTE_PTYPE_TUNNEL_GRENAT |
4174 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4175 		       RTE_PTYPE_INNER_L4_SCTP,
4176 		[57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4177 		       RTE_PTYPE_TUNNEL_GRENAT |
4178 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4179 		       RTE_PTYPE_INNER_L4_ICMP,
4180 
4181 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC */
4182 		[58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4183 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
4184 
4185 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
4186 		[59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4187 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4188 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4189 		       RTE_PTYPE_INNER_L4_FRAG,
4190 		[60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4191 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4192 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4193 		       RTE_PTYPE_INNER_L4_NONFRAG,
4194 		[61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4195 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4196 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4197 		       RTE_PTYPE_INNER_L4_UDP,
4198 		/* [62] reserved */
4199 		[63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4200 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4201 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4202 		       RTE_PTYPE_INNER_L4_TCP,
4203 		[64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4204 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4205 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4206 		       RTE_PTYPE_INNER_L4_SCTP,
4207 		[65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4208 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4209 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4210 		       RTE_PTYPE_INNER_L4_ICMP,
4211 
4212 		/* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
4213 		[66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4214 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4215 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4216 		       RTE_PTYPE_INNER_L4_FRAG,
4217 		[67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4218 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4219 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4220 		       RTE_PTYPE_INNER_L4_NONFRAG,
4221 		[68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4222 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4223 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4224 		       RTE_PTYPE_INNER_L4_UDP,
4225 		/* [69] reserved */
4226 		[70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4227 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4228 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4229 		       RTE_PTYPE_INNER_L4_TCP,
4230 		[71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4231 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4232 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4233 		       RTE_PTYPE_INNER_L4_SCTP,
4234 		[72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4235 		       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4236 		       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4237 		       RTE_PTYPE_INNER_L4_ICMP,
4238 		/* [73] - [87] reserved */
4239 
4240 		/* Non tunneled IPv6 */
4241 		[88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4242 		       RTE_PTYPE_L4_FRAG,
4243 		[89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4244 		       RTE_PTYPE_L4_NONFRAG,
4245 		[90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4246 		       RTE_PTYPE_L4_UDP,
4247 		/* [91] reserved */
4248 		[92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4249 		       RTE_PTYPE_L4_TCP,
4250 		[93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4251 		       RTE_PTYPE_L4_SCTP,
4252 		[94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4253 		       RTE_PTYPE_L4_ICMP,
4254 
4255 		/* IPv6 --> IPv4 */
4256 		[95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4257 		       RTE_PTYPE_TUNNEL_IP |
4258 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4259 		       RTE_PTYPE_INNER_L4_FRAG,
4260 		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4261 		       RTE_PTYPE_TUNNEL_IP |
4262 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4263 		       RTE_PTYPE_INNER_L4_NONFRAG,
4264 		[97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4265 		       RTE_PTYPE_TUNNEL_IP |
4266 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4267 		       RTE_PTYPE_INNER_L4_UDP,
4268 		/* [98] reserved */
4269 		[99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4270 		       RTE_PTYPE_TUNNEL_IP |
4271 		       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4272 		       RTE_PTYPE_INNER_L4_TCP,
4273 		[100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4274 			RTE_PTYPE_TUNNEL_IP |
4275 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4276 			RTE_PTYPE_INNER_L4_SCTP,
4277 		[101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4278 			RTE_PTYPE_TUNNEL_IP |
4279 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4280 			RTE_PTYPE_INNER_L4_ICMP,
4281 
4282 		/* IPv6 --> IPv6 */
4283 		[102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4284 			RTE_PTYPE_TUNNEL_IP |
4285 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4286 			RTE_PTYPE_INNER_L4_FRAG,
4287 		[103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4288 			RTE_PTYPE_TUNNEL_IP |
4289 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4290 			RTE_PTYPE_INNER_L4_NONFRAG,
4291 		[104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4292 			RTE_PTYPE_TUNNEL_IP |
4293 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4294 			RTE_PTYPE_INNER_L4_UDP,
4295 		/* [105] reserved */
4296 		[106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4297 			RTE_PTYPE_TUNNEL_IP |
4298 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4299 			RTE_PTYPE_INNER_L4_TCP,
4300 		[107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4301 			RTE_PTYPE_TUNNEL_IP |
4302 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4303 			RTE_PTYPE_INNER_L4_SCTP,
4304 		[108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4305 			RTE_PTYPE_TUNNEL_IP |
4306 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4307 			RTE_PTYPE_INNER_L4_ICMP,
4308 
4309 		/* IPv6 --> GRE/Teredo/VXLAN */
4310 		[109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4311 			RTE_PTYPE_TUNNEL_GRENAT,
4312 
4313 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
4314 		[110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4315 			RTE_PTYPE_TUNNEL_GRENAT |
4316 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4317 			RTE_PTYPE_INNER_L4_FRAG,
4318 		[111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4319 			RTE_PTYPE_TUNNEL_GRENAT |
4320 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4321 			RTE_PTYPE_INNER_L4_NONFRAG,
4322 		[112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4323 			RTE_PTYPE_TUNNEL_GRENAT |
4324 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4325 			RTE_PTYPE_INNER_L4_UDP,
4326 		/* [113] reserved */
4327 		[114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4328 			RTE_PTYPE_TUNNEL_GRENAT |
4329 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4330 			RTE_PTYPE_INNER_L4_TCP,
4331 		[115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4332 			RTE_PTYPE_TUNNEL_GRENAT |
4333 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4334 			RTE_PTYPE_INNER_L4_SCTP,
4335 		[116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4336 			RTE_PTYPE_TUNNEL_GRENAT |
4337 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4338 			RTE_PTYPE_INNER_L4_ICMP,
4339 
4340 		/* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
4341 		[117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4342 			RTE_PTYPE_TUNNEL_GRENAT |
4343 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4344 			RTE_PTYPE_INNER_L4_FRAG,
4345 		[118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4346 			RTE_PTYPE_TUNNEL_GRENAT |
4347 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4348 			RTE_PTYPE_INNER_L4_NONFRAG,
4349 		[119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4350 			RTE_PTYPE_TUNNEL_GRENAT |
4351 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4352 			RTE_PTYPE_INNER_L4_UDP,
4353 		/* [120] reserved */
4354 		[121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4355 			RTE_PTYPE_TUNNEL_GRENAT |
4356 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4357 			RTE_PTYPE_INNER_L4_TCP,
4358 		[122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4359 			RTE_PTYPE_TUNNEL_GRENAT |
4360 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4361 			RTE_PTYPE_INNER_L4_SCTP,
4362 		[123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4363 			RTE_PTYPE_TUNNEL_GRENAT |
4364 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4365 			RTE_PTYPE_INNER_L4_ICMP,
4366 
4367 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC */
4368 		[124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4369 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
4370 
4371 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
4372 		[125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4373 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4374 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4375 			RTE_PTYPE_INNER_L4_FRAG,
4376 		[126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4377 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4378 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4379 			RTE_PTYPE_INNER_L4_NONFRAG,
4380 		[127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4381 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4382 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4383 			RTE_PTYPE_INNER_L4_UDP,
4384 		/* [128] reserved */
4385 		[129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4386 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4387 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4388 			RTE_PTYPE_INNER_L4_TCP,
4389 		[130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4390 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4391 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4392 			RTE_PTYPE_INNER_L4_SCTP,
4393 		[131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4394 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4395 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4396 			RTE_PTYPE_INNER_L4_ICMP,
4397 
4398 		/* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
4399 		[132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4400 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4401 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4402 			RTE_PTYPE_INNER_L4_FRAG,
4403 		[133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4404 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4405 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4406 			RTE_PTYPE_INNER_L4_NONFRAG,
4407 		[134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4408 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4409 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4410 			RTE_PTYPE_INNER_L4_UDP,
4411 		/* [135] reserved */
4412 		[136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4413 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4414 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4415 			RTE_PTYPE_INNER_L4_TCP,
4416 		[137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4417 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4418 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4419 			RTE_PTYPE_INNER_L4_SCTP,
4420 		[138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4421 			RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
4422 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4423 			RTE_PTYPE_INNER_L4_ICMP,
4424 		/* [139] - [299] reserved */
4425 
4426 		/* PPPoE */
4427 		[300] = RTE_PTYPE_L2_ETHER_PPPOE,
4428 		[301] = RTE_PTYPE_L2_ETHER_PPPOE,
4429 
4430 		/* PPPoE --> IPv4 */
4431 		[302] = RTE_PTYPE_L2_ETHER_PPPOE |
4432 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4433 			RTE_PTYPE_L4_FRAG,
4434 		[303] = RTE_PTYPE_L2_ETHER_PPPOE |
4435 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4436 			RTE_PTYPE_L4_NONFRAG,
4437 		[304] = RTE_PTYPE_L2_ETHER_PPPOE |
4438 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4439 			RTE_PTYPE_L4_UDP,
4440 		[305] = RTE_PTYPE_L2_ETHER_PPPOE |
4441 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4442 			RTE_PTYPE_L4_TCP,
4443 		[306] = RTE_PTYPE_L2_ETHER_PPPOE |
4444 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4445 			RTE_PTYPE_L4_SCTP,
4446 		[307] = RTE_PTYPE_L2_ETHER_PPPOE |
4447 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4448 			RTE_PTYPE_L4_ICMP,
4449 
4450 		/* PPPoE --> IPv6 */
4451 		[308] = RTE_PTYPE_L2_ETHER_PPPOE |
4452 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4453 			RTE_PTYPE_L4_FRAG,
4454 		[309] = RTE_PTYPE_L2_ETHER_PPPOE |
4455 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4456 			RTE_PTYPE_L4_NONFRAG,
4457 		[310] = RTE_PTYPE_L2_ETHER_PPPOE |
4458 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4459 			RTE_PTYPE_L4_UDP,
4460 		[311] = RTE_PTYPE_L2_ETHER_PPPOE |
4461 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4462 			RTE_PTYPE_L4_TCP,
4463 		[312] = RTE_PTYPE_L2_ETHER_PPPOE |
4464 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4465 			RTE_PTYPE_L4_SCTP,
4466 		[313] = RTE_PTYPE_L2_ETHER_PPPOE |
4467 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4468 			RTE_PTYPE_L4_ICMP,
4469 		/* [314] - [324] reserved */
4470 
4471 		/* IPv4/IPv6 --> GTPC/GTPU */
4472 		[325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4473 			RTE_PTYPE_TUNNEL_GTPC,
4474 		[326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4475 			RTE_PTYPE_TUNNEL_GTPC,
4476 		[327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4477 			RTE_PTYPE_TUNNEL_GTPC,
4478 		[328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4479 			RTE_PTYPE_TUNNEL_GTPC,
4480 		[329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4481 			RTE_PTYPE_TUNNEL_GTPU,
4482 		[330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4483 			RTE_PTYPE_TUNNEL_GTPU,
4484 
4485 		/* IPv4 --> GTPU --> IPv4 */
4486 		[331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4487 			RTE_PTYPE_TUNNEL_GTPU |
4488 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4489 			RTE_PTYPE_INNER_L4_FRAG,
4490 		[332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4491 			RTE_PTYPE_TUNNEL_GTPU |
4492 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4493 			RTE_PTYPE_INNER_L4_NONFRAG,
4494 		[333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4495 			RTE_PTYPE_TUNNEL_GTPU |
4496 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4497 			RTE_PTYPE_INNER_L4_UDP,
4498 		[334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4499 			RTE_PTYPE_TUNNEL_GTPU |
4500 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4501 			RTE_PTYPE_INNER_L4_TCP,
4502 		[335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4503 			RTE_PTYPE_TUNNEL_GTPU |
4504 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4505 			RTE_PTYPE_INNER_L4_ICMP,
4506 
4507 		/* IPv6 --> GTPU --> IPv4 */
4508 		[336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4509 			RTE_PTYPE_TUNNEL_GTPU |
4510 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4511 			RTE_PTYPE_INNER_L4_FRAG,
4512 		[337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4513 			RTE_PTYPE_TUNNEL_GTPU |
4514 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4515 			RTE_PTYPE_INNER_L4_NONFRAG,
4516 		[338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4517 			RTE_PTYPE_TUNNEL_GTPU |
4518 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4519 			RTE_PTYPE_INNER_L4_UDP,
4520 		[339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4521 			RTE_PTYPE_TUNNEL_GTPU |
4522 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4523 			RTE_PTYPE_INNER_L4_TCP,
4524 		[340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4525 			RTE_PTYPE_TUNNEL_GTPU |
4526 			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
4527 			RTE_PTYPE_INNER_L4_ICMP,
4528 
4529 		/* IPv4 --> GTPU --> IPv6 */
4530 		[341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4531 			RTE_PTYPE_TUNNEL_GTPU |
4532 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4533 			RTE_PTYPE_INNER_L4_FRAG,
4534 		[342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4535 			RTE_PTYPE_TUNNEL_GTPU |
4536 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4537 			RTE_PTYPE_INNER_L4_NONFRAG,
4538 		[343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4539 			RTE_PTYPE_TUNNEL_GTPU |
4540 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4541 			RTE_PTYPE_INNER_L4_UDP,
4542 		[344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4543 			RTE_PTYPE_TUNNEL_GTPU |
4544 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4545 			RTE_PTYPE_INNER_L4_TCP,
4546 		[345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4547 			RTE_PTYPE_TUNNEL_GTPU |
4548 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4549 			RTE_PTYPE_INNER_L4_ICMP,
4550 
4551 		/* IPv6 --> GTPU --> IPv6 */
4552 		[346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4553 			RTE_PTYPE_TUNNEL_GTPU |
4554 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4555 			RTE_PTYPE_INNER_L4_FRAG,
4556 		[347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4557 			RTE_PTYPE_TUNNEL_GTPU |
4558 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4559 			RTE_PTYPE_INNER_L4_NONFRAG,
4560 		[348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4561 			RTE_PTYPE_TUNNEL_GTPU |
4562 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4563 			RTE_PTYPE_INNER_L4_UDP,
4564 		[349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4565 			RTE_PTYPE_TUNNEL_GTPU |
4566 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4567 			RTE_PTYPE_INNER_L4_TCP,
4568 		[350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4569 			RTE_PTYPE_TUNNEL_GTPU |
4570 			RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
4571 			RTE_PTYPE_INNER_L4_ICMP,
4572 
4573 		/* IPv4 --> UDP ECPRI */
4574 		[372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4575 			RTE_PTYPE_L4_UDP,
4576 		[373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4577 			RTE_PTYPE_L4_UDP,
4578 		[374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4579 			RTE_PTYPE_L4_UDP,
4580 		[375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4581 			RTE_PTYPE_L4_UDP,
4582 		[376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4583 			RTE_PTYPE_L4_UDP,
4584 		[377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4585 			RTE_PTYPE_L4_UDP,
4586 		[378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4587 			RTE_PTYPE_L4_UDP,
4588 		[379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4589 			RTE_PTYPE_L4_UDP,
4590 		[380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4591 			RTE_PTYPE_L4_UDP,
4592 		[381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
4593 			RTE_PTYPE_L4_UDP,
4594 
4595 		/* IPV6 --> UDP ECPRI */
4596 		[382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4597 			RTE_PTYPE_L4_UDP,
4598 		[383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4599 			RTE_PTYPE_L4_UDP,
4600 		[384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4601 			RTE_PTYPE_L4_UDP,
4602 		[385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4603 			RTE_PTYPE_L4_UDP,
4604 		[386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4605 			RTE_PTYPE_L4_UDP,
4606 		[387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4607 			RTE_PTYPE_L4_UDP,
4608 		[388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4609 			RTE_PTYPE_L4_UDP,
4610 		[389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4611 			RTE_PTYPE_L4_UDP,
4612 		[390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4613 			RTE_PTYPE_L4_UDP,
4614 		[391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
4615 			RTE_PTYPE_L4_UDP,
4616 		/* All others reserved */
4617 	};
4618 
4619 	return type_table[ptype];
4620 }
4621 
4622 void __rte_cold
4623 ice_set_default_ptype_table(struct rte_eth_dev *dev)
4624 {
4625 	struct ice_adapter *ad =
4626 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
4627 	int i;
4628 
4629 	for (i = 0; i < ICE_MAX_PKT_TYPE; i++)
4630 		ad->ptype_tbl[i] = ice_get_default_pkt_type(i);
4631 }
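/*
 * Illustrative lookup (editor's note): the Rx paths use ad->ptype_tbl to turn
 * the 10-bit hardware ptype from the Rx descriptor into mb->packet_type. For
 * example, a received ptype of 24 resolves, per the table above, to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP.
 */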
4632 
4633 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S	1
4634 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M	\
4635 			(0x3UL << ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S)
4636 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD 0
4637 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL 0x1
4638 
4639 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S	4
4640 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M	\
4641 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S)
4642 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S	5
4643 #define ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M	\
4644 	(1 << ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S)
4645 
4646 /*
4647  * Check the Flow Director programming status descriptor in the Rx queue.
4648  * This is done after a Flow Director filter has been programmed via the
4649  * Tx queue.
4650  */
4651 static inline int
4652 ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
4653 {
4654 	volatile union ice_32byte_rx_desc *rxdp;
4655 	uint64_t qword1;
4656 	uint32_t rx_status;
4657 	uint32_t error;
4658 	uint32_t id;
4659 	int ret = -EAGAIN;
4660 
4661 	rxdp = (volatile union ice_32byte_rx_desc *)
4662 		(&rxq->rx_ring[rxq->rx_tail]);
4663 	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
4664 	rx_status = (qword1 & ICE_RXD_QW1_STATUS_M)
4665 			>> ICE_RXD_QW1_STATUS_S;
4666 
4667 	if (rx_status & (1 << ICE_RX_DESC_STATUS_DD_S)) {
4668 		ret = 0;
4669 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_M) >>
4670 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_S;
4671 		id = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_M) >>
4672 			ICE_RX_PROG_STATUS_DESC_WB_QW1_PROGID_S;
4673 		if (error) {
4674 			if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_ADD)
4675 				PMD_DRV_LOG(ERR, "Failed to add FDIR rule.");
4676 			else if (id == ICE_RX_PROG_STATUS_DESC_WB_QW1_PROG_DEL)
4677 				PMD_DRV_LOG(ERR, "Failed to remove FDIR rule.");
4678 			ret = -EINVAL;
4679 			goto err;
4680 		}
4681 		error = (qword1 & ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_M) >>
4682 			ICE_RX_PROG_STATUS_DESC_WB_QW1_FAIL_PROF_S;
4683 		if (error) {
4684 			PMD_DRV_LOG(ERR, "Failed to create FDIR profile.");
4685 			ret = -EINVAL;
4686 		}
4687 err:
4688 		rxdp->wb.qword1.status_error_len = 0;
4689 		rxq->rx_tail++;
4690 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
4691 			rxq->rx_tail = 0;
4692 		if (rxq->rx_tail == 0)
4693 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
4694 		else
4695 			ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
4696 	}
4697 
4698 	return ret;
4699 }
4700 
4701 #define ICE_FDIR_MAX_WAIT_US 10000
4702 
4703 int
4704 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
4705 {
4706 	struct ice_tx_queue *txq = pf->fdir.txq;
4707 	struct ice_rx_queue *rxq = pf->fdir.rxq;
4708 	volatile struct ice_fltr_desc *fdirdp;
4709 	volatile struct ice_tx_desc *txdp;
4710 	uint32_t td_cmd;
4711 	uint16_t i;
4712 
4713 	fdirdp = (volatile struct ice_fltr_desc *)
4714 		(&txq->tx_ring[txq->tx_tail]);
4715 	fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat;
4716 	fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid;
4717 
4718 	txdp = &txq->tx_ring[txq->tx_tail + 1];
4719 	txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
4720 	td_cmd = ICE_TX_DESC_CMD_EOP |
4721 		ICE_TX_DESC_CMD_RS  |
4722 		ICE_TX_DESC_CMD_DUMMY;
4723 
4724 	txdp->cmd_type_offset_bsz =
4725 		ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0);
4726 
4727 	txq->tx_tail += 2;
4728 	if (txq->tx_tail >= txq->nb_tx_desc)
4729 		txq->tx_tail = 0;
4730 	/* Update the tx tail register */
4731 	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
4732 	for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) {
4733 		if ((txdp->cmd_type_offset_bsz &
4734 		     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
4735 		    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
4736 			break;
4737 		rte_delay_us(1);
4738 	}
4739 	if (i >= ICE_FDIR_MAX_WAIT_US) {
4740 		PMD_DRV_LOG(ERR,
4741 			    "Failed to program FDIR filter: time out to get DD on tx queue.");
4742 		return -ETIMEDOUT;
4743 	}
4744 
4745 	for (; i < ICE_FDIR_MAX_WAIT_US; i++) {
4746 		int ret;
4747 
4748 		ret = ice_check_fdir_programming_status(rxq);
4749 		if (ret == -EAGAIN)
4750 			rte_delay_us(1);
4751 		else
4752 			return ret;
4753 	}
4754 
4755 	PMD_DRV_LOG(ERR,
4756 		    "Failed to program FDIR filter: timed out waiting for programming status.");
4757 	return -ETIMEDOUT;
4758 
4760 }
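/*
 * Editor's sketch of the flow above: programming a Flow Director rule writes
 * two descriptors to the dedicated FDIR Tx queue (the filter descriptor plus
 * a dummy data descriptor pointing at pf->fdir.dma_addr), bumps the tail by
 * two, busy-waits up to ICE_FDIR_MAX_WAIT_US microseconds for the DD bit on
 * the data descriptor, and then polls the companion Rx queue via
 * ice_check_fdir_programming_status() for the hardware's add/delete result.
 */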
4761