xref: /dpdk/drivers/net/enic/enic_rxtx.c (revision 00ce43111dc5b364722c882cdd37d3664d87b6cc)
12e99ea80SHyong Youb Kim /* SPDX-License-Identifier: BSD-3-Clause
22e99ea80SHyong Youb Kim  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3606adbd5SJohn Daley  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4606adbd5SJohn Daley  */
5606adbd5SJohn Daley 
6606adbd5SJohn Daley #include <rte_mbuf.h>
7df96fd0dSBruce Richardson #include <ethdev_driver.h>
81e81dbb5SHyong Youb Kim #include <rte_net.h>
9606adbd5SJohn Daley #include <rte_prefetch.h>
10606adbd5SJohn Daley 
11606adbd5SJohn Daley #include "enic_compat.h"
12606adbd5SJohn Daley #include "rq_enet_desc.h"
13606adbd5SJohn Daley #include "enic.h"
14cd4e7b32SHyong Youb Kim #include "enic_rxtx_common.h"
15026afc76SJohn Daley #include <rte_ether.h>
16026afc76SJohn Daley #include <rte_ip.h>
17026afc76SJohn Daley #include <rte_tcp.h>
18606adbd5SJohn Daley 
19606adbd5SJohn Daley #define RTE_PMD_USE_PREFETCH
20606adbd5SJohn Daley 
21606adbd5SJohn Daley #ifdef RTE_PMD_USE_PREFETCH
22606adbd5SJohn Daley /* Prefetch a cache line into all cache levels. */
23606adbd5SJohn Daley #define rte_enic_prefetch(p) rte_prefetch0(p)
24606adbd5SJohn Daley #else
25606adbd5SJohn Daley #define rte_enic_prefetch(p) do {} while (0)
26606adbd5SJohn Daley #endif
27606adbd5SJohn Daley 
28606adbd5SJohn Daley #ifdef RTE_PMD_PACKET_PREFETCH
29606adbd5SJohn Daley #define rte_packet_prefetch(p) rte_prefetch1(p)
30606adbd5SJohn Daley #else
31606adbd5SJohn Daley #define rte_packet_prefetch(p) do {} while (0)
32606adbd5SJohn Daley #endif
33606adbd5SJohn Daley 
348b428cb5SHyong Youb Kim static inline uint16_t
358b428cb5SHyong Youb Kim enic_recv_pkts_common(void *rx_queue, struct rte_mbuf **rx_pkts,
368b428cb5SHyong Youb Kim 		      uint16_t nb_pkts, const bool use_64b_desc)
37606adbd5SJohn Daley {
38856d7ba7SNelson Escobar 	struct vnic_rq *sop_rq = rx_queue;
39856d7ba7SNelson Escobar 	struct vnic_rq *data_rq;
40856d7ba7SNelson Escobar 	struct vnic_rq *rq;
41856d7ba7SNelson Escobar 	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
42856d7ba7SNelson Escobar 	uint16_t cq_idx;
432c06cebeSJohn Daley 	uint16_t rq_idx, max_rx;
44856d7ba7SNelson Escobar 	uint16_t rq_num;
45606adbd5SJohn Daley 	struct rte_mbuf *nmb, *rxmb;
46856d7ba7SNelson Escobar 	uint16_t nb_rx = 0;
47606adbd5SJohn Daley 	struct vnic_cq *cq;
48606adbd5SJohn Daley 	volatile struct cq_desc *cqd_ptr;
49606adbd5SJohn Daley 	uint8_t color;
5093fb21fdSHyong Youb Kim 	uint8_t tnl;
51856d7ba7SNelson Escobar 	uint16_t seg_length;
52856d7ba7SNelson Escobar 	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
53856d7ba7SNelson Escobar 	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
548b428cb5SHyong Youb Kim 	const int desc_size = use_64b_desc ?
558b428cb5SHyong Youb Kim 		sizeof(struct cq_enet_rq_desc_64) :
568b428cb5SHyong Youb Kim 		sizeof(struct cq_enet_rq_desc);
578b428cb5SHyong Youb Kim 	RTE_BUILD_BUG_ON(sizeof(struct cq_enet_rq_desc_64) != 64);
58*00ce4311SHyong Youb Kim 	uint64_t bytes;
59606adbd5SJohn Daley 
60856d7ba7SNelson Escobar 	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
61856d7ba7SNelson Escobar 	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
628b428cb5SHyong Youb Kim 	cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
6368b6e6e6SJohn Daley 				     (uintptr_t)cq_idx * desc_size);
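	/*
	 * CQ descriptor ownership is tracked with the color bit: completions
	 * written by the NIC carry the color opposite to cq->last_color, and
	 * last_color is toggled below whenever cq_idx wraps. A descriptor
	 * whose color still equals last_color has therefore not been
	 * completed yet.
	 */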
642c06cebeSJohn Daley 	color = cq->last_color;
65606adbd5SJohn Daley 
66856d7ba7SNelson Escobar 	data_rq = &enic->rq[sop_rq->data_queue_idx];
67606adbd5SJohn Daley 
682c06cebeSJohn Daley 	/* Receive until the end of the ring, at most. */
692c06cebeSJohn Daley 	max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
702c06cebeSJohn Daley 
71*00ce4311SHyong Youb Kim 	bytes = 0;
72*00ce4311SHyong Youb Kim 
732c06cebeSJohn Daley 	while (max_rx) {
74606adbd5SJohn Daley 		volatile struct rq_enet_desc *rqd_ptr;
75606adbd5SJohn Daley 		struct cq_desc cqd;
76606adbd5SJohn Daley 		uint8_t packet_error;
77856d7ba7SNelson Escobar 		uint16_t ciflags;
788b428cb5SHyong Youb Kim 		uint8_t tc;
7922572e84SJohn Daley 		uint16_t rq_idx_msbs = 0;
80606adbd5SJohn Daley 
812c06cebeSJohn Daley 		max_rx--;
822c06cebeSJohn Daley 
838b428cb5SHyong Youb Kim 		tc = *(volatile uint8_t *)((uintptr_t)cqd_ptr + desc_size - 1);
84606adbd5SJohn Daley 		/* Check for pkts available */
858b428cb5SHyong Youb Kim 		if ((tc & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
86606adbd5SJohn Daley 			break;
87606adbd5SJohn Daley 
88856d7ba7SNelson Escobar 		/* Get the cq descriptor and extract rq info from it */
89606adbd5SJohn Daley 		cqd = *cqd_ptr;
9022572e84SJohn Daley 
918b428cb5SHyong Youb Kim 		/*
9222572e84SJohn Daley 		 * The first 16B of a 64B descriptor is identical to a 16B
9322572e84SJohn Daley 		 * descriptor except for the type_color and fetch index. Extract
9422572e84SJohn Daley 		 * fetch index and copy the type_color from the 64B to where it
9522572e84SJohn Daley 		 * would be in a 16B descriptor so subsequent code can run
9622572e84SJohn Daley 		 * without further conditionals.
978b428cb5SHyong Youb Kim 		 */
9822572e84SJohn Daley 		if (use_64b_desc) {
9922572e84SJohn Daley 			rq_idx_msbs = (((volatile struct cq_enet_rq_desc_64 *)
10022572e84SJohn Daley 				      cqd_ptr)->fetch_idx_flags
10122572e84SJohn Daley 				      & CQ_ENET_RQ_DESC_FETCH_IDX_MASK)
10222572e84SJohn Daley 				      << CQ_DESC_COMP_NDX_BITS;
1038b428cb5SHyong Youb Kim 			cqd.type_color = tc;
10422572e84SJohn Daley 		}
105856d7ba7SNelson Escobar 		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
10622572e84SJohn Daley 		rq_idx = rq_idx_msbs +
10722572e84SJohn Daley 			 (cqd.completed_index & CQ_DESC_COMP_NDX_MASK);
108856d7ba7SNelson Escobar 
109856d7ba7SNelson Escobar 		rq = &enic->rq[rq_num];
110856d7ba7SNelson Escobar 		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
111606adbd5SJohn Daley 
112606adbd5SJohn Daley 		/* allocate a new mbuf */
113606adbd5SJohn Daley 		nmb = rte_mbuf_raw_alloc(rq->mp);
114606adbd5SJohn Daley 		if (nmb == NULL) {
115606adbd5SJohn Daley 			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
116606adbd5SJohn Daley 			break;
117606adbd5SJohn Daley 		}
118606adbd5SJohn Daley 
119606adbd5SJohn Daley 		/* A packet error means descriptor and data are untrusted */
120606adbd5SJohn Daley 		packet_error = enic_cq_rx_check_err(&cqd);
121606adbd5SJohn Daley 
122606adbd5SJohn Daley 		/* Get the mbuf to return and replace with one just allocated */
123856d7ba7SNelson Escobar 		rxmb = rq->mbuf_ring[rq_idx];
124856d7ba7SNelson Escobar 		rq->mbuf_ring[rq_idx] = nmb;
125856d7ba7SNelson Escobar 		cq_idx++;
126606adbd5SJohn Daley 
127606adbd5SJohn Daley 		/* Prefetch next mbuf & desc while processing current one */
1288b428cb5SHyong Youb Kim 		cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
12968b6e6e6SJohn Daley 					     (uintptr_t)cq_idx * desc_size);
130606adbd5SJohn Daley 		rte_enic_prefetch(cqd_ptr);
131856d7ba7SNelson Escobar 
132856d7ba7SNelson Escobar 		ciflags = enic_cq_rx_desc_ciflags(
133856d7ba7SNelson Escobar 			(struct cq_enet_rq_desc *)&cqd);
134606adbd5SJohn Daley 
135606adbd5SJohn Daley 		/* Push descriptor for newly allocated mbuf */
1361ccc51b0SJohn Daley 		nmb->data_off = RTE_PKTMBUF_HEADROOM;
137b5df2f7aSHyong Youb Kim 		/*
138b5df2f7aSHyong Youb Kim 		 * Only the address needs to be refilled. length_type of the
139b5df2f7aSHyong Youb Kim 		 * descriptor is set during initialization
140b5df2f7aSHyong Youb Kim 		 * (enic_alloc_rx_queue_mbufs) and does not change.
141b5df2f7aSHyong Youb Kim 		 */
142b5df2f7aSHyong Youb Kim 		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
143856d7ba7SNelson Escobar 						    RTE_PKTMBUF_HEADROOM);
144606adbd5SJohn Daley 
145856d7ba7SNelson Escobar 		/* Fill in the rest of the mbuf */
146856d7ba7SNelson Escobar 		seg_length = enic_cq_rx_desc_n_bytes(&cqd);
147856d7ba7SNelson Escobar 
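		/*
		 * A sop-RQ completion starts a new packet; a data-RQ
		 * completion appends another segment to the packet in
		 * progress.
		 */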
148856d7ba7SNelson Escobar 		if (rq->is_sop) {
149856d7ba7SNelson Escobar 			first_seg = rxmb;
150856d7ba7SNelson Escobar 			first_seg->pkt_len = seg_length;
151856d7ba7SNelson Escobar 		} else {
152856d7ba7SNelson Escobar 			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
153856d7ba7SNelson Escobar 							+ seg_length);
154856d7ba7SNelson Escobar 			first_seg->nb_segs++;
155856d7ba7SNelson Escobar 			last_seg->next = rxmb;
156856d7ba7SNelson Escobar 		}
157856d7ba7SNelson Escobar 
158856d7ba7SNelson Escobar 		rxmb->port = enic->port_id;
159856d7ba7SNelson Escobar 		rxmb->data_len = seg_length;
160856d7ba7SNelson Escobar 
161*00ce4311SHyong Youb Kim 		bytes += seg_length;
162*00ce4311SHyong Youb Kim 
163856d7ba7SNelson Escobar 		rq->rx_nb_hold++;
164856d7ba7SNelson Escobar 
165856d7ba7SNelson Escobar 		if (!(enic_cq_rx_desc_eop(ciflags))) {
166856d7ba7SNelson Escobar 			last_seg = rxmb;
167606adbd5SJohn Daley 			continue;
168606adbd5SJohn Daley 		}
169606adbd5SJohn Daley 
17093fb21fdSHyong Youb Kim 		/*
17193fb21fdSHyong Youb Kim 		 * When overlay offload is enabled, CQ.fcoe indicates the
17293fb21fdSHyong Youb Kim 		 * packet is tunnelled.
17393fb21fdSHyong Youb Kim 		 */
17493fb21fdSHyong Youb Kim 		tnl = enic->overlay_offload &&
17593fb21fdSHyong Youb Kim 			(ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
176856d7ba7SNelson Escobar 		/* cq rx flags are only valid if eop bit is set */
17793fb21fdSHyong Youb Kim 		first_seg->packet_type =
17893fb21fdSHyong Youb Kim 			enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
179856d7ba7SNelson Escobar 		enic_cq_rx_to_pkt_flags(&cqd, first_seg);
1802c06cebeSJohn Daley 
18193fb21fdSHyong Youb Kim 		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
18293fb21fdSHyong Youb Kim 		if (tnl) {
18393fb21fdSHyong Youb Kim 			first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
18493fb21fdSHyong Youb Kim 						    RTE_PTYPE_L4_MASK);
18593fb21fdSHyong Youb Kim 		}
186856d7ba7SNelson Escobar 		if (unlikely(packet_error)) {
187856d7ba7SNelson Escobar 			rte_pktmbuf_free(first_seg);
188856d7ba7SNelson Escobar 			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
189856d7ba7SNelson Escobar 			continue;
190856d7ba7SNelson Escobar 		}
191856d7ba7SNelson Escobar 
192606adbd5SJohn Daley 
193606adbd5SJohn Daley 		/* prefetch mbuf data for caller */
194856d7ba7SNelson Escobar 		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
195606adbd5SJohn Daley 				    RTE_PKTMBUF_HEADROOM));
196606adbd5SJohn Daley 
197606adbd5SJohn Daley 		/* store the mbuf address into the next entry of the array */
198856d7ba7SNelson Escobar 		rx_pkts[nb_rx++] = first_seg;
199606adbd5SJohn Daley 	}
2002c06cebeSJohn Daley 	if (unlikely(cq_idx == cq->ring.desc_count)) {
2012c06cebeSJohn Daley 		cq_idx = 0;
2022c06cebeSJohn Daley 		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
2032c06cebeSJohn Daley 	}
204606adbd5SJohn Daley 
205856d7ba7SNelson Escobar 	sop_rq->pkt_first_seg = first_seg;
206856d7ba7SNelson Escobar 	sop_rq->pkt_last_seg = last_seg;
207606adbd5SJohn Daley 
208856d7ba7SNelson Escobar 	cq->to_clean = cq_idx;
209856d7ba7SNelson Escobar 
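	/*
	 * Return buffers to the NIC in batches: post only after more than
	 * rx_free_thresh descriptors are held across the sop and data rings.
	 * The data RQ index is posted before the sop RQ index, with a barrier
	 * in between.
	 */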
210856d7ba7SNelson Escobar 	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
211856d7ba7SNelson Escobar 	    sop_rq->rx_free_thresh) {
212856d7ba7SNelson Escobar 		if (data_rq->in_use) {
213856d7ba7SNelson Escobar 			data_rq->posted_index =
214856d7ba7SNelson Escobar 				enic_ring_add(data_rq->ring.desc_count,
215856d7ba7SNelson Escobar 					      data_rq->posted_index,
216856d7ba7SNelson Escobar 					      data_rq->rx_nb_hold);
217856d7ba7SNelson Escobar 			data_rq->rx_nb_hold = 0;
218856d7ba7SNelson Escobar 		}
219856d7ba7SNelson Escobar 		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
220856d7ba7SNelson Escobar 						     sop_rq->posted_index,
221856d7ba7SNelson Escobar 						     sop_rq->rx_nb_hold);
222856d7ba7SNelson Escobar 		sop_rq->rx_nb_hold = 0;
223856d7ba7SNelson Escobar 
224606adbd5SJohn Daley 		rte_mb();
225856d7ba7SNelson Escobar 		if (data_rq->in_use)
226dd7862baSSantosh Shukla 			iowrite32_relaxed(data_rq->posted_index,
227856d7ba7SNelson Escobar 					  &data_rq->ctrl->posted_index);
228856d7ba7SNelson Escobar 		rte_compiler_barrier();
229dd7862baSSantosh Shukla 		iowrite32_relaxed(sop_rq->posted_index,
230dd7862baSSantosh Shukla 				  &sop_rq->ctrl->posted_index);
231606adbd5SJohn Daley 	}
232606adbd5SJohn Daley 
233*00ce4311SHyong Youb Kim 	if (enic->sriov_vf_soft_rx_stats && bytes) {
234*00ce4311SHyong Youb Kim 		sop_rq->soft_stats_pkts += nb_rx;
235*00ce4311SHyong Youb Kim 		sop_rq->soft_stats_bytes += bytes;
236*00ce4311SHyong Youb Kim 	}
237606adbd5SJohn Daley 
238606adbd5SJohn Daley 	return nb_rx;
239606adbd5SJohn Daley }
240606adbd5SJohn Daley 
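/*
 * Burst receive handlers. Both wrap enic_recv_pkts_common(); use_64b_desc is
 * a compile-time constant at each call site, so the inline expansion
 * specializes the common code for 16B vs 64B CQ descriptors and the
 * conditionals on it fold away.
 */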
24135e2cb6aSJohn Daley uint16_t
2428b428cb5SHyong Youb Kim enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2438b428cb5SHyong Youb Kim {
2448b428cb5SHyong Youb Kim 	return enic_recv_pkts_common(rx_queue, rx_pkts, nb_pkts, false);
2458b428cb5SHyong Youb Kim }
2468b428cb5SHyong Youb Kim 
2478b428cb5SHyong Youb Kim uint16_t
2488b428cb5SHyong Youb Kim enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2498b428cb5SHyong Youb Kim {
2508b428cb5SHyong Youb Kim 	return enic_recv_pkts_common(rx_queue, rx_pkts, nb_pkts, true);
2518b428cb5SHyong Youb Kim }
2528b428cb5SHyong Youb Kim 
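/*
 * Rx burst handler for the non-scatter case: each packet occupies exactly one
 * mbuf, completions map 1:1 to ring slots, and the ring is refilled from a
 * bulk-allocated reserve (rq->free_mbufs) instead of allocating per packet.
 */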
2538b428cb5SHyong Youb Kim uint16_t
25435e2cb6aSJohn Daley enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
25535e2cb6aSJohn Daley 			 uint16_t nb_pkts)
25635e2cb6aSJohn Daley {
25735e2cb6aSJohn Daley 	struct rte_mbuf *mb, **rx, **rxmb;
25835e2cb6aSJohn Daley 	uint16_t cq_idx, nb_rx, max_rx;
25935e2cb6aSJohn Daley 	struct cq_enet_rq_desc *cqd;
26035e2cb6aSJohn Daley 	struct rq_enet_desc *rqd;
26135e2cb6aSJohn Daley 	unsigned int port_id;
26235e2cb6aSJohn Daley 	struct vnic_cq *cq;
26335e2cb6aSJohn Daley 	struct vnic_rq *rq;
26435e2cb6aSJohn Daley 	struct enic *enic;
26535e2cb6aSJohn Daley 	uint8_t color;
26635e2cb6aSJohn Daley 	bool overlay;
26735e2cb6aSJohn Daley 	bool tnl;
268*00ce4311SHyong Youb Kim 	uint64_t bytes;
26935e2cb6aSJohn Daley 
27035e2cb6aSJohn Daley 	rq = rx_queue;
27135e2cb6aSJohn Daley 	enic = vnic_dev_priv(rq->vdev);
27235e2cb6aSJohn Daley 	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
27335e2cb6aSJohn Daley 	cq_idx = cq->to_clean;
27435e2cb6aSJohn Daley 
27535e2cb6aSJohn Daley 	/*
27635e2cb6aSJohn Daley 	 * Fill up the reserve of free mbufs. Below, we restock the receive
27735e2cb6aSJohn Daley 	 * ring with these mbufs to avoid allocation failures.
27835e2cb6aSJohn Daley 	 */
27935e2cb6aSJohn Daley 	if (rq->num_free_mbufs == 0) {
28035e2cb6aSJohn Daley 		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
28135e2cb6aSJohn Daley 					 ENIC_RX_BURST_MAX))
28235e2cb6aSJohn Daley 			return 0;
28335e2cb6aSJohn Daley 		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
28435e2cb6aSJohn Daley 	}
28535e2cb6aSJohn Daley 
28635e2cb6aSJohn Daley 	/* Receive until the end of the ring, at most. */
28735e2cb6aSJohn Daley 	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
28835e2cb6aSJohn Daley 	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
28935e2cb6aSJohn Daley 
29035e2cb6aSJohn Daley 	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
29135e2cb6aSJohn Daley 	color = cq->last_color;
29235e2cb6aSJohn Daley 	rxmb = rq->mbuf_ring + cq_idx;
29335e2cb6aSJohn Daley 	port_id = enic->port_id;
29435e2cb6aSJohn Daley 	overlay = enic->overlay_offload;
29535e2cb6aSJohn Daley 
296*00ce4311SHyong Youb Kim 	bytes = 0;
297*00ce4311SHyong Youb Kim 
29835e2cb6aSJohn Daley 	rx = rx_pkts;
29935e2cb6aSJohn Daley 	while (max_rx) {
30035e2cb6aSJohn Daley 		max_rx--;
30135e2cb6aSJohn Daley 		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
30235e2cb6aSJohn Daley 			break;
30335e2cb6aSJohn Daley 		if (unlikely(cqd->bytes_written_flags &
30435e2cb6aSJohn Daley 			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
30535e2cb6aSJohn Daley 			rte_pktmbuf_free(*rxmb++);
30635e2cb6aSJohn Daley 			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
30735e2cb6aSJohn Daley 			cqd++;
30835e2cb6aSJohn Daley 			continue;
30935e2cb6aSJohn Daley 		}
31035e2cb6aSJohn Daley 
31135e2cb6aSJohn Daley 		mb = *rxmb++;
31235e2cb6aSJohn Daley 		/* prefetch mbuf data for caller */
31335e2cb6aSJohn Daley 		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
31435e2cb6aSJohn Daley 				    RTE_PKTMBUF_HEADROOM));
31535e2cb6aSJohn Daley 		mb->data_len = cqd->bytes_written_flags &
31635e2cb6aSJohn Daley 			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
31735e2cb6aSJohn Daley 		mb->pkt_len = mb->data_len;
31835e2cb6aSJohn Daley 		mb->port = port_id;
319*00ce4311SHyong Youb Kim 
320*00ce4311SHyong Youb Kim 		bytes += mb->pkt_len;
321*00ce4311SHyong Youb Kim 
32235e2cb6aSJohn Daley 		tnl = overlay && (cqd->completed_index_flags &
32335e2cb6aSJohn Daley 				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
32435e2cb6aSJohn Daley 		mb->packet_type =
32535e2cb6aSJohn Daley 			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
32635e2cb6aSJohn Daley 						     tnl);
32735e2cb6aSJohn Daley 		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
32835e2cb6aSJohn Daley 		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
32935e2cb6aSJohn Daley 		if (tnl) {
33035e2cb6aSJohn Daley 			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
33135e2cb6aSJohn Daley 					     RTE_PTYPE_L4_MASK);
33235e2cb6aSJohn Daley 		}
33335e2cb6aSJohn Daley 		cqd++;
33435e2cb6aSJohn Daley 		*rx++ = mb;
33535e2cb6aSJohn Daley 	}
33635e2cb6aSJohn Daley 	/* Number of descriptors visited */
33735e2cb6aSJohn Daley 	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
33835e2cb6aSJohn Daley 	if (nb_rx == 0)
33935e2cb6aSJohn Daley 		return 0;
34035e2cb6aSJohn Daley 	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
34135e2cb6aSJohn Daley 	rxmb = rq->mbuf_ring + cq_idx;
34235e2cb6aSJohn Daley 	cq_idx += nb_rx;
34335e2cb6aSJohn Daley 	rq->rx_nb_hold += nb_rx;
34435e2cb6aSJohn Daley 	if (unlikely(cq_idx == cq->ring.desc_count)) {
34535e2cb6aSJohn Daley 		cq_idx = 0;
34635e2cb6aSJohn Daley 		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
34735e2cb6aSJohn Daley 	}
34835e2cb6aSJohn Daley 	cq->to_clean = cq_idx;
34935e2cb6aSJohn Daley 
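	/*
	 * Replenish the slots just drained: copy nb_rx mbufs from the reserve
	 * into the mbuf ring, then write their buffer addresses into the
	 * corresponding RQ descriptors below.
	 */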
35035e2cb6aSJohn Daley 	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
35135e2cb6aSJohn Daley 	       sizeof(struct rte_mbuf *) * nb_rx);
35235e2cb6aSJohn Daley 	rq->num_free_mbufs -= nb_rx;
35335e2cb6aSJohn Daley 	while (nb_rx) {
35435e2cb6aSJohn Daley 		nb_rx--;
35535e2cb6aSJohn Daley 		mb = *rxmb++;
35635e2cb6aSJohn Daley 		mb->data_off = RTE_PKTMBUF_HEADROOM;
35735e2cb6aSJohn Daley 		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
35835e2cb6aSJohn Daley 		rqd++;
35935e2cb6aSJohn Daley 	}
36035e2cb6aSJohn Daley 	if (rq->rx_nb_hold > rq->rx_free_thresh) {
36135e2cb6aSJohn Daley 		rq->posted_index = enic_ring_add(rq->ring.desc_count,
36235e2cb6aSJohn Daley 						 rq->posted_index,
36335e2cb6aSJohn Daley 						 rq->rx_nb_hold);
36435e2cb6aSJohn Daley 		rq->rx_nb_hold = 0;
36535e2cb6aSJohn Daley 		rte_wmb();
36635e2cb6aSJohn Daley 		iowrite32_relaxed(rq->posted_index,
36735e2cb6aSJohn Daley 				  &rq->ctrl->posted_index);
36835e2cb6aSJohn Daley 	}
36935e2cb6aSJohn Daley 
370*00ce4311SHyong Youb Kim 	if (enic->sriov_vf_soft_rx_stats && bytes) {
371*00ce4311SHyong Youb Kim 		rq->soft_stats_pkts += (rx - rx_pkts);
372*00ce4311SHyong Youb Kim 		rq->soft_stats_bytes += bytes;
373*00ce4311SHyong Youb Kim 	}
374*00ce4311SHyong Youb Kim 
37535e2cb6aSJohn Daley 	return rx - rx_pkts;
37635e2cb6aSJohn Daley }
37735e2cb6aSJohn Daley 
37804e8ec74SJohn Daley static inline void enic_free_wq_bufs(struct vnic_wq *wq,
37904e8ec74SJohn Daley 				     uint16_t completed_index)
380606adbd5SJohn Daley {
381d355a942SHyong Youb Kim 	struct rte_mbuf *buf;
38222572e84SJohn Daley 	struct rte_mbuf *m, *free[ENIC_LEGACY_MAX_WQ_DESCS];
38336935afbSJohn Daley 	unsigned int nb_to_free, nb_free = 0, i;
38436935afbSJohn Daley 	struct rte_mempool *pool;
38536935afbSJohn Daley 	unsigned int tail_idx;
38636935afbSJohn Daley 	unsigned int desc_count = wq->ring.desc_count;
38736935afbSJohn Daley 
38822572e84SJohn Daley 	/*
38922572e84SJohn Daley 	 * On 1500 Series VIC and beyond, more than ENIC_LEGACY_MAX_WQ_DESCS
39022572e84SJohn Daley 	 * descriptors may need to be freed at once. Cap it at ENIC_LEGACY_MAX_WQ_DESCS.
39122572e84SJohn Daley 	 */
39222572e84SJohn Daley 	nb_to_free = RTE_MIN(enic_ring_sub(desc_count, wq->tail_idx,
39322572e84SJohn Daley 			     completed_index) + 1,
39422572e84SJohn Daley 			     (uint32_t)ENIC_LEGACY_MAX_WQ_DESCS);
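	/*
	 * Free in bulk, grouped by mempool: mbufs from the same pool are
	 * collected in free[] and returned with one rte_mempool_put_bulk();
	 * when an mbuf from a different pool is seen, the batch is flushed
	 * and a new one is started for that pool.
	 */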
39536935afbSJohn Daley 	tail_idx = wq->tail_idx;
396d355a942SHyong Youb Kim 	pool = wq->bufs[tail_idx]->pool;
39736935afbSJohn Daley 	for (i = 0; i < nb_to_free; i++) {
398d355a942SHyong Youb Kim 		buf = wq->bufs[tail_idx];
399d355a942SHyong Youb Kim 		m = rte_pktmbuf_prefree_seg(buf);
400da24f6f6SJohn Daley 		if (unlikely(m == NULL)) {
401da24f6f6SJohn Daley 			tail_idx = enic_ring_incr(desc_count, tail_idx);
402da24f6f6SJohn Daley 			continue;
403da24f6f6SJohn Daley 		}
404da24f6f6SJohn Daley 
40536935afbSJohn Daley 		if (likely(m->pool == pool)) {
40622572e84SJohn Daley 			RTE_ASSERT(nb_free < ENIC_LEGACY_MAX_WQ_DESCS);
40736935afbSJohn Daley 			free[nb_free++] = m;
40836935afbSJohn Daley 		} else {
40936935afbSJohn Daley 			rte_mempool_put_bulk(pool, (void *)free, nb_free);
41036935afbSJohn Daley 			free[0] = m;
41136935afbSJohn Daley 			nb_free = 1;
41236935afbSJohn Daley 			pool = m->pool;
41336935afbSJohn Daley 		}
41436935afbSJohn Daley 		tail_idx = enic_ring_incr(desc_count, tail_idx);
415606adbd5SJohn Daley 	}
416606adbd5SJohn Daley 
417f7a58af5SAaron Conole 	if (nb_free > 0)
41836935afbSJohn Daley 		rte_mempool_put_bulk(pool, (void **)free, nb_free);
419606adbd5SJohn Daley 
42036935afbSJohn Daley 	wq->tail_idx = tail_idx;
42136935afbSJohn Daley 	wq->ring.desc_avail += nb_to_free;
422606adbd5SJohn Daley }
423606adbd5SJohn Daley 
42436935afbSJohn Daley unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
425606adbd5SJohn Daley {
42604e8ec74SJohn Daley 	uint16_t completed_index;
42736935afbSJohn Daley 
42836935afbSJohn Daley 	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
429606adbd5SJohn Daley 
430fc2c8c06SJohn Daley 	if (wq->last_completed_index != completed_index) {
43136935afbSJohn Daley 		enic_free_wq_bufs(wq, completed_index);
432fc2c8c06SJohn Daley 		wq->last_completed_index = completed_index;
433fc2c8c06SJohn Daley 	}
434fc2c8c06SJohn Daley 	return 0;
435606adbd5SJohn Daley }
436606adbd5SJohn Daley 
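/*
 * Tx prepare handler (tx_pkt_prepare): checks per-packet limits (max frame
 * size, or TSO header length plus segment size), rejects offload flags the WQ
 * does not support, and runs rte_net_intel_cksum_prepare() to fill in the
 * pseudo-header checksums that the hardware offloads expect.
 */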
43793fb21fdSHyong Youb Kim uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
4381e81dbb5SHyong Youb Kim 			uint16_t nb_pkts)
4391e81dbb5SHyong Youb Kim {
44093fb21fdSHyong Youb Kim 	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
4411e81dbb5SHyong Youb Kim 	int32_t ret;
4421e81dbb5SHyong Youb Kim 	uint16_t i;
4431e81dbb5SHyong Youb Kim 	uint64_t ol_flags;
4441e81dbb5SHyong Youb Kim 	struct rte_mbuf *m;
4451e81dbb5SHyong Youb Kim 
4461e81dbb5SHyong Youb Kim 	for (i = 0; i != nb_pkts; i++) {
4471e81dbb5SHyong Youb Kim 		m = tx_pkts[i];
4487ac790d6SHyong Youb Kim 		ol_flags = m->ol_flags;
449daa02b5cSOlivier Matz 		if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
4505a12c387SHyong Youb Kim 			if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
4515a12c387SHyong Youb Kim 				rte_errno = EINVAL;
4525a12c387SHyong Youb Kim 				return i;
4535a12c387SHyong Youb Kim 			}
4547ac790d6SHyong Youb Kim 		} else {
4557ac790d6SHyong Youb Kim 			uint16_t header_len;
4567ac790d6SHyong Youb Kim 
4577ac790d6SHyong Youb Kim 			header_len = m->l2_len + m->l3_len + m->l4_len;
4587ac790d6SHyong Youb Kim 			if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
4597ac790d6SHyong Youb Kim 				rte_errno = EINVAL;
4607ac790d6SHyong Youb Kim 				return i;
4617ac790d6SHyong Youb Kim 			}
4627ac790d6SHyong Youb Kim 		}
4637ac790d6SHyong Youb Kim 
46493fb21fdSHyong Youb Kim 		if (ol_flags & wq->tx_offload_notsup_mask) {
465579cc855SJohn Daley 			rte_errno = ENOTSUP;
4661e81dbb5SHyong Youb Kim 			return i;
4671e81dbb5SHyong Youb Kim 		}
4681e81dbb5SHyong Youb Kim #ifdef RTE_LIBRTE_ETHDEV_DEBUG
4691e81dbb5SHyong Youb Kim 		ret = rte_validate_tx_offload(m);
4701e81dbb5SHyong Youb Kim 		if (ret != 0) {
471ad8473efSAndrew Rybchenko 			rte_errno = -ret;
4721e81dbb5SHyong Youb Kim 			return i;
4731e81dbb5SHyong Youb Kim 		}
4741e81dbb5SHyong Youb Kim #endif
4751e81dbb5SHyong Youb Kim 		ret = rte_net_intel_cksum_prepare(m);
4761e81dbb5SHyong Youb Kim 		if (ret != 0) {
477ad8473efSAndrew Rybchenko 			rte_errno = -ret;
4781e81dbb5SHyong Youb Kim 			return i;
4791e81dbb5SHyong Youb Kim 		}
4801e81dbb5SHyong Youb Kim 	}
4811e81dbb5SHyong Youb Kim 
4821e81dbb5SHyong Youb Kim 	return i;
4831e81dbb5SHyong Youb Kim }
4841e81dbb5SHyong Youb Kim 
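/*
 * Standard Tx burst handler: reclaims completed descriptors, then encodes one
 * WQ descriptor per mbuf segment with VLAN insertion, checksum, and TSO
 * state. Oversized packets are dropped and counted, and the posted_index
 * doorbell is written once per burst.
 */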
485d309bdc2SJohn Daley uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
486606adbd5SJohn Daley 	uint16_t nb_pkts)
487606adbd5SJohn Daley {
488606adbd5SJohn Daley 	uint16_t index;
48978f90329SJohn Daley 	unsigned int pkt_len, data_len;
490606adbd5SJohn Daley 	unsigned int nb_segs;
49178f90329SJohn Daley 	struct rte_mbuf *tx_pkt;
492606adbd5SJohn Daley 	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
493606adbd5SJohn Daley 	struct enic *enic = vnic_dev_priv(wq->vdev);
494606adbd5SJohn Daley 	unsigned short vlan_id;
495486cd891SJohn Daley 	uint64_t ol_flags;
496821e5412SJohn Daley 	uint64_t ol_flags_mask;
49778f90329SJohn Daley 	unsigned int wq_desc_avail;
49878f90329SJohn Daley 	int head_idx;
49978f90329SJohn Daley 	unsigned int desc_count;
50078f90329SJohn Daley 	struct wq_enet_desc *descs, *desc_p, desc_tmp;
50178f90329SJohn Daley 	uint16_t mss;
50278f90329SJohn Daley 	uint8_t vlan_tag_insert;
503c55614d1SHyong Youb Kim 	uint8_t eop, cq;
50478f90329SJohn Daley 	uint64_t bus_addr;
505026afc76SJohn Daley 	uint8_t offload_mode;
506026afc76SJohn Daley 	uint16_t header_len;
507cafba10bSJohn Daley 	uint64_t tso;
508cafba10bSJohn Daley 	rte_atomic64_t *tx_oversized;
509606adbd5SJohn Daley 
51078f90329SJohn Daley 	enic_cleanup_wq(enic, wq);
51178f90329SJohn Daley 	wq_desc_avail = vnic_wq_desc_avail(wq);
51278f90329SJohn Daley 	head_idx = wq->head_idx;
51378f90329SJohn Daley 	desc_count = wq->ring.desc_count;
514daa02b5cSOlivier Matz 	ol_flags_mask = RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK;
515cafba10bSJohn Daley 	tx_oversized = &enic->soft_stats.tx_oversized;
51678f90329SJohn Daley 
51778f90329SJohn Daley 	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
51878f90329SJohn Daley 
519606adbd5SJohn Daley 	for (index = 0; index < nb_pkts; index++) {
520606adbd5SJohn Daley 		tx_pkt = *tx_pkts++;
521ed6e564cSJohn Daley 		pkt_len = tx_pkt->pkt_len;
522ed6e564cSJohn Daley 		data_len = tx_pkt->data_len;
523ed6e564cSJohn Daley 		ol_flags = tx_pkt->ol_flags;
524606adbd5SJohn Daley 		nb_segs = tx_pkt->nb_segs;
525daa02b5cSOlivier Matz 		tso = ol_flags & RTE_MBUF_F_TX_TCP_SEG;
526ed6e564cSJohn Daley 
527cafba10bSJohn Daley 		/* drop packet if it's too big to send */
528cafba10bSJohn Daley 		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
529ed6e564cSJohn Daley 			rte_pktmbuf_free(tx_pkt);
530cafba10bSJohn Daley 			rte_atomic64_inc(tx_oversized);
531ed6e564cSJohn Daley 			continue;
532ed6e564cSJohn Daley 		}
533ed6e564cSJohn Daley 
53478f90329SJohn Daley 		if (nb_segs > wq_desc_avail) {
535606adbd5SJohn Daley 			if (index > 0)
53678f90329SJohn Daley 				goto post;
53778f90329SJohn Daley 			goto done;
538606adbd5SJohn Daley 		}
539606adbd5SJohn Daley 
54078f90329SJohn Daley 		mss = 0;
541721c6625SJohn Daley 		vlan_id = tx_pkt->vlan_tci;
542daa02b5cSOlivier Matz 		vlan_tag_insert = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
54378f90329SJohn Daley 		bus_addr = (dma_addr_t)
544455da545SSantosh Shukla 			   (tx_pkt->buf_iova + tx_pkt->data_off);
54578f90329SJohn Daley 
54678f90329SJohn Daley 		descs = (struct wq_enet_desc *)wq->ring.descs;
54778f90329SJohn Daley 		desc_p = descs + head_idx;
54878f90329SJohn Daley 
54978f90329SJohn Daley 		eop = (data_len == pkt_len);
550026afc76SJohn Daley 		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
551026afc76SJohn Daley 		header_len = 0;
55278f90329SJohn Daley 
553cafba10bSJohn Daley 		if (tso) {
5544bb0a6feSJohn Daley 			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
5554bb0a6feSJohn Daley 				     tx_pkt->l4_len;
556cafba10bSJohn Daley 
557cafba10bSJohn Daley 			/* Drop if non-TCP packet or TSO seg size is too big */
558cafba10bSJohn Daley 			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
559cafba10bSJohn Daley 			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
560cafba10bSJohn Daley 				rte_pktmbuf_free(tx_pkt);
561cafba10bSJohn Daley 				rte_atomic64_inc(tx_oversized);
562cafba10bSJohn Daley 				continue;
563cafba10bSJohn Daley 			}
564cafba10bSJohn Daley 
565026afc76SJohn Daley 			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
566026afc76SJohn Daley 			mss = tx_pkt->tso_segsz;
56793fb21fdSHyong Youb Kim 			/* For tunnel, need the size of outer+inner headers */
568daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
56993fb21fdSHyong Youb Kim 				header_len += tx_pkt->outer_l2_len +
57093fb21fdSHyong Youb Kim 					tx_pkt->outer_l3_len;
57193fb21fdSHyong Youb Kim 			}
572821e5412SJohn Daley 		}
573cafba10bSJohn Daley 
574026afc76SJohn Daley 		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
575daa02b5cSOlivier Matz 			if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
57678f90329SJohn Daley 				mss |= ENIC_CALC_IP_CKSUM;
57778f90329SJohn Daley 
578821e5412SJohn Daley 			/* NIC uses just 1 bit for UDP and TCP */
579daa02b5cSOlivier Matz 			switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
580daa02b5cSOlivier Matz 			case RTE_MBUF_F_TX_TCP_CKSUM:
581daa02b5cSOlivier Matz 			case RTE_MBUF_F_TX_UDP_CKSUM:
58278f90329SJohn Daley 				mss |= ENIC_CALC_TCP_UDP_CKSUM;
583821e5412SJohn Daley 				break;
584821e5412SJohn Daley 			}
585821e5412SJohn Daley 		}
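		/*
		 * Request a completion (cq = 1) only on an EOP descriptor,
		 * and only after ENIC_WQ_CQ_THRESH or more descriptors have
		 * been queued since the last request, to limit completion
		 * overhead.
		 */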
586c55614d1SHyong Youb Kim 		wq->cq_pend++;
587c55614d1SHyong Youb Kim 		cq = 0;
588c55614d1SHyong Youb Kim 		if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
589c55614d1SHyong Youb Kim 			cq = 1;
590c55614d1SHyong Youb Kim 			wq->cq_pend = 0;
591c55614d1SHyong Youb Kim 		}
592026afc76SJohn Daley 		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
593c55614d1SHyong Youb Kim 				 offload_mode, eop, cq, 0, vlan_tag_insert,
594026afc76SJohn Daley 				 vlan_id, 0);
59578f90329SJohn Daley 
59678f90329SJohn Daley 		*desc_p = desc_tmp;
597d355a942SHyong Youb Kim 		wq->bufs[head_idx] = tx_pkt;
59878f90329SJohn Daley 		head_idx = enic_ring_incr(desc_count, head_idx);
59978f90329SJohn Daley 		wq_desc_avail--;
60078f90329SJohn Daley 
60178f90329SJohn Daley 		if (!eop) {
60278f90329SJohn Daley 			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
60378f90329SJohn Daley 			    tx_pkt->next) {
60478f90329SJohn Daley 				data_len = tx_pkt->data_len;
60578f90329SJohn Daley 
606c55614d1SHyong Youb Kim 				wq->cq_pend++;
607c55614d1SHyong Youb Kim 				cq = 0;
608c55614d1SHyong Youb Kim 				if (tx_pkt->next == NULL) {
609606adbd5SJohn Daley 					eop = 1;
610c55614d1SHyong Youb Kim 					if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
611c55614d1SHyong Youb Kim 						cq = 1;
612c55614d1SHyong Youb Kim 						wq->cq_pend = 0;
613c55614d1SHyong Youb Kim 					}
614c55614d1SHyong Youb Kim 				}
61578f90329SJohn Daley 				desc_p = descs + head_idx;
616455da545SSantosh Shukla 				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
61778f90329SJohn Daley 					   + tx_pkt->data_off);
61878f90329SJohn Daley 				wq_enet_desc_enc((struct wq_enet_desc *)
61978f90329SJohn Daley 						 &desc_tmp, bus_addr, data_len,
620c55614d1SHyong Youb Kim 						 mss, 0, offload_mode, eop, cq,
621026afc76SJohn Daley 						 0, vlan_tag_insert, vlan_id,
622026afc76SJohn Daley 						 0);
623606adbd5SJohn Daley 
62478f90329SJohn Daley 				*desc_p = desc_tmp;
625d355a942SHyong Youb Kim 				wq->bufs[head_idx] = tx_pkt;
62678f90329SJohn Daley 				head_idx = enic_ring_incr(desc_count, head_idx);
62778f90329SJohn Daley 				wq_desc_avail--;
62878f90329SJohn Daley 			}
62978f90329SJohn Daley 		}
63078f90329SJohn Daley 	}
63178f90329SJohn Daley  post:
63278f90329SJohn Daley 	rte_wmb();
633dd7862baSSantosh Shukla 	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
63478f90329SJohn Daley  done:
63578f90329SJohn Daley 	wq->ring.desc_avail = wq_desc_avail;
63678f90329SJohn Daley 	wq->head_idx = head_idx;
63778f90329SJohn Daley 
638606adbd5SJohn Daley 	return index;
639606adbd5SJohn Daley }
64078f90329SJohn Daley 
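/*
 * Simplified Tx path: enqueue_simple_pkts() writes one descriptor per packet
 * (no multi-segment handling), encodes VLAN insertion and L3/L4 checksum
 * offload directly, and truncates oversized packets instead of dropping them.
 */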
641ed933c35SHyong Youb Kim static void enqueue_simple_pkts(struct rte_mbuf **pkts,
642ed933c35SHyong Youb Kim 				struct wq_enet_desc *desc,
643ed933c35SHyong Youb Kim 				uint16_t n,
644ed933c35SHyong Youb Kim 				struct enic *enic)
645ed933c35SHyong Youb Kim {
646ed933c35SHyong Youb Kim 	struct rte_mbuf *p;
64770401fd7SHyong Youb Kim 	uint16_t mss;
64878f90329SJohn Daley 
649ed933c35SHyong Youb Kim 	while (n) {
650ed933c35SHyong Youb Kim 		n--;
651ed933c35SHyong Youb Kim 		p = *pkts++;
652ed933c35SHyong Youb Kim 		desc->address = p->buf_iova + p->data_off;
653ed933c35SHyong Youb Kim 		desc->length = p->pkt_len;
65470401fd7SHyong Youb Kim 		/* VLAN insert */
65570401fd7SHyong Youb Kim 		desc->vlan_tag = p->vlan_tci;
65670401fd7SHyong Youb Kim 		desc->header_length_flags &=
65770401fd7SHyong Youb Kim 			((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
65870401fd7SHyong Youb Kim 			 (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
659daa02b5cSOlivier Matz 		if (p->ol_flags & RTE_MBUF_F_TX_VLAN) {
66070401fd7SHyong Youb Kim 			desc->header_length_flags |=
66170401fd7SHyong Youb Kim 				1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
66270401fd7SHyong Youb Kim 		}
66370401fd7SHyong Youb Kim 		/*
66470401fd7SHyong Youb Kim 		 * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
66570401fd7SHyong Youb Kim 		 * is 0, so no need to set offload_mode.
66670401fd7SHyong Youb Kim 		 */
66770401fd7SHyong Youb Kim 		mss = 0;
668daa02b5cSOlivier Matz 		if (p->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
66970401fd7SHyong Youb Kim 			mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
670daa02b5cSOlivier Matz 		if (p->ol_flags & RTE_MBUF_F_TX_L4_MASK)
67170401fd7SHyong Youb Kim 			mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
67270401fd7SHyong Youb Kim 		desc->mss_loopback = mss;
67370401fd7SHyong Youb Kim 
674ed933c35SHyong Youb Kim 		/*
675ed933c35SHyong Youb Kim 		 * The app should not send oversized
676ed933c35SHyong Youb Kim 		 * packets. tx_pkt_prepare includes a check as
677ed933c35SHyong Youb Kim 		 * well. But some apps ignore the device max size and
6787be78d02SJosh Soref 		 * tx_pkt_prepare. Oversized packets cause WQ errors
679ed933c35SHyong Youb Kim 		 * and the NIC ends up disabling the whole WQ. So
680ed933c35SHyong Youb Kim 		 * truncate packets.
681ed933c35SHyong Youb Kim 		 */
682ed933c35SHyong Youb Kim 		if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
683ed933c35SHyong Youb Kim 			desc->length = ENIC_TX_MAX_PKT_SIZE;
684ed933c35SHyong Youb Kim 			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
685ed933c35SHyong Youb Kim 		}
686ed933c35SHyong Youb Kim 		desc++;
687ed933c35SHyong Youb Kim 	}
688ed933c35SHyong Youb Kim }
689ed933c35SHyong Youb Kim 
690ed933c35SHyong Youb Kim uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
691ed933c35SHyong Youb Kim 			       uint16_t nb_pkts)
692ed933c35SHyong Youb Kim {
693ed933c35SHyong Youb Kim 	unsigned int head_idx, desc_count;
694ed933c35SHyong Youb Kim 	struct wq_enet_desc *desc;
695ed933c35SHyong Youb Kim 	struct vnic_wq *wq;
696ed933c35SHyong Youb Kim 	struct enic *enic;
697ed933c35SHyong Youb Kim 	uint16_t rem, n;
698ed933c35SHyong Youb Kim 
699ed933c35SHyong Youb Kim 	wq = (struct vnic_wq *)tx_queue;
700ed933c35SHyong Youb Kim 	enic = vnic_dev_priv(wq->vdev);
701ed933c35SHyong Youb Kim 	enic_cleanup_wq(enic, wq);
702ed933c35SHyong Youb Kim 	/* Will enqueue this many packets in this call */
703ed933c35SHyong Youb Kim 	nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
704ed933c35SHyong Youb Kim 	if (nb_pkts == 0)
705ed933c35SHyong Youb Kim 		return 0;
706ed933c35SHyong Youb Kim 
707ed933c35SHyong Youb Kim 	head_idx = wq->head_idx;
708ed933c35SHyong Youb Kim 	desc_count = wq->ring.desc_count;
709ed933c35SHyong Youb Kim 
710ed933c35SHyong Youb Kim 	/* Descriptors until the end of the ring */
711ed933c35SHyong Youb Kim 	n = desc_count - head_idx;
712ed933c35SHyong Youb Kim 	n = RTE_MIN(nb_pkts, n);
713ed933c35SHyong Youb Kim 
714ed933c35SHyong Youb Kim 	/* Save mbuf pointers to free later */
715ed933c35SHyong Youb Kim 	memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);
716ed933c35SHyong Youb Kim 
717ed933c35SHyong Youb Kim 	/* Enqueue until the ring end */
718ed933c35SHyong Youb Kim 	rem = nb_pkts - n;
719ed933c35SHyong Youb Kim 	desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
720ed933c35SHyong Youb Kim 	enqueue_simple_pkts(tx_pkts, desc, n, enic);
721ed933c35SHyong Youb Kim 
722ed933c35SHyong Youb Kim 	/* Wrap to the start of the ring */
723ed933c35SHyong Youb Kim 	if (rem) {
724ed933c35SHyong Youb Kim 		tx_pkts += n;
725ed933c35SHyong Youb Kim 		memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
726ed933c35SHyong Youb Kim 		desc = (struct wq_enet_desc *)wq->ring.descs;
727ed933c35SHyong Youb Kim 		enqueue_simple_pkts(tx_pkts, desc, rem, enic);
728ed933c35SHyong Youb Kim 	}
729ed933c35SHyong Youb Kim 	rte_wmb();
730ed933c35SHyong Youb Kim 
731ed933c35SHyong Youb Kim 	/* Update head_idx and desc_avail */
732ed933c35SHyong Youb Kim 	wq->ring.desc_avail -= nb_pkts;
733ed933c35SHyong Youb Kim 	head_idx += nb_pkts;
734ed933c35SHyong Youb Kim 	if (head_idx >= desc_count)
735ed933c35SHyong Youb Kim 		head_idx -= desc_count;
736ed933c35SHyong Youb Kim 	wq->head_idx = head_idx;
737ed933c35SHyong Youb Kim 	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
738ed933c35SHyong Youb Kim 	return nb_pkts;
739ed933c35SHyong Youb Kim }
740