/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include "enic_rxtx_common.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

/* Dummy receive function, installed in place of the real one so that
 * reconfiguration operations can be performed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx, max_rx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint8_t tnl;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
	color = cq->last_color;

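	/*
	 * When Rx scatter is enabled, each user-visible queue is backed by
	 * two hardware RQs: the start-of-packet (SOP) RQ receives the first
	 * segment of each packet, and the data RQ receives any remaining
	 * segments. Completions for both arrive on the same CQ, and
	 * cqd.q_number below identifies the RQ each completion belongs to.
	 */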
	data_rq = &enic->rq[sop_rq->data_queue_idx];

	/* Receive until the end of the ring, at most. */
	max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);

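	/*
	 * Completions are detected with a color bit rather than an ownership
	 * flag: the NIC writes each CQ descriptor with its current color and
	 * flips the color it writes every time it wraps the ring. A
	 * descriptor whose color still matches cq->last_color has not been
	 * written yet, which is what the break below tests for.
	 */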
	while (max_rx) {
		volatile struct rq_enet_desc *rqd_ptr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		max_rx--;

		/* Check for pkts available */
		if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;
		cq_idx++;

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		/*
		 * Only the address needs to be refilled. The length_type of
		 * the descriptor is set during initialization
		 * (enic_alloc_rx_queue_mbufs) and does not change.
		 */
		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
						    RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/*
		 * When overlay offload is enabled, CQ.fcoe indicates the
		 * packet is tunnelled.
		 */
		tnl = enic->overlay_offload &&
			(ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type =
			enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
		if (tnl) {
			first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
						    RTE_PTYPE_L4_MASK);
		}
		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}
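	/*
	 * On a ring wrap, flip last_color to match the color the NIC will
	 * write on its next pass, keeping the ownership test above in sync.
	 */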
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

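	/*
	 * Return consumed descriptors to the NIC in batches. Once more than
	 * rx_free_thresh descriptors have accumulated, advance posted_index
	 * and ring the doorbell; rte_mb() ensures the descriptor refills
	 * above are visible to the device before it observes the new
	 * posted_index.
	 */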
	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

uint16_t
enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf *mb, **rx, **rxmb;
	uint16_t cq_idx, nb_rx, max_rx;
	struct cq_enet_rq_desc *cqd;
	struct rq_enet_desc *rqd;
	unsigned int port_id;
	struct vnic_cq *cq;
	struct vnic_rq *rq;
	struct enic *enic;
	uint8_t color;
	bool overlay;
	bool tnl;

	rq = rx_queue;
	enic = vnic_dev_priv(rq->vdev);
	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	cq_idx = cq->to_clean;

	/*
	 * Fill up the reserve of free mbufs. Below, we restock the receive
	 * ring with these mbufs to avoid allocation failures.
	 */
	if (rq->num_free_mbufs == 0) {
		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
					 ENIC_RX_BURST_MAX))
			return 0;
		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
	}

	/* Receive until the end of the ring, at most. */
	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);

	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
	color = cq->last_color;
	rxmb = rq->mbuf_ring + cq_idx;
	port_id = enic->port_id;
	overlay = enic->overlay_offload;

	rx = rx_pkts;
	while (max_rx) {
		max_rx--;
		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
			break;
		if (unlikely(cqd->bytes_written_flags &
			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
			rte_pktmbuf_free(*rxmb++);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			cqd++;
			continue;
		}

		mb = *rxmb++;
		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
				    RTE_PKTMBUF_HEADROOM));
		mb->data_len = cqd->bytes_written_flags &
			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
		mb->pkt_len = mb->data_len;
		mb->port = port_id;
		tnl = overlay && (cqd->completed_index_flags &
				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
		mb->packet_type =
			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
						     tnl);
		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
		if (tnl) {
			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
					     RTE_PTYPE_L4_MASK);
		}
		cqd++;
		*rx++ = mb;
	}
	/* Number of descriptors visited */
	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
	if (nb_rx == 0)
		return 0;
	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
	rxmb = rq->mbuf_ring + cq_idx;
	cq_idx += nb_rx;
	rq->rx_nb_hold += nb_rx;
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}
	cq->to_clean = cq_idx;

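	/*
	 * Restock the ring slots just harvested: take the next nb_rx unused
	 * mbufs from the reserve filled at the top of this function and
	 * write their buffer addresses into the descriptors at the same
	 * indices.
	 */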
	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
	       sizeof(struct rte_mbuf *) * nb_rx);
	rq->num_free_mbufs -= nb_rx;
	while (nb_rx) {
		nb_rx--;
		mb = *rxmb++;
		mb->data_off = RTE_PKTMBUF_HEADROOM;
		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
		rqd++;
	}
	if (rq->rx_nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
						 rq->posted_index,
						 rq->rx_nb_hold);
		rq->rx_nb_hold = 0;
		rte_wmb();
		iowrite32_relaxed(rq->posted_index,
				  &rq->ctrl->posted_index);
	}

	return rx - rx_pkts;
}

static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct rte_mbuf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	pool = wq->bufs[tail_idx]->pool;
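	/*
	 * Return transmitted mbufs to their mempools in bulk: consecutive
	 * mbufs from the same pool are collected in the local array and
	 * flushed with a single rte_mempool_put_bulk(); a segment from a
	 * different pool flushes the batch and starts a new one.
	 */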
	for (i = 0; i < nb_to_free; i++) {
		buf = wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg(buf);
		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

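	/*
	 * The WQ uses CQ message writeback: instead of the driver reading
	 * completion descriptors, the NIC writes the latest completed index
	 * to a location in host memory (cqmsg_rz). Only the low 16 bits
	 * carry the index.
	 */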
	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

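/*
 * Tx prepare handler (rte_eth_tx_prepare). Validates offload flags and
 * packet sizes for the burst; on the first invalid packet it sets rte_errno
 * and returns the number of packets that passed, leaving the caller to deal
 * with the offender.
 */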
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	int32_t ret;
	uint16_t i;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;
		if (!(ol_flags & PKT_TX_TCP_SEG)) {
			if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
				rte_errno = EINVAL;
				return i;
			}
		} else {
			uint16_t header_len;

			header_len = m->l2_len + m->l3_len + m->l4_len;
			if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
				rte_errno = EINVAL;
				return i;
			}
		}

		if (ol_flags & wq->tx_offload_notsup_mask) {
			rte_errno = ENOTSUP;
			return i;
		}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

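/*
 * Full-featured transmit path: handles multi-segment packets, VLAN insert,
 * checksum offload, and TSO. Each mbuf segment consumes one WQ descriptor,
 * and a CQ entry is requested only about every ENIC_WQ_CQ_THRESH descriptors
 * (see below) to reduce completion-processing overhead.
 */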
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop, cq;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;
	uint64_t tso;
	rte_atomic64_t *tx_oversized;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
	tx_oversized = &enic->soft_stats.tx_oversized;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;
		tso = ol_flags & PKT_TX_TCP_SEG;

		/* drop packet if it's too big to send */
		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(tx_oversized);
			continue;
		}

		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = tx_pkt->vlan_tci;
		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN);
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_iova + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tso) {
			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
				     tx_pkt->l4_len;

			/* Drop if non-TCP packet or TSO seg size is too big */
			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
				rte_pktmbuf_free(tx_pkt);
				rte_atomic64_inc(tx_oversized);
				continue;
			}

			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
			mss = tx_pkt->tso_segsz;
			/* For tunnel, need the size of outer+inner headers */
			if (ol_flags & PKT_TX_TUNNEL_MASK) {
				header_len += tx_pkt->outer_l2_len +
					tx_pkt->outer_l3_len;
			}
		}

		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* The NIC uses a single bit for TCP and UDP checksum */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}
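		/*
		 * Coalesce completions: request a CQ entry only on an EOP
		 * descriptor, and only about once every ENIC_WQ_CQ_THRESH
		 * descriptors, to reduce the work done in enic_cleanup_wq().
		 */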
		wq->cq_pend++;
		cq = 0;
		if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
			cq = 1;
			wq->cq_pend = 0;
		}
		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, cq, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		wq->bufs[head_idx] = tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				wq->cq_pend++;
				cq = 0;
				if (tx_pkt->next == NULL) {
					eop = 1;
					if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
						cq = 1;
						wq->cq_pend = 0;
					}
				}
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, cq,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				wq->bufs[head_idx] = tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}

static void enqueue_simple_pkts(struct rte_mbuf **pkts,
				struct wq_enet_desc *desc,
				uint16_t n,
				struct enic *enic)
{
	struct rte_mbuf *p;
	uint16_t mss;

	while (n) {
		n--;
		p = *pkts++;
		desc->address = p->buf_iova + p->data_off;
		desc->length = p->pkt_len;
		/* VLAN insert */
		desc->vlan_tag = p->vlan_tci;
		desc->header_length_flags &=
			((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
			 (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
		if (p->ol_flags & PKT_TX_VLAN) {
			desc->header_length_flags |=
				1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
		}
		/*
		 * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
		 * is 0, so no need to set offload_mode.
		 */
		mss = 0;
		if (p->ol_flags & PKT_TX_IP_CKSUM)
			mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
		if (p->ol_flags & PKT_TX_L4_MASK)
			mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
		desc->mss_loopback = mss;

		/*
		 * The app should not send oversized packets.
		 * tx_pkt_prepare includes a check as well, but some apps
		 * ignore the device max size and tx_pkt_prepare. Oversized
		 * packets cause WQ errors, and the NIC ends up disabling
		 * the whole WQ. So truncate such packets instead.
		 */
		if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			desc->length = ENIC_TX_MAX_PKT_SIZE;
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
		}
		desc++;
	}
}
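
/*
 * Simplified transmit path. It assumes each packet occupies exactly one
 * descriptor (single-segment mbufs, no TSO), which lets the burst be copied
 * into the ring in at most two straight runs around the wrap point.
 */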
uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	unsigned int head_idx, desc_count;
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	struct enic *enic;
	uint16_t rem, n;

	wq = (struct vnic_wq *)tx_queue;
	enic = vnic_dev_priv(wq->vdev);
	enic_cleanup_wq(enic, wq);
	/* Will enqueue this many packets in this call */
	nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
	if (nb_pkts == 0)
		return 0;

	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;

	/* Descriptors until the end of the ring */
	n = desc_count - head_idx;
	n = RTE_MIN(nb_pkts, n);

	/* Save mbuf pointers to free later */
	memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);

	/* Enqueue until the ring end */
	rem = nb_pkts - n;
	desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
	enqueue_simple_pkts(tx_pkts, desc, n, enic);

	/* Wrap to the start of the ring */
	if (rem) {
		tx_pkts += n;
		memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
		desc = (struct wq_enet_desc *)wq->ring.descs;
		enqueue_simple_pkts(tx_pkts, desc, rem, enic);
	}
	rte_wmb();

	/* Update head_idx and desc_avail */
	wq->ring.desc_avail -= nb_pkts;
	head_idx += nb_pkts;
	if (head_idx >= desc_count)
		head_idx -= desc_count;
	wq->head_idx = head_idx;
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
	return nb_pkts;
}
688