1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <assert.h>
35 #include <stdint.h>
36 #include <string.h>
37 #include <stdlib.h>
38 
39 /* Verbs header. */
40 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
41 #ifdef PEDANTIC
42 #pragma GCC diagnostic ignored "-Wpedantic"
43 #endif
44 #include <infiniband/verbs.h>
45 #include <infiniband/mlx5dv.h>
46 #ifdef PEDANTIC
47 #pragma GCC diagnostic error "-Wpedantic"
48 #endif
49 
50 #include <rte_mbuf.h>
51 #include <rte_mempool.h>
52 #include <rte_prefetch.h>
53 #include <rte_common.h>
54 #include <rte_branch_prediction.h>
55 #include <rte_ether.h>
56 
57 #include "mlx5.h"
58 #include "mlx5_utils.h"
59 #include "mlx5_rxtx.h"
60 #include "mlx5_autoconf.h"
61 #include "mlx5_defs.h"
62 #include "mlx5_prm.h"
63 
64 static __rte_always_inline uint32_t
65 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
66 
67 static __rte_always_inline int
68 mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
69 		 uint16_t cqe_cnt, uint32_t *rss_hash);
70 
71 static __rte_always_inline uint32_t
72 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
73 
74 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
75 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
76 };
77 
78 /**
79  * Build a table to translate Rx completion flags to packet type.
80  *
81  * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
82  */
83 void
84 mlx5_set_ptype_table(void)
85 {
86 	unsigned int i;
87 	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
88 
89 	/* Last entry must not be overwritten, reserved for errored packet. */
90 	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
91 		(*p)[i] = RTE_PTYPE_UNKNOWN;
92 	/*
93 	 * The index to the array should have:
94 	 * bit[1:0] = l3_hdr_type
95 	 * bit[4:2] = l4_hdr_type
96 	 * bit[5] = ip_frag
97 	 * bit[6] = tunneled
98 	 * bit[7] = outer_l3_type
99 	 */
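	/*
	 * Worked example from the layout above: a plain IPv4/TCP completion
	 * has l3_hdr_type = 2 and l4_hdr_type = 1, hence index
	 * (1 << 2) | 2 = 0x06 (RTE_PTYPE_L4_TCP below); OR-ing in the
	 * tunneled bit (bit 6) for the same headers gives 0x46.
	 */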
100 	/* L3 */
101 	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
102 		     RTE_PTYPE_L4_NONFRAG;
103 	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
104 		     RTE_PTYPE_L4_NONFRAG;
105 	/* Fragmented */
106 	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
107 		     RTE_PTYPE_L4_FRAG;
108 	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
109 		     RTE_PTYPE_L4_FRAG;
110 	/* TCP */
111 	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
112 		     RTE_PTYPE_L4_TCP;
113 	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
114 		     RTE_PTYPE_L4_TCP;
115 	/* UDP */
116 	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
117 		     RTE_PTYPE_L4_UDP;
118 	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
119 		     RTE_PTYPE_L4_UDP;
120 	/* Repeat with outer_l3_type being set. Just in case. */
121 	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
122 		     RTE_PTYPE_L4_NONFRAG;
123 	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
124 		     RTE_PTYPE_L4_NONFRAG;
125 	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
126 		     RTE_PTYPE_L4_FRAG;
127 	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
128 		     RTE_PTYPE_L4_FRAG;
129 	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
130 		     RTE_PTYPE_L4_TCP;
131 	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
132 		     RTE_PTYPE_L4_TCP;
133 	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
134 		     RTE_PTYPE_L4_UDP;
135 	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
136 		     RTE_PTYPE_L4_UDP;
137 	/* Tunneled - L3 */
138 	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
139 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
140 		     RTE_PTYPE_INNER_L4_NONFRAG;
141 	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
142 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
143 		     RTE_PTYPE_INNER_L4_NONFRAG;
144 	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
145 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
146 		     RTE_PTYPE_INNER_L4_NONFRAG;
147 	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
148 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
149 		     RTE_PTYPE_INNER_L4_NONFRAG;
150 	/* Tunneled - Fragmented */
151 	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
152 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
153 		     RTE_PTYPE_INNER_L4_FRAG;
154 	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
155 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
156 		     RTE_PTYPE_INNER_L4_FRAG;
157 	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
158 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
159 		     RTE_PTYPE_INNER_L4_FRAG;
160 	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
162 		     RTE_PTYPE_INNER_L4_FRAG;
163 	/* Tunneled - TCP */
164 	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
165 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
166 		     RTE_PTYPE_L4_TCP;
167 	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
169 		     RTE_PTYPE_L4_TCP;
170 	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
172 		     RTE_PTYPE_L4_TCP;
173 	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
174 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
175 		     RTE_PTYPE_L4_TCP;
176 	/* Tunneled - UDP */
177 	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
179 		     RTE_PTYPE_L4_UDP;
180 	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
181 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
182 		     RTE_PTYPE_L4_UDP;
183 	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
185 		     RTE_PTYPE_L4_UDP;
186 	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
187 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
188 		     RTE_PTYPE_L4_UDP;
189 }
190 
191 /**
192  * Return the size of the WQ tailroom.
193  *
194  * @param txq
195  *   Pointer to TX queue structure.
196  * @param addr
197  *   Pointer to tail of WQ.
198  *
199  * @return
200  *   Size of tailroom.
201  */
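/*
 * Example with illustrative values: assuming wqe_n = 6 and a 64-byte
 * MLX5_WQE_SIZE, the WQ spans 64 * 64 = 4096 bytes, so an addr located
 * 256 bytes before the end of the ring yields a tailroom of 256.
 */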
202 static inline size_t
203 tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
204 {
205 	size_t tailroom;
206 	tailroom = (uintptr_t)(txq->wqes) +
207 		   (1 << txq->wqe_n) * MLX5_WQE_SIZE -
208 		   (uintptr_t)addr;
209 	return tailroom;
210 }
211 
212 /**
213  * Copy data to the tailroom of a circular queue.
214  *
215  * @param dst
216  *   Pointer to destination.
217  * @param src
218  *   Pointer to source.
219  * @param n
220  *   Number of bytes to copy.
221  * @param base
222  *   Pointer to head of queue.
223  * @param tailroom
224  *   Size of tailroom from dst.
225  *
226  * @return
227  *   Pointer after copied data.
228  */
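/*
 * Example: with tailroom = 16 and n = 48, the first 16 bytes land at dst,
 * the remaining 32 bytes wrap around to base, and the returned pointer is
 * base + 32, ready for the next copy.
 */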
229 static inline void *
230 mlx5_copy_to_wq(void *dst, const void *src, size_t n,
231 		void *base, size_t tailroom)
232 {
233 	void *ret;
234 
235 	if (n > tailroom) {
236 		rte_memcpy(dst, src, tailroom);
237 		rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
238 			   n - tailroom);
239 		ret = (uint8_t *)base + n - tailroom;
240 	} else {
241 		rte_memcpy(dst, src, n);
242 		ret = (n == tailroom) ? base : (uint8_t *)dst + n;
243 	}
244 	return ret;
245 }
246 
247 /**
248  * DPDK callback to check the status of a tx descriptor.
249  *
250  * @param tx_queue
251  *   The tx queue.
252  * @param[in] offset
253  *   The index of the descriptor in the ring.
254  *
255  * @return
256  *   The status of the tx descriptor.
257  */
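/*
 * Note: elts_head and elts_tail are free-running indices; the 16-bit
 * subtraction in the function below therefore keeps the in-flight count
 * correct across wrap-around.
 */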
258 int
259 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
260 {
261 	struct txq *txq = tx_queue;
262 	uint16_t used;
263 
264 	mlx5_tx_complete(txq);
265 	used = txq->elts_head - txq->elts_tail;
266 	if (offset < used)
267 		return RTE_ETH_TX_DESC_FULL;
268 	return RTE_ETH_TX_DESC_DONE;
269 }
270 
271 /**
272  * DPDK callback to check the status of an rx descriptor.
273  *
274  * @param rx_queue
275  *   The rx queue.
276  * @param[in] offset
277  *   The index of the descriptor in the ring.
278  *
279  * @return
280  *   The status of the rx descriptor.
281  */
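/*
 * Note: a compressed CQE advertises in its byte_cnt field how many
 * completions it stands for, which is why the loop below advances the
 * count by that amount instead of by one.
 */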
282 int
283 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
284 {
285 	struct rxq *rxq = rx_queue;
286 	struct rxq_zip *zip = &rxq->zip;
287 	volatile struct mlx5_cqe *cqe;
288 	const unsigned int cqe_n = (1 << rxq->cqe_n);
289 	const unsigned int cqe_cnt = cqe_n - 1;
290 	unsigned int cq_ci;
291 	unsigned int used;
292 
293 	/* If we are processing a compressed CQE. */
294 	if (zip->ai) {
295 		used = zip->cqe_cnt - zip->ca;
296 		cq_ci = zip->cq_ci;
297 	} else {
298 		used = 0;
299 		cq_ci = rxq->cq_ci;
300 	}
301 	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
302 	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
303 		int8_t op_own;
304 		unsigned int n;
305 
306 		op_own = cqe->op_own;
307 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
308 			n = rte_be_to_cpu_32(cqe->byte_cnt);
309 		else
310 			n = 1;
311 		cq_ci += n;
312 		used += n;
313 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
314 	}
315 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
316 	if (offset < used)
317 		return RTE_ETH_RX_DESC_DONE;
318 	return RTE_ETH_RX_DESC_AVAIL;
319 }
320 
321 /**
322  * DPDK callback for TX.
323  *
324  * @param dpdk_txq
325  *   Generic pointer to TX queue structure.
326  * @param[in] pkts
327  *   Packets to transmit.
328  * @param pkts_n
329  *   Number of packets in array.
330  *
331  * @return
332  *   Number of packets successfully transmitted (<= pkts_n).
333  */
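/*
 * Rough shape of the loop below: each packet becomes a WQE made of a
 * control segment, an Ethernet segment that always carries the first two
 * bytes of the frame (plus any inlined data), and one data segment per
 * remaining chunk; "ds" counts these 16-byte units and ends up in ctrl[1].
 */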
334 uint16_t
335 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
336 {
337 	struct txq *txq = (struct txq *)dpdk_txq;
338 	uint16_t elts_head = txq->elts_head;
339 	const uint16_t elts_n = 1 << txq->elts_n;
340 	const uint16_t elts_m = elts_n - 1;
341 	unsigned int i = 0;
342 	unsigned int j = 0;
343 	unsigned int k = 0;
344 	uint16_t max_elts;
345 	unsigned int max_inline = txq->max_inline;
346 	const unsigned int inline_en = !!max_inline && txq->inline_en;
347 	uint16_t max_wqe;
348 	unsigned int comp;
349 	volatile struct mlx5_wqe_v *wqe = NULL;
350 	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
351 	unsigned int segs_n = 0;
352 	struct rte_mbuf *buf = NULL;
353 	uint8_t *raw;
354 
355 	if (unlikely(!pkts_n))
356 		return 0;
357 	/* Prefetch first packet cacheline. */
358 	rte_prefetch0(*pkts);
359 	/* Start processing. */
360 	mlx5_tx_complete(txq);
361 	max_elts = (elts_n - (elts_head - txq->elts_tail));
362 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
363 	if (unlikely(!max_wqe))
364 		return 0;
365 	do {
366 		volatile rte_v128u32_t *dseg = NULL;
367 		uint32_t length;
368 		unsigned int ds = 0;
369 		unsigned int sg = 0; /* counter of additional segs attached. */
370 		uintptr_t addr;
371 		uint64_t naddr;
372 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
373 		uint16_t tso_header_sz = 0;
374 		uint16_t ehdr;
375 		uint8_t cs_flags = 0;
376 		uint64_t tso = 0;
377 		uint16_t tso_segsz = 0;
378 #ifdef MLX5_PMD_SOFT_COUNTERS
379 		uint32_t total_length = 0;
380 #endif
381 
382 		/* first_seg */
383 		buf = *pkts;
384 		segs_n = buf->nb_segs;
385 		/*
386 		 * Make sure there is enough room to store this packet and
387 		 * that one ring entry remains unused.
388 		 */
389 		assert(segs_n);
390 		if (max_elts < segs_n)
391 			break;
392 		max_elts -= segs_n;
393 		--segs_n;
394 		if (unlikely(--max_wqe == 0))
395 			break;
396 		wqe = (volatile struct mlx5_wqe_v *)
397 			tx_mlx5_wqe(txq, txq->wqe_ci);
398 		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
399 		if (pkts_n - i > 1)
400 			rte_prefetch0(*(pkts + 1));
401 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
402 		length = DATA_LEN(buf);
403 		ehdr = (((uint8_t *)addr)[1] << 8) |
404 		       ((uint8_t *)addr)[0];
405 #ifdef MLX5_PMD_SOFT_COUNTERS
406 		total_length = length;
407 #endif
408 		if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
409 			txq->stats.oerrors++;
410 			break;
411 		}
412 		/* Update element. */
413 		(*txq->elts)[elts_head & elts_m] = buf;
414 		/* Prefetch next buffer data. */
415 		if (pkts_n - i > 1)
416 			rte_prefetch0(
417 			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
418 		/* Should we enable HW CKSUM offload? */
419 		if (buf->ol_flags &
420 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
421 			const uint64_t is_tunneled = buf->ol_flags &
422 						     (PKT_TX_TUNNEL_GRE |
423 						      PKT_TX_TUNNEL_VXLAN);
424 
425 			if (is_tunneled && txq->tunnel_en) {
426 				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
427 					   MLX5_ETH_WQE_L4_INNER_CSUM;
428 				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
429 					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
430 			} else {
431 				cs_flags = MLX5_ETH_WQE_L3_CSUM |
432 					   MLX5_ETH_WQE_L4_CSUM;
433 			}
434 		}
435 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
436 		/* Replace the Ethernet type by the VLAN if necessary. */
437 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
438 			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
439 							 buf->vlan_tci);
440 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;
441 
442 			addr += 2;
443 			length -= 2;
444 			/* Copy destination and source MAC addresses. */
445 			memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
446 			/* Copy VLAN. */
447 			memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
448 			/* Copy missing two bytes to end the DSeg. */
449 			memcpy((uint8_t *)raw + len + sizeof(vlan),
450 			       ((uint8_t *)addr) + len, 2);
451 			addr += len + 2;
452 			length -= (len + 2);
453 		} else {
454 			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
455 			       MLX5_WQE_DWORD_SIZE);
456 			length -= pkt_inline_sz;
457 			addr += pkt_inline_sz;
458 		}
459 		raw += MLX5_WQE_DWORD_SIZE;
460 		if (txq->tso_en) {
461 			tso = buf->ol_flags & PKT_TX_TCP_SEG;
462 			if (tso) {
463 				uintptr_t end = (uintptr_t)
464 						(((uintptr_t)txq->wqes) +
465 						(1 << txq->wqe_n) *
466 						MLX5_WQE_SIZE);
467 				unsigned int copy_b;
468 				uint8_t vlan_sz = (buf->ol_flags &
469 						  PKT_TX_VLAN_PKT) ? 4 : 0;
470 				const uint64_t is_tunneled =
471 							buf->ol_flags &
472 							(PKT_TX_TUNNEL_GRE |
473 							 PKT_TX_TUNNEL_VXLAN);
474 
475 				tso_header_sz = buf->l2_len + vlan_sz +
476 						buf->l3_len + buf->l4_len;
477 				tso_segsz = buf->tso_segsz;
478 				if (unlikely(tso_segsz == 0)) {
479 					txq->stats.oerrors++;
480 					break;
481 				}
482 				if (is_tunneled && txq->tunnel_en) {
483 					tso_header_sz += buf->outer_l2_len +
484 							 buf->outer_l3_len;
485 					cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
486 				} else {
487 					cs_flags |= MLX5_ETH_WQE_L4_CSUM;
488 				}
489 				if (unlikely(tso_header_sz >
490 					     MLX5_MAX_TSO_HEADER)) {
491 					txq->stats.oerrors++;
492 					break;
493 				}
494 				copy_b = tso_header_sz - pkt_inline_sz;
495 				/* First seg must contain all headers. */
496 				assert(copy_b <= length);
497 				if (copy_b &&
498 				   ((end - (uintptr_t)raw) > copy_b)) {
499 					uint16_t n = (MLX5_WQE_DS(copy_b) -
500 						      1 + 3) / 4;
501 
502 					if (unlikely(max_wqe < n))
503 						break;
504 					max_wqe -= n;
505 					rte_memcpy((void *)raw,
506 						   (void *)addr, copy_b);
507 					addr += copy_b;
508 					length -= copy_b;
509 					/* Include padding for TSO header. */
510 					copy_b = MLX5_WQE_DS(copy_b) *
511 						 MLX5_WQE_DWORD_SIZE;
512 					pkt_inline_sz += copy_b;
513 					raw += copy_b;
514 				} else {
515 					/* NOP WQE. */
516 					wqe->ctrl = (rte_v128u32_t){
517 						     rte_cpu_to_be_32(
518 							txq->wqe_ci << 8),
519 						     rte_cpu_to_be_32(
520 							txq->qp_num_8s | 1),
521 						     0,
522 						     0,
523 					};
524 					ds = 1;
525 					total_length = 0;
526 					k++;
527 					goto next_wqe;
528 				}
529 			}
530 		}
531 		/* Inline if enough room. */
532 		if (inline_en || tso) {
533 			uint32_t inl;
534 			uintptr_t end = (uintptr_t)
535 				(((uintptr_t)txq->wqes) +
536 				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
537 			unsigned int inline_room = max_inline *
538 						   RTE_CACHE_LINE_SIZE -
539 						   (pkt_inline_sz - 2) -
540 						   !!tso * sizeof(inl);
541 			uintptr_t addr_end = (addr + inline_room) &
542 					     ~(RTE_CACHE_LINE_SIZE - 1);
543 			unsigned int copy_b = (addr_end > addr) ?
544 				RTE_MIN((addr_end - addr), length) :
545 				0;
546 
547 			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
548 				/*
549 				 * One Dseg remains in the current WQE.  To
550 				 * keep the computation positive, it is
551 				 * removed after the bytes to Dseg conversion.
552 				 */
553 				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
554 
555 				if (unlikely(max_wqe < n))
556 					break;
557 				max_wqe -= n;
558 				if (tso) {
559 					uint32_t inl =
560 					rte_cpu_to_be_32(copy_b |
561 							 MLX5_INLINE_SEG);
562 
563 					pkt_inline_sz =
564 						MLX5_WQE_DS(tso_header_sz) *
565 						MLX5_WQE_DWORD_SIZE;
566 
567 					rte_memcpy((void *)raw,
568 						   (void *)&inl, sizeof(inl));
569 					raw += sizeof(inl);
570 					pkt_inline_sz += sizeof(inl);
571 				}
572 				rte_memcpy((void *)raw, (void *)addr, copy_b);
573 				addr += copy_b;
574 				length -= copy_b;
575 				pkt_inline_sz += copy_b;
576 			}
577 			/*
578 			 * 2 DWORDs consumed by the WQE header + ETH segment +
579 			 * the size of the inline part of the packet.
580 			 */
581 			ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
582 			if (length > 0) {
583 				if (ds % (MLX5_WQE_SIZE /
584 					  MLX5_WQE_DWORD_SIZE) == 0) {
585 					if (unlikely(--max_wqe == 0))
586 						break;
587 					dseg = (volatile rte_v128u32_t *)
588 					       tx_mlx5_wqe(txq, txq->wqe_ci +
589 							   ds / 4);
590 				} else {
591 					dseg = (volatile rte_v128u32_t *)
592 						((uintptr_t)wqe +
593 						 (ds * MLX5_WQE_DWORD_SIZE));
594 				}
595 				goto use_dseg;
596 			} else if (!segs_n) {
597 				goto next_pkt;
598 			} else {
599 				/* dseg will be advanced as part of next_seg. */
600 				dseg = (volatile rte_v128u32_t *)
601 					((uintptr_t)wqe +
602 					 ((ds - 1) * MLX5_WQE_DWORD_SIZE));
603 				goto next_seg;
604 			}
605 		} else {
606 			/*
607 			 * No inline has been done in the packet, only the
608 			 * Ethernet header has been stored.
609 			 */
610 			dseg = (volatile rte_v128u32_t *)
611 				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
612 			ds = 3;
613 use_dseg:
614 			/* Add the remaining packet as a simple ds. */
615 			naddr = rte_cpu_to_be_64(addr);
616 			*dseg = (rte_v128u32_t){
617 				rte_cpu_to_be_32(length),
618 				mlx5_tx_mb2mr(txq, buf),
619 				naddr,
620 				naddr >> 32,
621 			};
622 			++ds;
623 			if (!segs_n)
624 				goto next_pkt;
625 		}
626 next_seg:
627 		assert(buf);
628 		assert(ds);
629 		assert(wqe);
630 		/*
631 		 * Spill on next WQE when the current one does not have
632 		 * enough room left. The WQE size must be a multiple
633 		 * of the data segment size.
634 		 */
635 		assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
636 		if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
637 			if (unlikely(--max_wqe == 0))
638 				break;
639 			dseg = (volatile rte_v128u32_t *)
640 			       tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
641 			rte_prefetch0(tx_mlx5_wqe(txq,
642 						  txq->wqe_ci + ds / 4 + 1));
643 		} else {
644 			++dseg;
645 		}
646 		++ds;
647 		buf = buf->next;
648 		assert(buf);
649 		length = DATA_LEN(buf);
650 #ifdef MLX5_PMD_SOFT_COUNTERS
651 		total_length += length;
652 #endif
653 		/* Store segment information. */
654 		naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
655 		*dseg = (rte_v128u32_t){
656 			rte_cpu_to_be_32(length),
657 			mlx5_tx_mb2mr(txq, buf),
658 			naddr,
659 			naddr >> 32,
660 		};
661 		(*txq->elts)[++elts_head & elts_m] = buf;
662 		++sg;
663 		/* Advance counter only if all segs are successfully posted. */
664 		if (sg < segs_n)
665 			goto next_seg;
666 		else
667 			j += sg;
668 next_pkt:
669 		if (ds > MLX5_DSEG_MAX) {
670 			txq->stats.oerrors++;
671 			break;
672 		}
673 		++elts_head;
674 		++pkts;
675 		++i;
676 		/* Initialize known and common part of the WQE structure. */
677 		if (tso) {
678 			wqe->ctrl = (rte_v128u32_t){
679 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
680 						 MLX5_OPCODE_TSO),
681 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
682 				0,
683 				0,
684 			};
685 			wqe->eseg = (rte_v128u32_t){
686 				0,
687 				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
688 				0,
689 				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
690 			};
691 		} else {
692 			wqe->ctrl = (rte_v128u32_t){
693 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
694 						 MLX5_OPCODE_SEND),
695 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
696 				0,
697 				0,
698 			};
699 			wqe->eseg = (rte_v128u32_t){
700 				0,
701 				cs_flags,
702 				0,
703 				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
704 			};
705 		}
706 next_wqe:
707 		txq->wqe_ci += (ds + 3) / 4;
708 		/* Save the last successful WQE for the completion request. */
709 		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
710 #ifdef MLX5_PMD_SOFT_COUNTERS
711 		/* Increment sent bytes counter. */
712 		txq->stats.obytes += total_length;
713 #endif
714 	} while (i < pkts_n);
715 	/* Take a shortcut if nothing must be sent. */
716 	if (unlikely((i + k) == 0))
717 		return 0;
718 	txq->elts_head += (i + j);
719 	/* Check whether completion threshold has been reached. */
720 	comp = txq->elts_comp + i + j + k;
721 	if (comp >= MLX5_TX_COMP_THRESH) {
722 		/* Request completion on last WQE. */
723 		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
724 		/* Save elts_head in unused "immediate" field of WQE. */
725 		last_wqe->ctrl3 = txq->elts_head;
726 		txq->elts_comp = 0;
727 	} else {
728 		txq->elts_comp = comp;
729 	}
730 #ifdef MLX5_PMD_SOFT_COUNTERS
731 	/* Increment sent packets counter. */
732 	txq->stats.opackets += i;
733 #endif
734 	/* Ring QP doorbell. */
735 	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
736 	return i;
737 }
738 
739 /**
740  * Open an MPW session.
741  *
742  * @param txq
743  *   Pointer to TX queue structure.
744  * @param mpw
745  *   Pointer to MPW session structure.
746  * @param length
747  *   Packet length.
748  */
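/*
 * Note: an MPW session spans at most two consecutive WQEBBs: the control
 * and Ethernet segments plus dseg[0]/dseg[1] sit in the first one, while
 * dseg[2..4] spill into the next, hence the five-entry dseg array below.
 */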
749 static inline void
750 mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
751 {
752 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
753 	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
754 		(volatile struct mlx5_wqe_data_seg (*)[])
755 		tx_mlx5_wqe(txq, idx + 1);
756 
757 	mpw->state = MLX5_MPW_STATE_OPENED;
758 	mpw->pkts_n = 0;
759 	mpw->len = length;
760 	mpw->total_len = 0;
761 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
762 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
763 	mpw->wqe->eseg.inline_hdr_sz = 0;
764 	mpw->wqe->eseg.rsvd0 = 0;
765 	mpw->wqe->eseg.rsvd1 = 0;
766 	mpw->wqe->eseg.rsvd2 = 0;
767 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
768 					     (txq->wqe_ci << 8) |
769 					     MLX5_OPCODE_TSO);
770 	mpw->wqe->ctrl[2] = 0;
771 	mpw->wqe->ctrl[3] = 0;
772 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
773 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
774 	mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
775 		(((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
776 	mpw->data.dseg[2] = &(*dseg)[0];
777 	mpw->data.dseg[3] = &(*dseg)[1];
778 	mpw->data.dseg[4] = &(*dseg)[2];
779 }
780 
781 /**
782  * Close an MPW session.
783  *
784  * @param txq
785  *   Pointer to TX queue structure.
786  * @param mpw
787  *   Pointer to MPW session structure.
788  */
789 static inline void
790 mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
791 {
792 	unsigned int num = mpw->pkts_n;
793 
794 	/*
795 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
796 	 * count as 2.
797 	 */
798 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
799 	mpw->state = MLX5_MPW_STATE_CLOSED;
800 	if (num < 3)
801 		++txq->wqe_ci;
802 	else
803 		txq->wqe_ci += 2;
804 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
805 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
806 }
807 
808 /**
809  * DPDK callback for TX with MPW support.
810  *
811  * @param dpdk_txq
812  *   Generic pointer to TX queue structure.
813  * @param[in] pkts
814  *   Packets to transmit.
815  * @param pkts_n
816  *   Number of packets in array.
817  *
818  * @return
819  *   Number of packets successfully transmitted (<= pkts_n).
820  */
821 uint16_t
822 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
823 {
824 	struct txq *txq = (struct txq *)dpdk_txq;
825 	uint16_t elts_head = txq->elts_head;
826 	const uint16_t elts_n = 1 << txq->elts_n;
827 	const uint16_t elts_m = elts_n - 1;
828 	unsigned int i = 0;
829 	unsigned int j = 0;
830 	uint16_t max_elts;
831 	uint16_t max_wqe;
832 	unsigned int comp;
833 	struct mlx5_mpw mpw = {
834 		.state = MLX5_MPW_STATE_CLOSED,
835 	};
836 
837 	if (unlikely(!pkts_n))
838 		return 0;
839 	/* Prefetch first packet cacheline. */
840 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
841 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
842 	/* Start processing. */
843 	mlx5_tx_complete(txq);
844 	max_elts = (elts_n - (elts_head - txq->elts_tail));
845 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
846 	if (unlikely(!max_wqe))
847 		return 0;
848 	do {
849 		struct rte_mbuf *buf = *(pkts++);
850 		uint32_t length;
851 		unsigned int segs_n = buf->nb_segs;
852 		uint32_t cs_flags = 0;
853 
854 		/*
855 		 * Make sure there is enough room to store this packet and
856 		 * that one ring entry remains unused.
857 		 */
858 		assert(segs_n);
859 		if (max_elts < segs_n)
860 			break;
861 		/* Do not bother with large packets MPW cannot handle. */
862 		if (segs_n > MLX5_MPW_DSEG_MAX) {
863 			txq->stats.oerrors++;
864 			break;
865 		}
866 		max_elts -= segs_n;
867 		--pkts_n;
868 		/* Should we enable HW CKSUM offload? */
869 		if (buf->ol_flags &
870 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
871 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
872 		/* Retrieve packet information. */
873 		length = PKT_LEN(buf);
874 		assert(length);
875 		/* Start new session if packet differs. */
876 		if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
877 		    ((mpw.len != length) ||
878 		     (segs_n != 1) ||
879 		     (mpw.wqe->eseg.cs_flags != cs_flags)))
880 			mlx5_mpw_close(txq, &mpw);
881 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
882 			/*
883 			 * A Multi-Packet WQE consumes at most two WQEs.
884 			 * mlx5_mpw_new() expects to be able to use such
885 			 * resources.
886 			 */
887 			if (unlikely(max_wqe < 2))
888 				break;
889 			max_wqe -= 2;
890 			mlx5_mpw_new(txq, &mpw, length);
891 			mpw.wqe->eseg.cs_flags = cs_flags;
892 		}
893 		/* Multi-segment packets must be alone in their MPW. */
894 		assert((segs_n == 1) || (mpw.pkts_n == 0));
895 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
896 		length = 0;
897 #endif
898 		do {
899 			volatile struct mlx5_wqe_data_seg *dseg;
900 			uintptr_t addr;
901 
902 			assert(buf);
903 			(*txq->elts)[elts_head++ & elts_m] = buf;
904 			dseg = mpw.data.dseg[mpw.pkts_n];
905 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
906 			*dseg = (struct mlx5_wqe_data_seg){
907 				.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
908 				.lkey = mlx5_tx_mb2mr(txq, buf),
909 				.addr = rte_cpu_to_be_64(addr),
910 			};
911 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
912 			length += DATA_LEN(buf);
913 #endif
914 			buf = buf->next;
915 			++mpw.pkts_n;
916 			++j;
917 		} while (--segs_n);
918 		assert(length == mpw.len);
919 		if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
920 			mlx5_mpw_close(txq, &mpw);
921 #ifdef MLX5_PMD_SOFT_COUNTERS
922 		/* Increment sent bytes counter. */
923 		txq->stats.obytes += length;
924 #endif
925 		++i;
926 	} while (pkts_n);
927 	/* Take a shortcut if nothing must be sent. */
928 	if (unlikely(i == 0))
929 		return 0;
930 	/* Check whether completion threshold has been reached. */
931 	/* "j" includes both packets and segments. */
932 	comp = txq->elts_comp + j;
933 	if (comp >= MLX5_TX_COMP_THRESH) {
934 		volatile struct mlx5_wqe *wqe = mpw.wqe;
935 
936 		/* Request completion on last WQE. */
937 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
938 		/* Save elts_head in unused "immediate" field of WQE. */
939 		wqe->ctrl[3] = elts_head;
940 		txq->elts_comp = 0;
941 	} else {
942 		txq->elts_comp = comp;
943 	}
944 #ifdef MLX5_PMD_SOFT_COUNTERS
945 	/* Increment sent packets counter. */
946 	txq->stats.opackets += i;
947 #endif
948 	/* Ring QP doorbell. */
949 	if (mpw.state == MLX5_MPW_STATE_OPENED)
950 		mlx5_mpw_close(txq, &mpw);
951 	mlx5_tx_dbrec(txq, mpw.wqe);
952 	txq->elts_head = elts_head;
953 	return i;
954 }
955 
956 /**
957  * Open a MPW inline session.
958  *
959  * @param txq
960  *   Pointer to TX queue structure.
961  * @param mpw
962  *   Pointer to MPW session structure.
963  * @param length
964  *   Packet length.
965  */
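/*
 * Note: in the inline variant the packet bytes are copied right behind the
 * control and Ethernet segments; the 4-byte inline header (byte count
 * or'ed with MLX5_INLINE_SEG) is only written when the session is closed.
 */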
966 static inline void
967 mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
968 {
969 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
970 	struct mlx5_wqe_inl_small *inl;
971 
972 	mpw->state = MLX5_MPW_INL_STATE_OPENED;
973 	mpw->pkts_n = 0;
974 	mpw->len = length;
975 	mpw->total_len = 0;
976 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
977 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
978 					     (txq->wqe_ci << 8) |
979 					     MLX5_OPCODE_TSO);
980 	mpw->wqe->ctrl[2] = 0;
981 	mpw->wqe->ctrl[3] = 0;
982 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
983 	mpw->wqe->eseg.inline_hdr_sz = 0;
984 	mpw->wqe->eseg.cs_flags = 0;
985 	mpw->wqe->eseg.rsvd0 = 0;
986 	mpw->wqe->eseg.rsvd1 = 0;
987 	mpw->wqe->eseg.rsvd2 = 0;
988 	inl = (struct mlx5_wqe_inl_small *)
989 		(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
990 	mpw->data.raw = (uint8_t *)&inl->raw;
991 }
992 
993 /**
994  * Close an MPW inline session.
995  *
996  * @param txq
997  *   Pointer to TX queue structure.
998  * @param mpw
999  *   Pointer to MPW session structure.
1000  */
1001 static inline void
1002 mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
1003 {
1004 	unsigned int size;
1005 	struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
1006 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
1007 
1008 	size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
1009 	/*
1010 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
1011 	 * count as 2.
1012 	 */
1013 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1014 					     MLX5_WQE_DS(size));
1015 	mpw->state = MLX5_MPW_STATE_CLOSED;
1016 	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
1017 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1018 }
1019 
1020 /**
1021  * DPDK callback for TX with MPW inline support.
1022  *
1023  * @param dpdk_txq
1024  *   Generic pointer to TX queue structure.
1025  * @param[in] pkts
1026  *   Packets to transmit.
1027  * @param pkts_n
1028  *   Number of packets in array.
1029  *
1030  * @return
1031  *   Number of packets successfully transmitted (<= pkts_n).
1032  */
1033 uint16_t
1034 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
1035 			 uint16_t pkts_n)
1036 {
1037 	struct txq *txq = (struct txq *)dpdk_txq;
1038 	uint16_t elts_head = txq->elts_head;
1039 	const uint16_t elts_n = 1 << txq->elts_n;
1040 	const uint16_t elts_m = elts_n - 1;
1041 	unsigned int i = 0;
1042 	unsigned int j = 0;
1043 	uint16_t max_elts;
1044 	uint16_t max_wqe;
1045 	unsigned int comp;
1046 	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
1047 	struct mlx5_mpw mpw = {
1048 		.state = MLX5_MPW_STATE_CLOSED,
1049 	};
1050 	/*
1051 	 * Compute the maximum number of WQEs that can be consumed by the inline
1052 	 * code.
1053 	 * - 2 DSEG for:
1054 	 *   - 1 control segment,
1055 	 *   - 1 Ethernet segment,
1056 	 * - N Dseg from the inline request.
1057 	 */
1058 	const unsigned int wqe_inl_n =
1059 		((2 * MLX5_WQE_DWORD_SIZE +
1060 		  txq->max_inline * RTE_CACHE_LINE_SIZE) +
1061 		 RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
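	/*
	 * Illustrative arithmetic, assuming 64-byte cache lines (matching
	 * MLX5_WQE_SIZE) and max_inline = 4: wqe_inl_n = (32 + 256 + 63) / 64
	 * = 5 WQEBBs reserved per inline session.
	 */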
1062 
1063 	if (unlikely(!pkts_n))
1064 		return 0;
1065 	/* Prefetch first packet cacheline. */
1066 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
1067 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
1068 	/* Start processing. */
1069 	mlx5_tx_complete(txq);
1070 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1071 	do {
1072 		struct rte_mbuf *buf = *(pkts++);
1073 		uintptr_t addr;
1074 		uint32_t length;
1075 		unsigned int segs_n = buf->nb_segs;
1076 		uint32_t cs_flags = 0;
1077 
1078 		/*
1079 		 * Make sure there is enough room to store this packet and
1080 		 * that one ring entry remains unused.
1081 		 */
1082 		assert(segs_n);
1083 		if (max_elts < segs_n)
1084 			break;
1085 		/* Do not bother with large packets MPW cannot handle. */
1086 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1087 			txq->stats.oerrors++;
1088 			break;
1089 		}
1090 		max_elts -= segs_n;
1091 		--pkts_n;
1092 		/*
1093 		 * Compute max_wqe in case fewer WQEs were consumed in the
1094 		 * previous iteration.
1095 		 */
1096 		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1097 		/* Should we enable HW CKSUM offload? */
1098 		if (buf->ol_flags &
1099 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1100 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1101 		/* Retrieve packet information. */
1102 		length = PKT_LEN(buf);
1103 		/* Start new session if packet differs. */
1104 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1105 			if ((mpw.len != length) ||
1106 			    (segs_n != 1) ||
1107 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1108 				mlx5_mpw_close(txq, &mpw);
1109 		} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
1110 			if ((mpw.len != length) ||
1111 			    (segs_n != 1) ||
1112 			    (length > inline_room) ||
1113 			    (mpw.wqe->eseg.cs_flags != cs_flags)) {
1114 				mlx5_mpw_inline_close(txq, &mpw);
1115 				inline_room =
1116 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1117 			}
1118 		}
1119 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
1120 			if ((segs_n != 1) ||
1121 			    (length > inline_room)) {
1122 				/*
1123 				 * A Multi-Packet WQE consumes at most two WQEs.
1124 				 * mlx5_mpw_new() expects to be able to use
1125 				 * such resources.
1126 				 */
1127 				if (unlikely(max_wqe < 2))
1128 					break;
1129 				max_wqe -= 2;
1130 				mlx5_mpw_new(txq, &mpw, length);
1131 				mpw.wqe->eseg.cs_flags = cs_flags;
1132 			} else {
1133 				if (unlikely(max_wqe < wqe_inl_n))
1134 					break;
1135 				max_wqe -= wqe_inl_n;
1136 				mlx5_mpw_inline_new(txq, &mpw, length);
1137 				mpw.wqe->eseg.cs_flags = cs_flags;
1138 			}
1139 		}
1140 		/* Multi-segment packets must be alone in their MPW. */
1141 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1142 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1143 			assert(inline_room ==
1144 			       txq->max_inline * RTE_CACHE_LINE_SIZE);
1145 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1146 			length = 0;
1147 #endif
1148 			do {
1149 				volatile struct mlx5_wqe_data_seg *dseg;
1150 
1151 				assert(buf);
1152 				(*txq->elts)[elts_head++ & elts_m] = buf;
1153 				dseg = mpw.data.dseg[mpw.pkts_n];
1154 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1155 				*dseg = (struct mlx5_wqe_data_seg){
1156 					.byte_count =
1157 					       rte_cpu_to_be_32(DATA_LEN(buf)),
1158 					.lkey = mlx5_tx_mb2mr(txq, buf),
1159 					.addr = rte_cpu_to_be_64(addr),
1160 				};
1161 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1162 				length += DATA_LEN(buf);
1163 #endif
1164 				buf = buf->next;
1165 				++mpw.pkts_n;
1166 				++j;
1167 			} while (--segs_n);
1168 			assert(length == mpw.len);
1169 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
1170 				mlx5_mpw_close(txq, &mpw);
1171 		} else {
1172 			unsigned int max;
1173 
1174 			assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
1175 			assert(length <= inline_room);
1176 			assert(length == DATA_LEN(buf));
1177 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1178 			(*txq->elts)[elts_head++ & elts_m] = buf;
1179 			/* Maximum number of bytes before wrapping. */
1180 			max = ((((uintptr_t)(txq->wqes)) +
1181 				(1 << txq->wqe_n) *
1182 				MLX5_WQE_SIZE) -
1183 			       (uintptr_t)mpw.data.raw);
1184 			if (length > max) {
1185 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1186 					   (void *)addr,
1187 					   max);
1188 				mpw.data.raw = (volatile void *)txq->wqes;
1189 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1190 					   (void *)(addr + max),
1191 					   length - max);
1192 				mpw.data.raw += length - max;
1193 			} else {
1194 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1195 					   (void *)addr,
1196 					   length);
1197 
1198 				if (length == max)
1199 					mpw.data.raw =
1200 						(volatile void *)txq->wqes;
1201 				else
1202 					mpw.data.raw += length;
1203 			}
1204 			++mpw.pkts_n;
1205 			mpw.total_len += length;
1206 			++j;
1207 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
1208 				mlx5_mpw_inline_close(txq, &mpw);
1209 				inline_room =
1210 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1211 			} else {
1212 				inline_room -= length;
1213 			}
1214 		}
1215 #ifdef MLX5_PMD_SOFT_COUNTERS
1216 		/* Increment sent bytes counter. */
1217 		txq->stats.obytes += length;
1218 #endif
1219 		++i;
1220 	} while (pkts_n);
1221 	/* Take a shortcut if nothing must be sent. */
1222 	if (unlikely(i == 0))
1223 		return 0;
1224 	/* Check whether completion threshold has been reached. */
1225 	/* "j" includes both packets and segments. */
1226 	comp = txq->elts_comp + j;
1227 	if (comp >= MLX5_TX_COMP_THRESH) {
1228 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1229 
1230 		/* Request completion on last WQE. */
1231 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1232 		/* Save elts_head in unused "immediate" field of WQE. */
1233 		wqe->ctrl[3] = elts_head;
1234 		txq->elts_comp = 0;
1235 	} else {
1236 		txq->elts_comp = comp;
1237 	}
1238 #ifdef MLX5_PMD_SOFT_COUNTERS
1239 	/* Increment sent packets counter. */
1240 	txq->stats.opackets += i;
1241 #endif
1242 	/* Ring QP doorbell. */
1243 	if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
1244 		mlx5_mpw_inline_close(txq, &mpw);
1245 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1246 		mlx5_mpw_close(txq, &mpw);
1247 	mlx5_tx_dbrec(txq, mpw.wqe);
1248 	txq->elts_head = elts_head;
1249 	return i;
1250 }
1251 
1252 /**
1253  * Open an Enhanced MPW session.
1254  *
1255  * @param txq
1256  *   Pointer to TX queue structure.
1257  * @param mpw
1258  *   Pointer to MPW session structure.
1259  * @param padding
1260  *   Non-zero to pad the first two DWORDs with a zero-length inline header.
1261  */
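/*
 * Note: unlike legacy MPW there is no fixed dseg array here; total_len
 * starts at one WQE (control + Ethernet segment) and grows as inline data
 * and dsegs are appended, mlx5_empw_close() then converts it to the
 * 16-byte units expected in ctrl[1].
 */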
1262 static inline void
1263 mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
1264 {
1265 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
1266 
1267 	mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
1268 	mpw->pkts_n = 0;
1269 	mpw->total_len = sizeof(struct mlx5_wqe);
1270 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
1271 	mpw->wqe->ctrl[0] =
1272 		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
1273 				 (txq->wqe_ci << 8) |
1274 				 MLX5_OPCODE_ENHANCED_MPSW);
1275 	mpw->wqe->ctrl[2] = 0;
1276 	mpw->wqe->ctrl[3] = 0;
1277 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
1278 	if (unlikely(padding)) {
1279 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
1280 
1281 		/* Pad the first 2 DWORDs with zero-length inline header. */
1282 		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
1283 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
1284 			rte_cpu_to_be_32(MLX5_INLINE_SEG);
1285 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
1286 		/* Start from the next WQEBB. */
1287 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
1288 	} else {
1289 		mpw->data.raw = (volatile void *)(mpw->wqe + 1);
1290 	}
1291 }
1292 
1293 /**
1294  * Close an Enhanced MPW session.
1295  *
1296  * @param txq
1297  *   Pointer to TX queue structure.
1298  * @param mpw
1299  *   Pointer to MPW session structure.
1300  *
1301  * @return
1302  *   Number of consumed WQEs.
1303  */
1304 static inline uint16_t
1305 mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
1306 {
1307 	uint16_t ret;
1308 
1309 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
1310 	 * count as 2.
1311 	 */
1312 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1313 					     MLX5_WQE_DS(mpw->total_len));
1314 	mpw->state = MLX5_MPW_STATE_CLOSED;
1315 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1316 	txq->wqe_ci += ret;
1317 	return ret;
1318 }
1319 
1320 /**
1321  * DPDK callback for TX with Enhanced MPW support.
1322  *
1323  * @param dpdk_txq
1324  *   Generic pointer to TX queue structure.
1325  * @param[in] pkts
1326  *   Packets to transmit.
1327  * @param pkts_n
1328  *   Number of packets in array.
1329  *
1330  * @return
1331  *   Number of packets successfully transmitted (<= pkts_n).
1332  */
1333 uint16_t
1334 mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1335 {
1336 	struct txq *txq = (struct txq *)dpdk_txq;
1337 	uint16_t elts_head = txq->elts_head;
1338 	const uint16_t elts_n = 1 << txq->elts_n;
1339 	const uint16_t elts_m = elts_n - 1;
1340 	unsigned int i = 0;
1341 	unsigned int j = 0;
1342 	uint16_t max_elts;
1343 	uint16_t max_wqe;
1344 	unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
1345 	unsigned int mpw_room = 0;
1346 	unsigned int inl_pad = 0;
1347 	uint32_t inl_hdr;
1348 	struct mlx5_mpw mpw = {
1349 		.state = MLX5_MPW_STATE_CLOSED,
1350 	};
1351 
1352 	if (unlikely(!pkts_n))
1353 		return 0;
1354 	/* Start processing. */
1355 	mlx5_tx_complete(txq);
1356 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1357 	/* A CQE slot must always be available. */
1358 	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
1359 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1360 	if (unlikely(!max_wqe))
1361 		return 0;
1362 	do {
1363 		struct rte_mbuf *buf = *(pkts++);
1364 		uintptr_t addr;
1365 		uint64_t naddr;
1366 		unsigned int n;
1367 		unsigned int do_inline = 0; /* Whether inline is possible. */
1368 		uint32_t length;
1369 		unsigned int segs_n = buf->nb_segs;
1370 		uint32_t cs_flags = 0;
1371 
1372 		/*
1373 		 * Make sure there is enough room to store this packet and
1374 		 * that one ring entry remains unused.
1375 		 */
1376 		assert(segs_n);
1377 		if (max_elts - j < segs_n)
1378 			break;
1379 		/* Do not bother with large packets MPW cannot handle. */
1380 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1381 			txq->stats.oerrors++;
1382 			break;
1383 		}
1384 		/* Should we enable HW CKSUM offload? */
1385 		if (buf->ol_flags &
1386 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1387 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1388 		/* Retrieve packet information. */
1389 		length = PKT_LEN(buf);
1390 		/* Start new session if:
1391 		 * - multi-segment packet
1392 		 * - no space left even for a dseg
1393 		 * - next packet can be inlined with a new WQE
1394 		 * - cs_flag differs
1395 		 * The state can't be MLX5_MPW_STATE_OPENED here, as a legacy MPW
1396 		 * session always holds a single multi-segmented packet.
1397 		 */
1398 		if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
1399 			if ((segs_n != 1) ||
1400 			    (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
1401 			      mpw_room) ||
1402 			    (length <= txq->inline_max_packet_sz &&
1403 			     inl_pad + sizeof(inl_hdr) + length >
1404 			      mpw_room) ||
1405 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1406 				max_wqe -= mlx5_empw_close(txq, &mpw);
1407 		}
1408 		if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
1409 			if (unlikely(segs_n != 1)) {
1410 				/* Fall back to legacy MPW.
1411 				 * An MPW session consumes at most 2 WQEs to
1412 				 * hold MLX5_MPW_DSEG_MAX pointers.
1413 				 */
1414 				if (unlikely(max_wqe < 2))
1415 					break;
1416 				mlx5_mpw_new(txq, &mpw, length);
1417 			} else {
1418 				/* In Enhanced MPW, inline as much as the budget
1419 				 * allows. The remaining space is filled with
1420 				 * dsegs. If the title WQEBB isn't padded, it
1421 				 * holds 2 dsegs.
1422 				 */
1423 				mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
1424 					    (max_inline ? max_inline :
1425 					     pkts_n * MLX5_WQE_DWORD_SIZE) +
1426 					    MLX5_WQE_SIZE);
1427 				if (unlikely(max_wqe * MLX5_WQE_SIZE <
1428 					      mpw_room))
1429 					break;
1430 				/* Don't pad the title WQEBB, to avoid wasting WQ space. */
1431 				mlx5_empw_new(txq, &mpw, 0);
1432 				mpw_room -= mpw.total_len;
1433 				inl_pad = 0;
1434 				do_inline =
1435 					length <= txq->inline_max_packet_sz &&
1436 					sizeof(inl_hdr) + length <= mpw_room &&
1437 					!txq->mpw_hdr_dseg;
1438 			}
1439 			mpw.wqe->eseg.cs_flags = cs_flags;
1440 		} else {
1441 			/* Evaluate whether the next packet can be inlined.
1442 			 * Inlining is possible when:
1443 			 * - length is less than the configured value
1444 			 * - length fits in the remaining space
1445 			 * - not required to fill the title WQEBB with dsegs
1446 			 */
1447 			do_inline =
1448 				length <= txq->inline_max_packet_sz &&
1449 				inl_pad + sizeof(inl_hdr) + length <=
1450 				 mpw_room &&
1451 				(!txq->mpw_hdr_dseg ||
1452 				 mpw.total_len >= MLX5_WQE_SIZE);
1453 		}
1454 		/* Multi-segment packets must be alone in their MPW. */
1455 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1456 		if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
1457 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1458 			length = 0;
1459 #endif
1460 			do {
1461 				volatile struct mlx5_wqe_data_seg *dseg;
1462 
1463 				assert(buf);
1464 				(*txq->elts)[elts_head++ & elts_m] = buf;
1465 				dseg = mpw.data.dseg[mpw.pkts_n];
1466 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1467 				*dseg = (struct mlx5_wqe_data_seg){
1468 					.byte_count = rte_cpu_to_be_32(
1469 								DATA_LEN(buf)),
1470 					.lkey = mlx5_tx_mb2mr(txq, buf),
1471 					.addr = rte_cpu_to_be_64(addr),
1472 				};
1473 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1474 				length += DATA_LEN(buf);
1475 #endif
1476 				buf = buf->next;
1477 				++j;
1478 				++mpw.pkts_n;
1479 			} while (--segs_n);
1480 			/* A multi-segmented packet takes one MPW session.
1481 			 * TODO: Pack more multi-segmented packets if possible.
1482 			 */
1483 			mlx5_mpw_close(txq, &mpw);
1484 			if (mpw.pkts_n < 3)
1485 				max_wqe--;
1486 			else
1487 				max_wqe -= 2;
1488 		} else if (do_inline) {
1489 			/* Inline packet into WQE. */
1490 			unsigned int max;
1491 
1492 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1493 			assert(length == DATA_LEN(buf));
1494 			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
1495 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1496 			mpw.data.raw = (volatile void *)
1497 				((uintptr_t)mpw.data.raw + inl_pad);
1498 			max = tx_mlx5_wq_tailroom(txq,
1499 					(void *)(uintptr_t)mpw.data.raw);
1500 			/* Copy inline header. */
1501 			mpw.data.raw = (volatile void *)
1502 				mlx5_copy_to_wq(
1503 					  (void *)(uintptr_t)mpw.data.raw,
1504 					  &inl_hdr,
1505 					  sizeof(inl_hdr),
1506 					  (void *)(uintptr_t)txq->wqes,
1507 					  max);
1508 			max = tx_mlx5_wq_tailroom(txq,
1509 					(void *)(uintptr_t)mpw.data.raw);
1510 			/* Copy packet data. */
1511 			mpw.data.raw = (volatile void *)
1512 				mlx5_copy_to_wq(
1513 					  (void *)(uintptr_t)mpw.data.raw,
1514 					  (void *)addr,
1515 					  length,
1516 					  (void *)(uintptr_t)txq->wqes,
1517 					  max);
1518 			++mpw.pkts_n;
1519 			mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
1520 			/* No need to get completion as the entire packet is
1521 			 * copied to WQ. Free the buf right away.
1522 			 */
1523 			rte_pktmbuf_free_seg(buf);
1524 			mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
1525 			/* Add pad in the next packet if any. */
1526 			inl_pad = (((uintptr_t)mpw.data.raw +
1527 					(MLX5_WQE_DWORD_SIZE - 1)) &
1528 					~(MLX5_WQE_DWORD_SIZE - 1)) -
1529 				  (uintptr_t)mpw.data.raw;
1530 		} else {
1531 			/* No inline. Load a dseg with the packet pointer. */
1532 			volatile rte_v128u32_t *dseg;
1533 
1534 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1535 			assert((inl_pad + sizeof(*dseg)) <= mpw_room);
1536 			assert(length == DATA_LEN(buf));
1537 			if (!tx_mlx5_wq_tailroom(txq,
1538 					(void *)((uintptr_t)mpw.data.raw
1539 						+ inl_pad)))
1540 				dseg = (volatile void *)txq->wqes;
1541 			else
1542 				dseg = (volatile void *)
1543 					((uintptr_t)mpw.data.raw +
1544 					 inl_pad);
1545 			(*txq->elts)[elts_head++ & elts_m] = buf;
1546 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1547 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
1548 				rte_prefetch2((void *)(addr +
1549 						n * RTE_CACHE_LINE_SIZE));
1550 			naddr = rte_cpu_to_be_64(addr);
1551 			*dseg = (rte_v128u32_t) {
1552 				rte_cpu_to_be_32(length),
1553 				mlx5_tx_mb2mr(txq, buf),
1554 				naddr,
1555 				naddr >> 32,
1556 			};
1557 			mpw.data.raw = (volatile void *)(dseg + 1);
1558 			mpw.total_len += (inl_pad + sizeof(*dseg));
1559 			++j;
1560 			++mpw.pkts_n;
1561 			mpw_room -= (inl_pad + sizeof(*dseg));
1562 			inl_pad = 0;
1563 		}
1564 #ifdef MLX5_PMD_SOFT_COUNTERS
1565 		/* Increment sent bytes counter. */
1566 		txq->stats.obytes += length;
1567 #endif
1568 		++i;
1569 	} while (i < pkts_n);
1570 	/* Take a shortcut if nothing must be sent. */
1571 	if (unlikely(i == 0))
1572 		return 0;
1573 	/* Check whether completion threshold has been reached. */
1574 	if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
1575 			(uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
1576 			 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
1577 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1578 
1579 		/* Request completion on last WQE. */
1580 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1581 		/* Save elts_head in unused "immediate" field of WQE. */
1582 		wqe->ctrl[3] = elts_head;
1583 		txq->elts_comp = 0;
1584 		txq->mpw_comp = txq->wqe_ci;
1585 		txq->cq_pi++;
1586 	} else {
1587 		txq->elts_comp += j;
1588 	}
1589 #ifdef MLX5_PMD_SOFT_COUNTERS
1590 	/* Increment sent packets counter. */
1591 	txq->stats.opackets += i;
1592 #endif
1593 	if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
1594 		mlx5_empw_close(txq, &mpw);
1595 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1596 		mlx5_mpw_close(txq, &mpw);
1597 	/* Ring QP doorbell. */
1598 	mlx5_tx_dbrec(txq, mpw.wqe);
1599 	txq->elts_head = elts_head;
1600 	return i;
1601 }
1602 
1603 /**
1604  * Translate RX completion flags to packet type.
1605  *
1606  * @param[in] cqe
1607  *   Pointer to CQE.
1608  *
1609  * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
1610  *
1611  * @return
1612  *   Packet type for struct rte_mbuf.
1613  */
1614 static inline uint32_t
1615 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
1616 {
1617 	uint8_t idx;
1618 	uint8_t pinfo = cqe->pkt_info;
1619 	uint16_t ptype = cqe->hdr_type_etc;
1620 
1621 	/*
1622 	 * The index to the array should have:
1623 	 * bit[1:0] = l3_hdr_type
1624 	 * bit[4:2] = l4_hdr_type
1625 	 * bit[5] = ip_frag
1626 	 * bit[6] = tunneled
1627 	 * bit[7] = outer_l3_type
1628 	 */
1629 	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
1630 	return mlx5_ptype_table[idx];
1631 }
1632 
1633 /**
1634  * Get the size of the next packet for a given CQE. For compressed CQEs, the
1635  * consumer index is updated only once all packets of the current one have
1636  * been processed.
1637  *
1638  * @param rxq
1639  *   Pointer to RX queue.
1640  * @param cqe
1641  *   CQE to process.
1642  * @param[out] rss_hash
1643  *   Packet RSS Hash result.
1644  *
1645  * @return
1646  *   Packet size in bytes (0 if there is none), -1 in case of completion
1647  *   with error.
1648  */
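/*
 * Note on the "zip" session below: when a compressed CQE is found, zip->ai
 * walks the 8-entry mini-CQE arrays, zip->ca/zip->na track which CQEs to
 * invalidate, and rxq->cq_ci only jumps to zip->cq_ci once all zip->cqe_cnt
 * compressed entries have been consumed.
 */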
1649 static inline int
1650 mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
1651 		 uint16_t cqe_cnt, uint32_t *rss_hash)
1652 {
1653 	struct rxq_zip *zip = &rxq->zip;
1654 	uint16_t cqe_n = cqe_cnt + 1;
1655 	int len = 0;
1656 	uint16_t idx, end;
1657 
1658 	/* Process compressed data in the CQE and mini arrays. */
1659 	if (zip->ai) {
1660 		volatile struct mlx5_mini_cqe8 (*mc)[8] =
1661 			(volatile struct mlx5_mini_cqe8 (*)[8])
1662 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
1663 
1664 		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1665 		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
1666 		if ((++zip->ai & 7) == 0) {
1667 			/* Invalidate consumed CQEs */
1668 			idx = zip->ca;
1669 			end = zip->na;
1670 			while (idx != end) {
1671 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1672 					MLX5_CQE_INVALIDATE;
1673 				++idx;
1674 			}
1675 			/*
1676 			 * Increment consumer index to skip the number of
1677 			 * CQEs consumed. Hardware leaves holes in the CQ
1678 			 * ring for software use.
1679 			 */
1680 			zip->ca = zip->na;
1681 			zip->na += 8;
1682 		}
1683 		if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1684 			/* Invalidate the rest */
1685 			idx = zip->ca;
1686 			end = zip->cq_ci;
1687 
1688 			while (idx != end) {
1689 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1690 					MLX5_CQE_INVALIDATE;
1691 				++idx;
1692 			}
1693 			rxq->cq_ci = zip->cq_ci;
1694 			zip->ai = 0;
1695 		}
1696 	/* No compressed data, get next CQE and verify if it is compressed. */
1697 	} else {
1698 		int ret;
1699 		int8_t op_own;
1700 
1701 		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1702 		if (unlikely(ret == 1))
1703 			return 0;
1704 		++rxq->cq_ci;
1705 		op_own = cqe->op_own;
1706 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1707 			volatile struct mlx5_mini_cqe8 (*mc)[8] =
1708 				(volatile struct mlx5_mini_cqe8 (*)[8])
1709 				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
1710 							  cqe_cnt].pkt_info);
1711 
1712 			/* Fix endianness. */
1713 			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1714 			/*
1715 			 * Current mini array position is the one returned by
1716 			 * check_cqe().
1717 			 *
1718 			 * If completion comprises several mini arrays, as a
1719 			 * special case the second one is located 7 CQEs after
1720 			 * the initial CQE instead of 8 for subsequent ones.
1721 			 */
1722 			zip->ca = rxq->cq_ci;
1723 			zip->na = zip->ca + 7;
1724 			/* Compute the next non-compressed CQE. */
1725 			--rxq->cq_ci;
1726 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1727 			/* Get packet size to return. */
1728 			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1729 			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
1730 			zip->ai = 1;
1731 			/* Prefetch all the entries to be invalidated */
1732 			idx = zip->ca;
1733 			end = zip->cq_ci;
1734 			while (idx != end) {
1735 				rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
1736 				++idx;
1737 			}
1738 		} else {
1739 			len = rte_be_to_cpu_32(cqe->byte_cnt);
1740 			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
1741 		}
1742 		/* Error while receiving packet. */
1743 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
1744 			return -1;
1745 	}
1746 	return len;
1747 }
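/*
 * Illustrative caller pattern (a sketch, not part of the driver; "size"
 * and "pkt_hash" are hypothetical local variables).  The in-tree caller
 * is mlx5_rx_burst() below.
 *
 *	uint32_t pkt_hash = 0;
 *	int size = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &pkt_hash);
 *
 *	if (size == 0)
 *		break;		// no completion available yet
 *	if (size == -1)
 *		goto skip;	// completion with error, drop the packet
 *	// "size" bytes were received, pkt_hash holds the RSS hash result
 */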
1748 
1749 /**
1750  * Translate RX completion flags to offload flags.
1751  *
1752  * @param[in] rxq
1753  *   Pointer to RX queue structure.
1754  * @param[in] cqe
1755  *   Pointer to CQE.
1756  *
1757  * @return
1758  *   Offload flags (ol_flags) for struct rte_mbuf.
1759  */
1760 static inline uint32_t
1761 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
1762 {
1763 	uint32_t ol_flags = 0;
1764 	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1765 
1766 	ol_flags =
1767 		TRANSPOSE(flags,
1768 			  MLX5_CQE_RX_L3_HDR_VALID,
1769 			  PKT_RX_IP_CKSUM_GOOD) |
1770 		TRANSPOSE(flags,
1771 			  MLX5_CQE_RX_L4_HDR_VALID,
1772 			  PKT_RX_L4_CKSUM_GOOD);
1773 	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
1774 		ol_flags |=
1775 			TRANSPOSE(flags,
1776 				  MLX5_CQE_RX_L3_HDR_VALID,
1777 				  PKT_RX_IP_CKSUM_GOOD) |
1778 			TRANSPOSE(flags,
1779 				  MLX5_CQE_RX_L4_HDR_VALID,
1780 				  PKT_RX_L4_CKSUM_GOOD);
1781 	return ol_flags;
1782 }
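/*
 * Note: TRANSPOSE() (see the mlx5 utility macros) moves a single-bit
 * completion flag to its mbuf ol_flags position without branching.  A
 * hypothetical branching equivalent for one of the flags above would be:
 *
 *	uint32_t ol = (flags & MLX5_CQE_RX_L3_HDR_VALID) ?
 *		      PKT_RX_IP_CKSUM_GOOD : 0;
 */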
1783 
1784 /**
1785  * DPDK callback for RX.
1786  *
1787  * @param dpdk_rxq
1788  *   Generic pointer to RX queue structure.
1789  * @param[out] pkts
1790  *   Array to store received packets.
1791  * @param pkts_n
1792  *   Maximum number of packets in array.
1793  *
1794  * @return
1795  *   Number of packets successfully received (<= pkts_n).
1796  */
1797 uint16_t
1798 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1799 {
1800 	struct rxq *rxq = dpdk_rxq;
1801 	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1802 	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1803 	const unsigned int sges_n = rxq->sges_n;
1804 	struct rte_mbuf *pkt = NULL;
1805 	struct rte_mbuf *seg = NULL;
1806 	volatile struct mlx5_cqe *cqe =
1807 		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1808 	unsigned int i = 0;
1809 	unsigned int rq_ci = rxq->rq_ci << sges_n;
1810 	int len = 0; /* keep its value across iterations. */
1811 
1812 	while (pkts_n) {
1813 		unsigned int idx = rq_ci & wqe_cnt;
1814 		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
1815 		struct rte_mbuf *rep = (*rxq->elts)[idx];
1816 		uint32_t rss_hash_res = 0;
1817 
1818 		if (pkt)
1819 			NEXT(seg) = rep;
1820 		seg = rep;
1821 		rte_prefetch0(seg);
1822 		rte_prefetch0(cqe);
1823 		rte_prefetch0(wqe);
1824 		rep = rte_mbuf_raw_alloc(rxq->mp);
1825 		if (unlikely(rep == NULL)) {
1826 			++rxq->stats.rx_nombuf;
1827 			if (!pkt) {
1828 				/*
1829 				 * No buffers before we even started,
1830 				 * bail out silently.
1831 				 */
1832 				break;
1833 			}
1834 			while (pkt != seg) {
1835 				assert(pkt != (*rxq->elts)[idx]);
1836 				rep = NEXT(pkt);
1837 				NEXT(pkt) = NULL;
1838 				NB_SEGS(pkt) = 1;
1839 				rte_mbuf_raw_free(pkt);
1840 				pkt = rep;
1841 			}
1842 			break;
1843 		}
1844 		if (!pkt) {
1845 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1846 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
1847 					       &rss_hash_res);
1848 			if (!len) {
1849 				rte_mbuf_raw_free(rep);
1850 				break;
1851 			}
1852 			if (unlikely(len == -1)) {
1853 				/* RX error, packet is likely too large. */
1854 				rte_mbuf_raw_free(rep);
1855 				++rxq->stats.idropped;
1856 				goto skip;
1857 			}
1858 			pkt = seg;
1859 			assert(len >= (rxq->crc_present << 2));
1860 			/* Update packet information. */
1861 			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
1862 			pkt->ol_flags = 0;
1863 			if (rss_hash_res && rxq->rss_hash) {
1864 				pkt->hash.rss = rss_hash_res;
1865 				pkt->ol_flags = PKT_RX_RSS_HASH;
1866 			}
1867 			if (rxq->mark &&
1868 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1869 				pkt->ol_flags |= PKT_RX_FDIR;
1870 				if (cqe->sop_drop_qpn !=
1871 				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1872 					uint32_t mark = cqe->sop_drop_qpn;
1873 
1874 					pkt->ol_flags |= PKT_RX_FDIR_ID;
1875 					pkt->hash.fdir.hi =
1876 						mlx5_flow_mark_get(mark);
1877 				}
1878 			}
1879 			if (rxq->csum | rxq->csum_l2tun)
1880 				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
1881 			if (rxq->vlan_strip &&
1882 			    (cqe->hdr_type_etc &
1883 			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1884 				pkt->ol_flags |= PKT_RX_VLAN_PKT |
1885 					PKT_RX_VLAN_STRIPPED;
1886 				pkt->vlan_tci =
1887 					rte_be_to_cpu_16(cqe->vlan_info);
1888 			}
1889 			if (rxq->crc_present)
1890 				len -= ETHER_CRC_LEN;
1891 			PKT_LEN(pkt) = len;
1892 		}
1893 		DATA_LEN(rep) = DATA_LEN(seg);
1894 		PKT_LEN(rep) = PKT_LEN(seg);
1895 		SET_DATA_OFF(rep, DATA_OFF(seg));
1896 		PORT(rep) = PORT(seg);
1897 		(*rxq->elts)[idx] = rep;
1898 		/*
1899 		 * Fill NIC descriptor with the new buffer.  The lkey and size
1900 		 * of the buffers are already known, only the buffer address
1901 		 * changes.
1902 		 */
1903 		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
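		/*
		 * When the advertised packet length exceeds this SGE, the
		 * packet spans several strides: keep chaining mbuf segments
		 * into the same packet and close it only once the remaining
		 * length fits in the current segment.
		 */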
1904 		if (len > DATA_LEN(seg)) {
1905 			len -= DATA_LEN(seg);
1906 			++NB_SEGS(pkt);
1907 			++rq_ci;
1908 			continue;
1909 		}
1910 		DATA_LEN(seg) = len;
1911 #ifdef MLX5_PMD_SOFT_COUNTERS
1912 		/* Increment bytes counter. */
1913 		rxq->stats.ibytes += PKT_LEN(pkt);
1914 #endif
1915 		/* Return packet. */
1916 		*(pkts++) = pkt;
1917 		pkt = NULL;
1918 		--pkts_n;
1919 		++i;
1920 skip:
1921 		/* Align consumer index to the next stride. */
1922 		rq_ci >>= sges_n;
1923 		++rq_ci;
1924 		rq_ci <<= sges_n;
1925 	}
1926 	if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1927 		return 0;
1928 	/* Update the consumer index. */
1929 	rxq->rq_ci = rq_ci >> sges_n;
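	/*
	 * Doorbell ordering: the write barriers make the CQE and WQE updates
	 * above globally visible before each doorbell record is written, and
	 * the CQ doorbell is rung before the RQ doorbell so that the CQ space
	 * consumed by this burst is released before new receive buffers are
	 * advertised to the device.
	 */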
1930 	rte_wmb();
1931 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1932 	rte_wmb();
1933 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1934 #ifdef MLX5_PMD_SOFT_COUNTERS
1935 	/* Increment packets counter. */
1936 	rxq->stats.ipackets += i;
1937 #endif
1938 	return i;
1939 }
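/*
 * Usage sketch (illustrative only, not part of the driver): applications
 * normally reach this function through rte_eth_rx_burst(); the direct call
 * below with a hypothetical "rxq_handle" merely shows the calling
 * convention.
 *
 *	struct rte_mbuf *bufs[64];
 *	uint16_t nb, k;
 *
 *	nb = mlx5_rx_burst(rxq_handle, bufs, RTE_DIM(bufs));
 *	for (k = 0; k < nb; ++k) {
 *		// process bufs[k] here ...
 *		rte_pktmbuf_free(bufs[k]);
 *	}
 */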
1940 
1941 /**
1942  * Dummy DPDK callback for TX.
1943  *
1944  * This function is used to temporarily replace the real callback during
1945  * unsafe control operations on the queue, or in case of error.
1946  *
1947  * @param dpdk_txq
1948  *   Generic pointer to TX queue structure.
1949  * @param[in] pkts
1950  *   Packets to transmit.
1951  * @param pkts_n
1952  *   Number of packets in array.
1953  *
1954  * @return
1955  *   Number of packets successfully transmitted (<= pkts_n).
1956  */
1957 uint16_t
1958 removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1959 {
1960 	(void)dpdk_txq;
1961 	(void)pkts;
1962 	(void)pkts_n;
1963 	return 0;
1964 }
1965 
1966 /**
1967  * Dummy DPDK callback for RX.
1968  *
1969  * This function is used to temporarily replace the real callback during
1970  * unsafe control operations on the queue, or in case of error.
1971  *
1972  * @param dpdk_rxq
1973  *   Generic pointer to RX queue structure.
1974  * @param[out] pkts
1975  *   Array to store received packets.
1976  * @param pkts_n
1977  *   Maximum number of packets in array.
1978  *
1979  * @return
1980  *   Number of packets successfully received (<= pkts_n).
1981  */
1982 uint16_t
1983 removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1984 {
1985 	(void)dpdk_rxq;
1986 	(void)pkts;
1987 	(void)pkts_n;
1988 	return 0;
1989 }
1990 
1991 /*
1992  * Vectorized Rx/Tx routines are not compiled in when required vector
1993  * instructions are not supported on a target architecture. The following null
1994  * stubs are needed for linkage when the vectorized implementations provided
1995  * in separate files (e.g. mlx5_rxtx_vec_sse.c for x86) are not built.
1996  */
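/*
 * Minimal sketch of the weak-symbol mechanism relied upon below (generic C
 * with hypothetical names): a strong definition found in another object
 * file overrides the weak fallback at link time.
 *
 *	// stub.c
 *	uint16_t __attribute__((weak)) burst_fn(void) { return 0; }
 *
 *	// vec.c, only built when vector instructions are available
 *	uint16_t burst_fn(void) { return 42; }
 *
 * When vec.c is linked in, callers of burst_fn() resolve to the strong
 * version; otherwise the weak stub keeps the link successful.
 */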
1997 
1998 uint16_t __attribute__((weak))
1999 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2000 {
2001 	(void)dpdk_txq;
2002 	(void)pkts;
2003 	(void)pkts_n;
2004 	return 0;
2005 }
2006 
2007 uint16_t __attribute__((weak))
2008 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2009 {
2010 	(void)dpdk_txq;
2011 	(void)pkts;
2012 	(void)pkts_n;
2013 	return 0;
2014 }
2015 
2016 uint16_t __attribute__((weak))
2017 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
2018 {
2019 	(void)dpdk_rxq;
2020 	(void)pkts;
2021 	(void)pkts_n;
2022 	return 0;
2023 }
2024 
2025 int __attribute__((weak))
2026 priv_check_raw_vec_tx_support(struct priv *priv)
2027 {
2028 	(void)priv;
2029 	return -ENOTSUP;
2030 }
2031 
2032 int __attribute__((weak))
2033 priv_check_vec_tx_support(struct priv *priv)
2034 {
2035 	(void)priv;
2036 	return -ENOTSUP;
2037 }
2038 
2039 int __attribute__((weak))
2040 rxq_check_vec_support(struct rxq *rxq)
2041 {
2042 	(void)rxq;
2043 	return -ENOTSUP;
2044 }
2045 
2046 int __attribute__((weak))
2047 priv_check_vec_rx_support(struct priv *priv)
2048 {
2049 	(void)priv;
2050 	return -ENOTSUP;
2051 }
2052