xref: /dpdk/drivers/net/mlx5/mlx5_rxtx.c (revision 3e0ceb9f17fff027fc6c8f18de35e11719ffa61e)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <assert.h>
35 #include <stdint.h>
36 #include <string.h>
37 #include <stdlib.h>
38 
39 /* Verbs header. */
40 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
41 #ifdef PEDANTIC
42 #pragma GCC diagnostic ignored "-Wpedantic"
43 #endif
44 #include <infiniband/verbs.h>
45 #include <infiniband/mlx5dv.h>
46 #ifdef PEDANTIC
47 #pragma GCC diagnostic error "-Wpedantic"
48 #endif
49 
50 #include <rte_mbuf.h>
51 #include <rte_mempool.h>
52 #include <rte_prefetch.h>
53 #include <rte_common.h>
54 #include <rte_branch_prediction.h>
55 #include <rte_ether.h>
56 
57 #include "mlx5.h"
58 #include "mlx5_utils.h"
59 #include "mlx5_rxtx.h"
60 #include "mlx5_autoconf.h"
61 #include "mlx5_defs.h"
62 #include "mlx5_prm.h"
63 
64 static __rte_always_inline uint32_t
65 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
66 
67 static __rte_always_inline int
68 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
69 		 uint16_t cqe_cnt, uint32_t *rss_hash);
70 
71 static __rte_always_inline uint32_t
72 rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
73 
74 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
75 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
76 };
77 
78 /**
79  * Build a table to translate Rx completion flags to packet type.
80  *
81  * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
82  */
83 void
84 mlx5_set_ptype_table(void)
85 {
86 	unsigned int i;
87 	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
88 
89 	/* Last entry must not be overwritten, reserved for errored packet. */
90 	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
91 		(*p)[i] = RTE_PTYPE_UNKNOWN;
92 	/*
93 	 * The index to the array should have:
94 	 * bit[1:0] = l3_hdr_type
95 	 * bit[4:2] = l4_hdr_type
96 	 * bit[5] = ip_frag
97 	 * bit[6] = tunneled
98 	 * bit[7] = outer_l3_type
99 	 */
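	/*
	 * Worked example, reading the entries below: index 0x46 (0b01000110)
	 * has tunneled = 1, outer_l3_type = 0 (IPv4), l4_hdr_type = 1 (TCP)
	 * and l3_hdr_type = 2 (IPv4), i.e. the IPv4-over-IPv4 TCP entry.
	 */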
100 	/* L2 */
101 	(*p)[0x00] = RTE_PTYPE_L2_ETHER;
102 	/* L3 */
103 	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
104 		     RTE_PTYPE_L4_NONFRAG;
105 	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
106 		     RTE_PTYPE_L4_NONFRAG;
107 	/* Fragmented */
108 	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
109 		     RTE_PTYPE_L4_FRAG;
110 	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
111 		     RTE_PTYPE_L4_FRAG;
112 	/* TCP */
113 	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
114 		     RTE_PTYPE_L4_TCP;
115 	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
116 		     RTE_PTYPE_L4_TCP;
117 	/* UDP */
118 	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
119 		     RTE_PTYPE_L4_UDP;
120 	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
121 		     RTE_PTYPE_L4_UDP;
122 	/* Repeat with outer_l3_type being set. Just in case. */
123 	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
124 		     RTE_PTYPE_L4_NONFRAG;
125 	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
126 		     RTE_PTYPE_L4_NONFRAG;
127 	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
128 		     RTE_PTYPE_L4_FRAG;
129 	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
130 		     RTE_PTYPE_L4_FRAG;
131 	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
132 		     RTE_PTYPE_L4_TCP;
133 	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
134 		     RTE_PTYPE_L4_TCP;
135 	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
136 		     RTE_PTYPE_L4_UDP;
137 	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
138 		     RTE_PTYPE_L4_UDP;
139 	/* Tunneled - L3 */
140 	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
141 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
142 		     RTE_PTYPE_INNER_L4_NONFRAG;
143 	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
144 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
145 		     RTE_PTYPE_INNER_L4_NONFRAG;
146 	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
147 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
148 		     RTE_PTYPE_INNER_L4_NONFRAG;
149 	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
150 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
151 		     RTE_PTYPE_INNER_L4_NONFRAG;
152 	/* Tunneled - Fragmented */
153 	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
154 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
155 		     RTE_PTYPE_INNER_L4_FRAG;
156 	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
157 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
158 		     RTE_PTYPE_INNER_L4_FRAG;
159 	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
161 		     RTE_PTYPE_INNER_L4_FRAG;
162 	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
163 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
164 		     RTE_PTYPE_INNER_L4_FRAG;
165 	/* Tunneled - TCP */
166 	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
167 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
168 		     RTE_PTYPE_L4_TCP;
169 	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
170 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
171 		     RTE_PTYPE_L4_TCP;
172 	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
173 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
174 		     RTE_PTYPE_L4_TCP;
175 	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
177 		     RTE_PTYPE_L4_TCP;
178 	/* Tunneled - UDP */
179 	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
180 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
181 		     RTE_PTYPE_L4_UDP;
182 	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
183 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
184 		     RTE_PTYPE_L4_UDP;
185 	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
186 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
187 		     RTE_PTYPE_L4_UDP;
188 	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
190 		     RTE_PTYPE_L4_UDP;
191 }
192 
193 /**
194  * Return the size of tailroom of WQ.
195  *
196  * @param txq
197  *   Pointer to TX queue structure.
198  * @param addr
199  *   Pointer to tail of WQ.
200  *
201  * @return
202  *   Size of tailroom.
203  */
204 static inline size_t
205 tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
206 {
207 	size_t tailroom;
208 	tailroom = (uintptr_t)(txq->wqes) +
209 		   (1 << txq->wqe_n) * MLX5_WQE_SIZE -
210 		   (uintptr_t)addr;
211 	return tailroom;
212 }
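/*
 * The returned tailroom is passed as the "tailroom" argument of
 * mlx5_copy_to_wq() below so that inline copies wrap back to the start of
 * the work queue when they reach its end. E.g. with "addr" 128 bytes before
 * the end of the ring, the helper reports 128 bytes of tailroom.
 */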
213 
214 /**
215  * Copy data to tailroom of circular queue.
216  *
217  * @param dst
218  *   Pointer to destination.
219  * @param src
220  *   Pointer to source.
221  * @param n
222  *   Number of bytes to copy.
223  * @param base
224  *   Pointer to head of queue.
225  * @param tailroom
226  *   Size of tailroom from dst.
227  *
228  * @return
229  *   Pointer after copied data.
230  */
231 static inline void *
232 mlx5_copy_to_wq(void *dst, const void *src, size_t n,
233 		void *base, size_t tailroom)
234 {
235 	void *ret;
236 
237 	if (n > tailroom) {
238 		rte_memcpy(dst, src, tailroom);
239 		rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
240 			   n - tailroom);
241 		ret = (uint8_t *)base + n - tailroom;
242 	} else {
243 		rte_memcpy(dst, src, n);
244 		ret = (n == tailroom) ? base : (uint8_t *)dst + n;
245 	}
246 	return ret;
247 }
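/*
 * Worked example: with n = 24 and tailroom = 16, the first 16 bytes land at
 * "dst", the remaining 8 bytes wrap to "base", and the returned pointer is
 * base + 8, ready for the next copy.
 */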
248 
249 /**
250  * DPDK callback to check the status of a tx descriptor.
251  *
252  * @param tx_queue
253  *   The tx queue.
254  * @param[in] offset
255  *   The index of the descriptor in the ring.
256  *
257  * @return
258  *   The status of the tx descriptor.
259  */
260 int
261 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
262 {
263 	struct mlx5_txq_data *txq = tx_queue;
264 	uint16_t used;
265 
266 	mlx5_tx_complete(txq);
267 	used = txq->elts_head - txq->elts_tail;
268 	if (offset < used)
269 		return RTE_ETH_TX_DESC_FULL;
270 	return RTE_ETH_TX_DESC_DONE;
271 }
272 
273 /**
274  * DPDK callback to check the status of a rx descriptor.
275  *
276  * @param rx_queue
277  *   The rx queue.
278  * @param[in] offset
279  *   The index of the descriptor in the ring.
280  *
281  * @return
282  *   The status of the rx descriptor.
283  */
284 int
285 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
286 {
287 	struct mlx5_rxq_data *rxq = rx_queue;
288 	struct rxq_zip *zip = &rxq->zip;
289 	volatile struct mlx5_cqe *cqe;
290 	const unsigned int cqe_n = (1 << rxq->cqe_n);
291 	const unsigned int cqe_cnt = cqe_n - 1;
292 	unsigned int cq_ci;
293 	unsigned int used;
294 
295 	/* If we are processing a compressed CQE. */
296 	if (zip->ai) {
297 		used = zip->cqe_cnt - zip->ca;
298 		cq_ci = zip->cq_ci;
299 	} else {
300 		used = 0;
301 		cq_ci = rxq->cq_ci;
302 	}
303 	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
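	/*
	 * Walk the valid CQEs following the current position; a compressed
	 * CQE accounts for byte_cnt packets, a regular one for a single
	 * packet.
	 */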
304 	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
305 		int8_t op_own;
306 		unsigned int n;
307 
308 		op_own = cqe->op_own;
309 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
310 			n = rte_be_to_cpu_32(cqe->byte_cnt);
311 		else
312 			n = 1;
313 		cq_ci += n;
314 		used += n;
315 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
316 	}
317 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
318 	if (offset < used)
319 		return RTE_ETH_RX_DESC_DONE;
320 	return RTE_ETH_RX_DESC_AVAIL;
321 }
322 
323 /**
324  * DPDK callback for TX.
325  *
326  * @param dpdk_txq
327  *   Generic pointer to TX queue structure.
328  * @param[in] pkts
329  *   Packets to transmit.
330  * @param pkts_n
331  *   Number of packets in array.
332  *
333  * @return
334  *   Number of packets successfully transmitted (<= pkts_n).
335  */
336 uint16_t
337 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
338 {
339 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
340 	uint16_t elts_head = txq->elts_head;
341 	const uint16_t elts_n = 1 << txq->elts_n;
342 	const uint16_t elts_m = elts_n - 1;
343 	unsigned int i = 0;
344 	unsigned int j = 0;
345 	unsigned int k = 0;
346 	uint16_t max_elts;
347 	unsigned int max_inline = txq->max_inline;
348 	const unsigned int inline_en = !!max_inline && txq->inline_en;
349 	uint16_t max_wqe;
350 	unsigned int comp;
351 	volatile struct mlx5_wqe_v *wqe = NULL;
352 	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
353 	unsigned int segs_n = 0;
354 	struct rte_mbuf *buf = NULL;
355 	uint8_t *raw;
356 
357 	if (unlikely(!pkts_n))
358 		return 0;
359 	/* Prefetch first packet cacheline. */
360 	rte_prefetch0(*pkts);
361 	/* Start processing. */
362 	mlx5_tx_complete(txq);
363 	max_elts = (elts_n - (elts_head - txq->elts_tail));
364 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
365 	if (unlikely(!max_wqe))
366 		return 0;
367 	do {
368 		volatile rte_v128u32_t *dseg = NULL;
369 		uint32_t length;
370 		unsigned int ds = 0;
371 		unsigned int sg = 0; /* counter of additional segs attached. */
372 		uintptr_t addr;
373 		uint64_t naddr;
374 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
375 		uint16_t tso_header_sz = 0;
376 		uint16_t ehdr;
377 		uint8_t cs_flags = 0;
378 		uint64_t tso = 0;
379 		uint16_t tso_segsz = 0;
380 #ifdef MLX5_PMD_SOFT_COUNTERS
381 		uint32_t total_length = 0;
382 #endif
383 
384 		/* first_seg */
385 		buf = *pkts;
386 		segs_n = buf->nb_segs;
387 		/*
388 		 * Make sure there is enough room to store this packet and
389 		 * that one ring entry remains unused.
390 		 */
391 		assert(segs_n);
392 		if (max_elts < segs_n)
393 			break;
394 		max_elts -= segs_n;
395 		--segs_n;
396 		if (unlikely(--max_wqe == 0))
397 			break;
398 		wqe = (volatile struct mlx5_wqe_v *)
399 			tx_mlx5_wqe(txq, txq->wqe_ci);
400 		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
401 		if (pkts_n - i > 1)
402 			rte_prefetch0(*(pkts + 1));
403 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
404 		length = DATA_LEN(buf);
405 		ehdr = (((uint8_t *)addr)[1] << 8) |
406 		       ((uint8_t *)addr)[0];
407 #ifdef MLX5_PMD_SOFT_COUNTERS
408 		total_length = length;
409 #endif
410 		if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
411 			txq->stats.oerrors++;
412 			break;
413 		}
414 		/* Update element. */
415 		(*txq->elts)[elts_head & elts_m] = buf;
416 		/* Prefetch next buffer data. */
417 		if (pkts_n - i > 1)
418 			rte_prefetch0(
419 			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
420 		/* Should we enable HW CKSUM offload */
421 		if (buf->ol_flags &
422 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
423 			const uint64_t is_tunneled = buf->ol_flags &
424 						     (PKT_TX_TUNNEL_GRE |
425 						      PKT_TX_TUNNEL_VXLAN);
426 
427 			if (is_tunneled && txq->tunnel_en) {
428 				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
429 					   MLX5_ETH_WQE_L4_INNER_CSUM;
430 				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
431 					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
432 			} else {
433 				cs_flags = MLX5_ETH_WQE_L3_CSUM |
434 					   MLX5_ETH_WQE_L4_CSUM;
435 			}
436 		}
437 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
438 		/* Replace the Ethernet type by the VLAN if necessary. */
439 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
440 			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
441 							 buf->vlan_tci);
442 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;
443 
444 			addr += 2;
445 			length -= 2;
446 			/* Copy destination and source MAC addresses. */
447 			memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
448 			/* Copy VLAN. */
449 			memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
450 			/* Copy missing two bytes to end the DSeg. */
451 			memcpy((uint8_t *)raw + len + sizeof(vlan),
452 			       ((uint8_t *)addr) + len, 2);
453 			addr += len + 2;
454 			length -= (len + 2);
455 		} else {
456 			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
457 			       MLX5_WQE_DWORD_SIZE);
458 			length -= pkt_inline_sz;
459 			addr += pkt_inline_sz;
460 		}
461 		raw += MLX5_WQE_DWORD_SIZE;
462 		if (txq->tso_en) {
463 			tso = buf->ol_flags & PKT_TX_TCP_SEG;
464 			if (tso) {
465 				uintptr_t end = (uintptr_t)
466 						(((uintptr_t)txq->wqes) +
467 						(1 << txq->wqe_n) *
468 						MLX5_WQE_SIZE);
469 				unsigned int copy_b;
470 				uint8_t vlan_sz = (buf->ol_flags &
471 						  PKT_TX_VLAN_PKT) ? 4 : 0;
472 				const uint64_t is_tunneled =
473 							buf->ol_flags &
474 							(PKT_TX_TUNNEL_GRE |
475 							 PKT_TX_TUNNEL_VXLAN);
476 
477 				tso_header_sz = buf->l2_len + vlan_sz +
478 						buf->l3_len + buf->l4_len;
479 				tso_segsz = buf->tso_segsz;
480 				if (unlikely(tso_segsz == 0)) {
481 					txq->stats.oerrors++;
482 					break;
483 				}
484 				if (is_tunneled	&& txq->tunnel_en) {
485 					tso_header_sz += buf->outer_l2_len +
486 							 buf->outer_l3_len;
487 					cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
488 				} else {
489 					cs_flags |= MLX5_ETH_WQE_L4_CSUM;
490 				}
491 				if (unlikely(tso_header_sz >
492 					     MLX5_MAX_TSO_HEADER)) {
493 					txq->stats.oerrors++;
494 					break;
495 				}
496 				copy_b = tso_header_sz - pkt_inline_sz;
497 				/* First seg must contain all headers. */
498 				assert(copy_b <= length);
499 				if (copy_b &&
500 				   ((end - (uintptr_t)raw) > copy_b)) {
501 					uint16_t n = (MLX5_WQE_DS(copy_b) -
502 						      1 + 3) / 4;
503 
504 					if (unlikely(max_wqe < n))
505 						break;
506 					max_wqe -= n;
507 					rte_memcpy((void *)raw,
508 						   (void *)addr, copy_b);
509 					addr += copy_b;
510 					length -= copy_b;
511 					/* Include padding for TSO header. */
512 					copy_b = MLX5_WQE_DS(copy_b) *
513 						 MLX5_WQE_DWORD_SIZE;
514 					pkt_inline_sz += copy_b;
515 					raw += copy_b;
516 				} else {
517 					/* NOP WQE. */
518 					wqe->ctrl = (rte_v128u32_t){
519 						     rte_cpu_to_be_32(
520 							txq->wqe_ci << 8),
521 						     rte_cpu_to_be_32(
522 							txq->qp_num_8s | 1),
523 						     0,
524 						     0,
525 					};
526 					ds = 1;
527 					total_length = 0;
528 					k++;
529 					goto next_wqe;
530 				}
531 			}
532 		}
533 		/* Inline if enough room. */
534 		if (inline_en || tso) {
535 			uint32_t inl;
536 			uintptr_t end = (uintptr_t)
537 				(((uintptr_t)txq->wqes) +
538 				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
539 			unsigned int inline_room = max_inline *
540 						   RTE_CACHE_LINE_SIZE -
541 						   (pkt_inline_sz - 2) -
542 						   !!tso * sizeof(inl);
543 			uintptr_t addr_end = (addr + inline_room) &
544 					     ~(RTE_CACHE_LINE_SIZE - 1);
545 			unsigned int copy_b = (addr_end > addr) ?
546 				RTE_MIN((addr_end - addr), length) :
547 				0;
548 
549 			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
550 				/*
551 				 * One Dseg remains in the current WQE.  To
552 				 * keep the computation positive, it is
553 				 * removed after the bytes to Dseg conversion.
554 				 */
555 				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
556 
557 				if (unlikely(max_wqe < n))
558 					break;
559 				max_wqe -= n;
560 				if (tso) {
561 					inl = rte_cpu_to_be_32(copy_b |
562 							       MLX5_INLINE_SEG);
563 					rte_memcpy((void *)raw,
564 						   (void *)&inl, sizeof(inl));
565 					raw += sizeof(inl);
566 					pkt_inline_sz += sizeof(inl);
567 				}
568 				rte_memcpy((void *)raw, (void *)addr, copy_b);
569 				addr += copy_b;
570 				length -= copy_b;
571 				pkt_inline_sz += copy_b;
572 			}
573 			/*
574 			 * 2 DWORDs consumed by the WQE header + ETH segment +
575 			 * the size of the inline part of the packet.
576 			 */
577 			ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
578 			if (length > 0) {
579 				if (ds % (MLX5_WQE_SIZE /
580 					  MLX5_WQE_DWORD_SIZE) == 0) {
581 					if (unlikely(--max_wqe == 0))
582 						break;
583 					dseg = (volatile rte_v128u32_t *)
584 					       tx_mlx5_wqe(txq, txq->wqe_ci +
585 							   ds / 4);
586 				} else {
587 					dseg = (volatile rte_v128u32_t *)
588 						((uintptr_t)wqe +
589 						 (ds * MLX5_WQE_DWORD_SIZE));
590 				}
591 				goto use_dseg;
592 			} else if (!segs_n) {
593 				goto next_pkt;
594 			} else {
595 				/* dseg will be advanced as part of next_seg. */
596 				dseg = (volatile rte_v128u32_t *)
597 					((uintptr_t)wqe +
598 					 ((ds - 1) * MLX5_WQE_DWORD_SIZE));
599 				goto next_seg;
600 			}
601 		} else {
602 			/*
603 			 * No inline has been done in the packet, only the
604 			 * Ethernet header has been stored.
605 			 */
606 			dseg = (volatile rte_v128u32_t *)
607 				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
608 			ds = 3;
609 use_dseg:
610 			/* Add the remaining packet as a simple ds. */
611 			naddr = rte_cpu_to_be_64(addr);
612 			*dseg = (rte_v128u32_t){
613 				rte_cpu_to_be_32(length),
614 				mlx5_tx_mb2mr(txq, buf),
615 				naddr,
616 				naddr >> 32,
617 			};
618 			++ds;
619 			if (!segs_n)
620 				goto next_pkt;
621 		}
622 next_seg:
623 		assert(buf);
624 		assert(ds);
625 		assert(wqe);
626 		/*
627 		 * Spill onto the next WQE when the current one does not have
628 		 * enough room left. The WQE size must be a multiple of the
629 		 * data segment size.
630 		 */
631 		assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
632 		if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
633 			if (unlikely(--max_wqe == 0))
634 				break;
635 			dseg = (volatile rte_v128u32_t *)
636 			       tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
637 			rte_prefetch0(tx_mlx5_wqe(txq,
638 						  txq->wqe_ci + ds / 4 + 1));
639 		} else {
640 			++dseg;
641 		}
642 		++ds;
643 		buf = buf->next;
644 		assert(buf);
645 		length = DATA_LEN(buf);
646 #ifdef MLX5_PMD_SOFT_COUNTERS
647 		total_length += length;
648 #endif
649 		/* Store segment information. */
650 		naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
651 		*dseg = (rte_v128u32_t){
652 			rte_cpu_to_be_32(length),
653 			mlx5_tx_mb2mr(txq, buf),
654 			naddr,
655 			naddr >> 32,
656 		};
657 		(*txq->elts)[++elts_head & elts_m] = buf;
658 		++sg;
659 		/* Advance counter only if all segs are successfully posted. */
660 		if (sg < segs_n)
661 			goto next_seg;
662 		else
663 			j += sg;
664 next_pkt:
665 		if (ds > MLX5_DSEG_MAX) {
666 			txq->stats.oerrors++;
667 			break;
668 		}
669 		++elts_head;
670 		++pkts;
671 		++i;
672 		/* Initialize known and common part of the WQE structure. */
673 		if (tso) {
674 			wqe->ctrl = (rte_v128u32_t){
675 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
676 						 MLX5_OPCODE_TSO),
677 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
678 				0,
679 				0,
680 			};
681 			wqe->eseg = (rte_v128u32_t){
682 				0,
683 				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
684 				0,
685 				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
686 			};
687 		} else {
688 			wqe->ctrl = (rte_v128u32_t){
689 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
690 						 MLX5_OPCODE_SEND),
691 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
692 				0,
693 				0,
694 			};
695 			wqe->eseg = (rte_v128u32_t){
696 				0,
697 				cs_flags,
698 				0,
699 				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
700 			};
701 		}
702 next_wqe:
703 		txq->wqe_ci += (ds + 3) / 4;
704 		/* Save the last successful WQE for completion request */
705 		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
706 #ifdef MLX5_PMD_SOFT_COUNTERS
707 		/* Increment sent bytes counter. */
708 		txq->stats.obytes += total_length;
709 #endif
710 	} while (i < pkts_n);
711 	/* Take a shortcut if nothing must be sent. */
712 	if (unlikely((i + k) == 0))
713 		return 0;
714 	txq->elts_head += (i + j);
715 	/* Check whether completion threshold has been reached. */
716 	comp = txq->elts_comp + i + j + k;
717 	if (comp >= MLX5_TX_COMP_THRESH) {
718 		/* Request completion on last WQE. */
719 		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
720 		/* Save elts_head in unused "immediate" field of WQE. */
721 		last_wqe->ctrl3 = txq->elts_head;
722 		txq->elts_comp = 0;
723 	} else {
724 		txq->elts_comp = comp;
725 	}
726 #ifdef MLX5_PMD_SOFT_COUNTERS
727 	/* Increment sent packets counter. */
728 	txq->stats.opackets += i;
729 #endif
730 	/* Ring QP doorbell. */
731 	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
732 	return i;
733 }
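/*
 * Like the other Tx burst routines in this file, this function is installed
 * as the device tx_pkt_burst callback and is normally invoked through
 * rte_eth_tx_burst(). A minimal sketch (names are illustrative):
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, mbufs, n);
 *
 * Packets that are not accepted (sent < n) remain owned by the caller.
 */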
734 
735 /**
736  * Open an MPW session.
737  *
738  * @param txq
739  *   Pointer to TX queue structure.
740  * @param mpw
741  *   Pointer to MPW session structure.
742  * @param length
743  *   Packet length.
744  */
745 static inline void
746 mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
747 {
748 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
749 	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
750 		(volatile struct mlx5_wqe_data_seg (*)[])
751 		tx_mlx5_wqe(txq, idx + 1);
752 
753 	mpw->state = MLX5_MPW_STATE_OPENED;
754 	mpw->pkts_n = 0;
755 	mpw->len = length;
756 	mpw->total_len = 0;
757 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
758 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
759 	mpw->wqe->eseg.inline_hdr_sz = 0;
760 	mpw->wqe->eseg.rsvd0 = 0;
761 	mpw->wqe->eseg.rsvd1 = 0;
762 	mpw->wqe->eseg.rsvd2 = 0;
763 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
764 					     (txq->wqe_ci << 8) |
765 					     MLX5_OPCODE_TSO);
766 	mpw->wqe->ctrl[2] = 0;
767 	mpw->wqe->ctrl[3] = 0;
768 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
769 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
770 	mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
771 		(((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
772 	mpw->data.dseg[2] = &(*dseg)[0];
773 	mpw->data.dseg[3] = &(*dseg)[1];
774 	mpw->data.dseg[4] = &(*dseg)[2];
775 }
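/*
 * Note: a legacy MPW session spans at most two WQEBBs; dseg[0] and dseg[1]
 * sit in the title WQEBB right after the control and Ethernet segments,
 * while dseg[2..4] start at the following WQEBB, as set up above.
 */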
776 
777 /**
778  * Close an MPW session.
779  *
780  * @param txq
781  *   Pointer to TX queue structure.
782  * @param mpw
783  *   Pointer to MPW session structure.
784  */
785 static inline void
786 mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
787 {
788 	unsigned int num = mpw->pkts_n;
789 
790 	/*
791 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
792 	 * count as 2.
793 	 */
794 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
795 	mpw->state = MLX5_MPW_STATE_CLOSED;
796 	if (num < 3)
797 		++txq->wqe_ci;
798 	else
799 		txq->wqe_ci += 2;
800 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
801 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
802 }
803 
804 /**
805  * DPDK callback for TX with MPW support.
806  *
807  * @param dpdk_txq
808  *   Generic pointer to TX queue structure.
809  * @param[in] pkts
810  *   Packets to transmit.
811  * @param pkts_n
812  *   Number of packets in array.
813  *
814  * @return
815  *   Number of packets successfully transmitted (<= pkts_n).
816  */
817 uint16_t
818 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
819 {
820 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
821 	uint16_t elts_head = txq->elts_head;
822 	const uint16_t elts_n = 1 << txq->elts_n;
823 	const uint16_t elts_m = elts_n - 1;
824 	unsigned int i = 0;
825 	unsigned int j = 0;
826 	uint16_t max_elts;
827 	uint16_t max_wqe;
828 	unsigned int comp;
829 	struct mlx5_mpw mpw = {
830 		.state = MLX5_MPW_STATE_CLOSED,
831 	};
832 
833 	if (unlikely(!pkts_n))
834 		return 0;
835 	/* Prefetch first packet cacheline. */
836 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
837 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
838 	/* Start processing. */
839 	mlx5_tx_complete(txq);
840 	max_elts = (elts_n - (elts_head - txq->elts_tail));
841 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
842 	if (unlikely(!max_wqe))
843 		return 0;
844 	do {
845 		struct rte_mbuf *buf = *(pkts++);
846 		uint32_t length;
847 		unsigned int segs_n = buf->nb_segs;
848 		uint32_t cs_flags = 0;
849 
850 		/*
851 		 * Make sure there is enough room to store this packet and
852 		 * that one ring entry remains unused.
853 		 */
854 		assert(segs_n);
855 		if (max_elts < segs_n)
856 			break;
857 		/* Do not bother with large packets MPW cannot handle. */
858 		if (segs_n > MLX5_MPW_DSEG_MAX) {
859 			txq->stats.oerrors++;
860 			break;
861 		}
862 		max_elts -= segs_n;
863 		--pkts_n;
864 		/* Should we enable HW CKSUM offload */
865 		if (buf->ol_flags &
866 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
867 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
868 		/* Retrieve packet information. */
869 		length = PKT_LEN(buf);
870 		assert(length);
871 		/* Start new session if packet differs. */
872 		if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
873 		    ((mpw.len != length) ||
874 		     (segs_n != 1) ||
875 		     (mpw.wqe->eseg.cs_flags != cs_flags)))
876 			mlx5_mpw_close(txq, &mpw);
877 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
878 			/*
879 			 * A Multi-Packet WQE consumes at most two WQEs.
880 			 * mlx5_mpw_new() expects to be able to use such
881 			 * resources.
882 			 */
883 			if (unlikely(max_wqe < 2))
884 				break;
885 			max_wqe -= 2;
886 			mlx5_mpw_new(txq, &mpw, length);
887 			mpw.wqe->eseg.cs_flags = cs_flags;
888 		}
889 		/* Multi-segment packets must be alone in their MPW. */
890 		assert((segs_n == 1) || (mpw.pkts_n == 0));
891 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
892 		length = 0;
893 #endif
894 		do {
895 			volatile struct mlx5_wqe_data_seg *dseg;
896 			uintptr_t addr;
897 
898 			assert(buf);
899 			(*txq->elts)[elts_head++ & elts_m] = buf;
900 			dseg = mpw.data.dseg[mpw.pkts_n];
901 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
902 			*dseg = (struct mlx5_wqe_data_seg){
903 				.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
904 				.lkey = mlx5_tx_mb2mr(txq, buf),
905 				.addr = rte_cpu_to_be_64(addr),
906 			};
907 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
908 			length += DATA_LEN(buf);
909 #endif
910 			buf = buf->next;
911 			++mpw.pkts_n;
912 			++j;
913 		} while (--segs_n);
914 		assert(length == mpw.len);
915 		if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
916 			mlx5_mpw_close(txq, &mpw);
917 #ifdef MLX5_PMD_SOFT_COUNTERS
918 		/* Increment sent bytes counter. */
919 		txq->stats.obytes += length;
920 #endif
921 		++i;
922 	} while (pkts_n);
923 	/* Take a shortcut if nothing must be sent. */
924 	if (unlikely(i == 0))
925 		return 0;
926 	/* Check whether completion threshold has been reached. */
927 	/* "j" includes both packets and segments. */
928 	comp = txq->elts_comp + j;
929 	if (comp >= MLX5_TX_COMP_THRESH) {
930 		volatile struct mlx5_wqe *wqe = mpw.wqe;
931 
932 		/* Request completion on last WQE. */
933 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
934 		/* Save elts_head in unused "immediate" field of WQE. */
935 		wqe->ctrl[3] = elts_head;
936 		txq->elts_comp = 0;
937 	} else {
938 		txq->elts_comp = comp;
939 	}
940 #ifdef MLX5_PMD_SOFT_COUNTERS
941 	/* Increment sent packets counter. */
942 	txq->stats.opackets += i;
943 #endif
944 	/* Ring QP doorbell. */
945 	if (mpw.state == MLX5_MPW_STATE_OPENED)
946 		mlx5_mpw_close(txq, &mpw);
947 	mlx5_tx_dbrec(txq, mpw.wqe);
948 	txq->elts_head = elts_head;
949 	return i;
950 }
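/*
 * Note: the PMD selects one of the Tx burst routines in this file at
 * configuration time; this legacy MPW variant is only used when
 * multi-packet send is enabled (txq_mpw_en device argument) and the
 * enhanced/inline variants below do not apply.
 */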
951 
952 /**
953  * Open an MPW inline session.
954  *
955  * @param txq
956  *   Pointer to TX queue structure.
957  * @param mpw
958  *   Pointer to MPW session structure.
959  * @param length
960  *   Packet length.
961  */
962 static inline void
963 mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
964 		    uint32_t length)
965 {
966 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
967 	struct mlx5_wqe_inl_small *inl;
968 
969 	mpw->state = MLX5_MPW_INL_STATE_OPENED;
970 	mpw->pkts_n = 0;
971 	mpw->len = length;
972 	mpw->total_len = 0;
973 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
974 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
975 					     (txq->wqe_ci << 8) |
976 					     MLX5_OPCODE_TSO);
977 	mpw->wqe->ctrl[2] = 0;
978 	mpw->wqe->ctrl[3] = 0;
979 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
980 	mpw->wqe->eseg.inline_hdr_sz = 0;
981 	mpw->wqe->eseg.cs_flags = 0;
982 	mpw->wqe->eseg.rsvd0 = 0;
983 	mpw->wqe->eseg.rsvd1 = 0;
984 	mpw->wqe->eseg.rsvd2 = 0;
985 	inl = (struct mlx5_wqe_inl_small *)
986 		(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
987 	mpw->data.raw = (uint8_t *)&inl->raw;
988 }
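/*
 * In an inline MPW session the packet bytes themselves are copied into the
 * work queue, starting at inl->raw right after the control and Ethernet
 * segments; mlx5_mpw_inline_close() later patches the inline byte count.
 */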
989 
990 /**
991  * Close an MPW inline session.
992  *
993  * @param txq
994  *   Pointer to TX queue structure.
995  * @param mpw
996  *   Pointer to MPW session structure.
997  */
998 static inline void
999 mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1000 {
1001 	unsigned int size;
1002 	struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
1003 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
1004 
1005 	size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
1006 	/*
1007 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
1008 	 * count as 2.
1009 	 */
1010 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1011 					     MLX5_WQE_DS(size));
1012 	mpw->state = MLX5_MPW_STATE_CLOSED;
1013 	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
1014 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1015 }
1016 
1017 /**
1018  * DPDK callback for TX with MPW inline support.
1019  *
1020  * @param dpdk_txq
1021  *   Generic pointer to TX queue structure.
1022  * @param[in] pkts
1023  *   Packets to transmit.
1024  * @param pkts_n
1025  *   Number of packets in array.
1026  *
1027  * @return
1028  *   Number of packets successfully transmitted (<= pkts_n).
1029  */
1030 uint16_t
1031 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
1032 			 uint16_t pkts_n)
1033 {
1034 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1035 	uint16_t elts_head = txq->elts_head;
1036 	const uint16_t elts_n = 1 << txq->elts_n;
1037 	const uint16_t elts_m = elts_n - 1;
1038 	unsigned int i = 0;
1039 	unsigned int j = 0;
1040 	uint16_t max_elts;
1041 	uint16_t max_wqe;
1042 	unsigned int comp;
1043 	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
1044 	struct mlx5_mpw mpw = {
1045 		.state = MLX5_MPW_STATE_CLOSED,
1046 	};
1047 	/*
1048 	 * Compute the maximum number of WQEs that can be consumed by inline
1049 	 * code.
1050 	 * - 2 DSEG for:
1051 	 *   - 1 control segment,
1052 	 *   - 1 Ethernet segment,
1053 	 * - N Dseg from the inline request.
1054 	 */
1055 	const unsigned int wqe_inl_n =
1056 		((2 * MLX5_WQE_DWORD_SIZE +
1057 		  txq->max_inline * RTE_CACHE_LINE_SIZE) +
1058 		 RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
1059 
1060 	if (unlikely(!pkts_n))
1061 		return 0;
1062 	/* Prefetch first packet cacheline. */
1063 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
1064 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
1065 	/* Start processing. */
1066 	mlx5_tx_complete(txq);
1067 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1068 	do {
1069 		struct rte_mbuf *buf = *(pkts++);
1070 		uintptr_t addr;
1071 		uint32_t length;
1072 		unsigned int segs_n = buf->nb_segs;
1073 		uint32_t cs_flags = 0;
1074 
1075 		/*
1076 		 * Make sure there is enough room to store this packet and
1077 		 * that one ring entry remains unused.
1078 		 */
1079 		assert(segs_n);
1080 		if (max_elts < segs_n)
1081 			break;
1082 		/* Do not bother with large packets MPW cannot handle. */
1083 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1084 			txq->stats.oerrors++;
1085 			break;
1086 		}
1087 		max_elts -= segs_n;
1088 		--pkts_n;
1089 		/*
1090 		 * Compute max_wqe in case fewer WQEs were consumed in the previous
1091 		 * iteration.
1092 		 */
1093 		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1094 		/* Should we enable HW CKSUM offload */
1095 		if (buf->ol_flags &
1096 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1097 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1098 		/* Retrieve packet information. */
1099 		length = PKT_LEN(buf);
1100 		/* Start new session if packet differs. */
1101 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1102 			if ((mpw.len != length) ||
1103 			    (segs_n != 1) ||
1104 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1105 				mlx5_mpw_close(txq, &mpw);
1106 		} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
1107 			if ((mpw.len != length) ||
1108 			    (segs_n != 1) ||
1109 			    (length > inline_room) ||
1110 			    (mpw.wqe->eseg.cs_flags != cs_flags)) {
1111 				mlx5_mpw_inline_close(txq, &mpw);
1112 				inline_room =
1113 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1114 			}
1115 		}
1116 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
1117 			if ((segs_n != 1) ||
1118 			    (length > inline_room)) {
1119 				/*
1120 				 * A Multi-Packet WQE consumes at most two WQEs.
1121 				 * mlx5_mpw_new() expects to be able to use
1122 				 * such resources.
1123 				 */
1124 				if (unlikely(max_wqe < 2))
1125 					break;
1126 				max_wqe -= 2;
1127 				mlx5_mpw_new(txq, &mpw, length);
1128 				mpw.wqe->eseg.cs_flags = cs_flags;
1129 			} else {
1130 				if (unlikely(max_wqe < wqe_inl_n))
1131 					break;
1132 				max_wqe -= wqe_inl_n;
1133 				mlx5_mpw_inline_new(txq, &mpw, length);
1134 				mpw.wqe->eseg.cs_flags = cs_flags;
1135 			}
1136 		}
1137 		/* Multi-segment packets must be alone in their MPW. */
1138 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1139 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1140 			assert(inline_room ==
1141 			       txq->max_inline * RTE_CACHE_LINE_SIZE);
1142 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1143 			length = 0;
1144 #endif
1145 			do {
1146 				volatile struct mlx5_wqe_data_seg *dseg;
1147 
1148 				assert(buf);
1149 				(*txq->elts)[elts_head++ & elts_m] = buf;
1150 				dseg = mpw.data.dseg[mpw.pkts_n];
1151 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1152 				*dseg = (struct mlx5_wqe_data_seg){
1153 					.byte_count =
1154 					       rte_cpu_to_be_32(DATA_LEN(buf)),
1155 					.lkey = mlx5_tx_mb2mr(txq, buf),
1156 					.addr = rte_cpu_to_be_64(addr),
1157 				};
1158 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1159 				length += DATA_LEN(buf);
1160 #endif
1161 				buf = buf->next;
1162 				++mpw.pkts_n;
1163 				++j;
1164 			} while (--segs_n);
1165 			assert(length == mpw.len);
1166 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
1167 				mlx5_mpw_close(txq, &mpw);
1168 		} else {
1169 			unsigned int max;
1170 
1171 			assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
1172 			assert(length <= inline_room);
1173 			assert(length == DATA_LEN(buf));
1174 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1175 			(*txq->elts)[elts_head++ & elts_m] = buf;
1176 			/* Maximum number of bytes before wrapping. */
1177 			max = ((((uintptr_t)(txq->wqes)) +
1178 				(1 << txq->wqe_n) *
1179 				MLX5_WQE_SIZE) -
1180 			       (uintptr_t)mpw.data.raw);
1181 			if (length > max) {
1182 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1183 					   (void *)addr,
1184 					   max);
1185 				mpw.data.raw = (volatile void *)txq->wqes;
1186 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1187 					   (void *)(addr + max),
1188 					   length - max);
1189 				mpw.data.raw += length - max;
1190 			} else {
1191 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1192 					   (void *)addr,
1193 					   length);
1194 
1195 				if (length == max)
1196 					mpw.data.raw =
1197 						(volatile void *)txq->wqes;
1198 				else
1199 					mpw.data.raw += length;
1200 			}
1201 			++mpw.pkts_n;
1202 			mpw.total_len += length;
1203 			++j;
1204 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
1205 				mlx5_mpw_inline_close(txq, &mpw);
1206 				inline_room =
1207 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1208 			} else {
1209 				inline_room -= length;
1210 			}
1211 		}
1212 #ifdef MLX5_PMD_SOFT_COUNTERS
1213 		/* Increment sent bytes counter. */
1214 		txq->stats.obytes += length;
1215 #endif
1216 		++i;
1217 	} while (pkts_n);
1218 	/* Take a shortcut if nothing must be sent. */
1219 	if (unlikely(i == 0))
1220 		return 0;
1221 	/* Check whether completion threshold has been reached. */
1222 	/* "j" includes both packets and segments. */
1223 	comp = txq->elts_comp + j;
1224 	if (comp >= MLX5_TX_COMP_THRESH) {
1225 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1226 
1227 		/* Request completion on last WQE. */
1228 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1229 		/* Save elts_head in unused "immediate" field of WQE. */
1230 		wqe->ctrl[3] = elts_head;
1231 		txq->elts_comp = 0;
1232 	} else {
1233 		txq->elts_comp = comp;
1234 	}
1235 #ifdef MLX5_PMD_SOFT_COUNTERS
1236 	/* Increment sent packets counter. */
1237 	txq->stats.opackets += i;
1238 #endif
1239 	/* Ring QP doorbell. */
1240 	if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
1241 		mlx5_mpw_inline_close(txq, &mpw);
1242 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1243 		mlx5_mpw_close(txq, &mpw);
1244 	mlx5_tx_dbrec(txq, mpw.wqe);
1245 	txq->elts_head = elts_head;
1246 	return i;
1247 }
1248 
1249 /**
1250  * Open an Enhanced MPW session.
1251  *
1252  * @param txq
1253  *   Pointer to TX queue structure.
1254  * @param mpw
1255  *   Pointer to MPW session structure.
1256  * @param padding
1257  *   Non-zero to pad the title WQEBB and start data at the next WQEBB.
1258  */
1259 static inline void
1260 mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
1261 {
1262 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
1263 
1264 	mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
1265 	mpw->pkts_n = 0;
1266 	mpw->total_len = sizeof(struct mlx5_wqe);
1267 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
1268 	mpw->wqe->ctrl[0] =
1269 		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
1270 				 (txq->wqe_ci << 8) |
1271 				 MLX5_OPCODE_ENHANCED_MPSW);
1272 	mpw->wqe->ctrl[2] = 0;
1273 	mpw->wqe->ctrl[3] = 0;
1274 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
1275 	if (unlikely(padding)) {
1276 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
1277 
1278 		/* Pad the first 2 DWORDs with zero-length inline header. */
1279 		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
1280 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
1281 			rte_cpu_to_be_32(MLX5_INLINE_SEG);
1282 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
1283 		/* Start from the next WQEBB. */
1284 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
1285 	} else {
1286 		mpw->data.raw = (volatile void *)(mpw->wqe + 1);
1287 	}
1288 }
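/*
 * When "padding" is requested, the title WQEBB is completed with two
 * zero-length inline headers so that data segments start on a WQEBB
 * boundary; otherwise they immediately follow the Ethernet segment.
 */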
1289 
1290 /**
1291  * Close an Enhanced MPW session.
1292  *
1293  * @param txq
1294  *   Pointer to TX queue structure.
1295  * @param mpw
1296  *   Pointer to MPW session structure.
1297  *
1298  * @return
1299  *   Number of consumed WQEs.
1300  */
1301 static inline uint16_t
1302 mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1303 {
1304 	uint16_t ret;
1305 
1306 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
1307 	 * count as 2.
1308 	 */
1309 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1310 					     MLX5_WQE_DS(mpw->total_len));
1311 	mpw->state = MLX5_MPW_STATE_CLOSED;
1312 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1313 	txq->wqe_ci += ret;
1314 	return ret;
1315 }
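/*
 * total_len is kept in bytes while the session is open; on close it is
 * converted to 16-byte units for ctrl[1] and to whole WQEBBs to advance
 * wqe_ci, which is also the value returned above.
 */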
1316 
1317 /**
1318  * DPDK callback for TX with Enhanced MPW support.
1319  *
1320  * @param dpdk_txq
1321  *   Generic pointer to TX queue structure.
1322  * @param[in] pkts
1323  *   Packets to transmit.
1324  * @param pkts_n
1325  *   Number of packets in array.
1326  *
1327  * @return
1328  *   Number of packets successfully transmitted (<= pkts_n).
1329  */
1330 uint16_t
1331 mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1332 {
1333 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1334 	uint16_t elts_head = txq->elts_head;
1335 	const uint16_t elts_n = 1 << txq->elts_n;
1336 	const uint16_t elts_m = elts_n - 1;
1337 	unsigned int i = 0;
1338 	unsigned int j = 0;
1339 	uint16_t max_elts;
1340 	uint16_t max_wqe;
1341 	unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
1342 	unsigned int mpw_room = 0;
1343 	unsigned int inl_pad = 0;
1344 	uint32_t inl_hdr;
1345 	struct mlx5_mpw mpw = {
1346 		.state = MLX5_MPW_STATE_CLOSED,
1347 	};
1348 
1349 	if (unlikely(!pkts_n))
1350 		return 0;
1351 	/* Start processing. */
1352 	mlx5_tx_complete(txq);
1353 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1354 	/* A CQE slot must always be available. */
1355 	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
1356 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1357 	if (unlikely(!max_wqe))
1358 		return 0;
1359 	do {
1360 		struct rte_mbuf *buf = *(pkts++);
1361 		uintptr_t addr;
1362 		uint64_t naddr;
1363 		unsigned int n;
1364 		unsigned int do_inline = 0; /* Whether inline is possible. */
1365 		uint32_t length;
1366 		unsigned int segs_n = buf->nb_segs;
1367 		uint32_t cs_flags = 0;
1368 
1369 		/*
1370 		 * Make sure there is enough room to store this packet and
1371 		 * that one ring entry remains unused.
1372 		 */
1373 		assert(segs_n);
1374 		if (max_elts - j < segs_n)
1375 			break;
1376 		/* Do not bother with large packets MPW cannot handle. */
1377 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1378 			txq->stats.oerrors++;
1379 			break;
1380 		}
1381 		/* Should we enable HW CKSUM offload. */
1382 		if (buf->ol_flags &
1383 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1384 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1385 		/* Retrieve packet information. */
1386 		length = PKT_LEN(buf);
1387 		/* Start new session if:
1388 		 * - multi-segment packet
1389 		 * - no space left even for a dseg
1390 		 * - next packet can be inlined with a new WQE
1391 		 * - cs_flags differs
1392 		 * The state can't be MLX5_MPW_STATE_OPENED here as a legacy MPW
1393 		 * session always holds a single multi-segmented packet.
1394 		 */
1395 		if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
1396 			if ((segs_n != 1) ||
1397 			    (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
1398 			      mpw_room) ||
1399 			    (length <= txq->inline_max_packet_sz &&
1400 			     inl_pad + sizeof(inl_hdr) + length >
1401 			      mpw_room) ||
1402 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1403 				max_wqe -= mlx5_empw_close(txq, &mpw);
1404 		}
1405 		if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
1406 			if (unlikely(segs_n != 1)) {
1407 				/* Fall back to legacy MPW.
1408 				 * A MPW session consumes 2 WQEs at most to
1409 				 * An MPW session consumes 2 WQEs at most to
1410 				 */
1411 				if (unlikely(max_wqe < 2))
1412 					break;
1413 				mlx5_mpw_new(txq, &mpw, length);
1414 			} else {
1415 				/* In Enhanced MPW, inline as much as the budget
1416 				/* In Enhanced MPW, inline as much as the budget
1417 				 * allows. The remaining space is filled with
1418 				 * dsegs. If the title WQEBB isn't padded, it
1419 				 * holds 2 dsegs.
1420 				mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
1421 					    (max_inline ? max_inline :
1422 					     pkts_n * MLX5_WQE_DWORD_SIZE) +
1423 					    MLX5_WQE_SIZE);
1424 				if (unlikely(max_wqe * MLX5_WQE_SIZE <
1425 					      mpw_room))
1426 					break;
1427 				/* Don't pad the title WQEBB to not waste WQ. */
1428 				/* Don't pad the title WQEBB so as not to waste WQ space. */
1429 				mpw_room -= mpw.total_len;
1430 				inl_pad = 0;
1431 				do_inline =
1432 					length <= txq->inline_max_packet_sz &&
1433 					sizeof(inl_hdr) + length <= mpw_room &&
1434 					!txq->mpw_hdr_dseg;
1435 			}
1436 			mpw.wqe->eseg.cs_flags = cs_flags;
1437 		} else {
1438 			/* Evaluate whether the next packet can be inlined.
1439 			 * Inlininig is possible when:
1440 			 * Inlining is possible when:
1441 			 * - length is less than the configured value
1442 			 * - length fits in the remaining space
1443 			 */
1444 			do_inline =
1445 				length <= txq->inline_max_packet_sz &&
1446 				inl_pad + sizeof(inl_hdr) + length <=
1447 				 mpw_room &&
1448 				(!txq->mpw_hdr_dseg ||
1449 				 mpw.total_len >= MLX5_WQE_SIZE);
1450 		}
1451 		/* Multi-segment packets must be alone in their MPW. */
1452 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1453 		if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
1454 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1455 			length = 0;
1456 #endif
1457 			do {
1458 				volatile struct mlx5_wqe_data_seg *dseg;
1459 
1460 				assert(buf);
1461 				(*txq->elts)[elts_head++ & elts_m] = buf;
1462 				dseg = mpw.data.dseg[mpw.pkts_n];
1463 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1464 				*dseg = (struct mlx5_wqe_data_seg){
1465 					.byte_count = rte_cpu_to_be_32(
1466 								DATA_LEN(buf)),
1467 					.lkey = mlx5_tx_mb2mr(txq, buf),
1468 					.addr = rte_cpu_to_be_64(addr),
1469 				};
1470 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1471 				length += DATA_LEN(buf);
1472 #endif
1473 				buf = buf->next;
1474 				++j;
1475 				++mpw.pkts_n;
1476 			} while (--segs_n);
1477 			/* A multi-segmented packet takes one MPW session.
1478 			 * TODO: Pack more multi-segmented packets if possible.
1479 			 */
1480 			mlx5_mpw_close(txq, &mpw);
1481 			if (mpw.pkts_n < 3)
1482 				max_wqe--;
1483 			else
1484 				max_wqe -= 2;
1485 		} else if (do_inline) {
1486 			/* Inline packet into WQE. */
1487 			unsigned int max;
1488 
1489 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1490 			assert(length == DATA_LEN(buf));
1491 			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
1492 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1493 			mpw.data.raw = (volatile void *)
1494 				((uintptr_t)mpw.data.raw + inl_pad);
1495 			max = tx_mlx5_wq_tailroom(txq,
1496 					(void *)(uintptr_t)mpw.data.raw);
1497 			/* Copy inline header. */
1498 			mpw.data.raw = (volatile void *)
1499 				mlx5_copy_to_wq(
1500 					  (void *)(uintptr_t)mpw.data.raw,
1501 					  &inl_hdr,
1502 					  sizeof(inl_hdr),
1503 					  (void *)(uintptr_t)txq->wqes,
1504 					  max);
1505 			max = tx_mlx5_wq_tailroom(txq,
1506 					(void *)(uintptr_t)mpw.data.raw);
1507 			/* Copy packet data. */
1508 			mpw.data.raw = (volatile void *)
1509 				mlx5_copy_to_wq(
1510 					  (void *)(uintptr_t)mpw.data.raw,
1511 					  (void *)addr,
1512 					  length,
1513 					  (void *)(uintptr_t)txq->wqes,
1514 					  max);
1515 			++mpw.pkts_n;
1516 			mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
1517 			/* No need to get completion as the entire packet is
1518 			 * copied to WQ. Free the buf right away.
1519 			 */
1520 			rte_pktmbuf_free_seg(buf);
1521 			mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
1522 			/* Add padding before the next packet if any. */
1523 			inl_pad = (((uintptr_t)mpw.data.raw +
1524 					(MLX5_WQE_DWORD_SIZE - 1)) &
1525 					~(MLX5_WQE_DWORD_SIZE - 1)) -
1526 				  (uintptr_t)mpw.data.raw;
1527 		} else {
1528 			/* No inline. Load a dseg with the packet pointer. */
1529 			volatile rte_v128u32_t *dseg;
1530 
1531 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1532 			assert((inl_pad + sizeof(*dseg)) <= mpw_room);
1533 			assert(length == DATA_LEN(buf));
1534 			if (!tx_mlx5_wq_tailroom(txq,
1535 					(void *)((uintptr_t)mpw.data.raw
1536 						+ inl_pad)))
1537 				dseg = (volatile void *)txq->wqes;
1538 			else
1539 				dseg = (volatile void *)
1540 					((uintptr_t)mpw.data.raw +
1541 					 inl_pad);
1542 			(*txq->elts)[elts_head++ & elts_m] = buf;
1543 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1544 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
1545 				rte_prefetch2((void *)(addr +
1546 						n * RTE_CACHE_LINE_SIZE));
1547 			naddr = rte_cpu_to_be_64(addr);
1548 			*dseg = (rte_v128u32_t) {
1549 				rte_cpu_to_be_32(length),
1550 				mlx5_tx_mb2mr(txq, buf),
1551 				naddr,
1552 				naddr >> 32,
1553 			};
1554 			mpw.data.raw = (volatile void *)(dseg + 1);
1555 			mpw.total_len += (inl_pad + sizeof(*dseg));
1556 			++j;
1557 			++mpw.pkts_n;
1558 			mpw_room -= (inl_pad + sizeof(*dseg));
1559 			inl_pad = 0;
1560 		}
1561 #ifdef MLX5_PMD_SOFT_COUNTERS
1562 		/* Increment sent bytes counter. */
1563 		txq->stats.obytes += length;
1564 #endif
1565 		++i;
1566 	} while (i < pkts_n);
1567 	/* Take a shortcut if nothing must be sent. */
1568 	if (unlikely(i == 0))
1569 		return 0;
1570 	/* Check whether completion threshold has been reached. */
1571 	if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
1572 			(uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
1573 			 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
1574 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1575 
1576 		/* Request completion on last WQE. */
1577 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1578 		/* Save elts_head in unused "immediate" field of WQE. */
1579 		wqe->ctrl[3] = elts_head;
1580 		txq->elts_comp = 0;
1581 		txq->mpw_comp = txq->wqe_ci;
1582 		txq->cq_pi++;
1583 	} else {
1584 		txq->elts_comp += j;
1585 	}
1586 #ifdef MLX5_PMD_SOFT_COUNTERS
1587 	/* Increment sent packets counter. */
1588 	txq->stats.opackets += i;
1589 #endif
1590 	if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
1591 		mlx5_empw_close(txq, &mpw);
1592 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1593 		mlx5_mpw_close(txq, &mpw);
1594 	/* Ring QP doorbell. */
1595 	mlx5_tx_dbrec(txq, mpw.wqe);
1596 	txq->elts_head = elts_head;
1597 	return i;
1598 }
1599 
1600 /**
1601  * Translate RX completion flags to packet type.
1602  *
1603  * @param[in] cqe
1604  *   Pointer to CQE.
1605  *
1606  * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
1607  *
1608  * @return
1609  *   Packet type for struct rte_mbuf.
1610  */
1611 static inline uint32_t
1612 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
1613 {
1614 	uint8_t idx;
1615 	uint8_t pinfo = cqe->pkt_info;
1616 	uint16_t ptype = cqe->hdr_type_etc;
1617 
1618 	/*
1619 	 * The index to the array should have:
1620 	 * bit[1:0] = l3_hdr_type
1621 	 * bit[4:2] = l4_hdr_type
1622 	 * bit[5] = ip_frag
1623 	 * bit[6] = tunneled
1624 	 * bit[7] = outer_l3_type
1625 	 */
1626 	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
1627 	return mlx5_ptype_table[idx];
1628 }
1629 
1630 /**
1631  * Get size of the next packet for a given CQE. For compressed CQEs, the
1632  * consumer index is updated only once all packets of the current one have
1633  * been processed.
1634  *
1635  * @param rxq
1636  *   Pointer to RX queue.
1637  * @param cqe
1638  *   CQE to process.
1639  * @param[out] rss_hash
1640  *   Packet RSS Hash result.
1641  *
1642  * @return
1643  *   Packet size in bytes (0 if there is none), -1 in case of completion
1644  *   with error.
1645  */
1646 static inline int
1647 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1648 		 uint16_t cqe_cnt, uint32_t *rss_hash)
1649 {
1650 	struct rxq_zip *zip = &rxq->zip;
1651 	uint16_t cqe_n = cqe_cnt + 1;
1652 	int len = 0;
1653 	uint16_t idx, end;
1654 
1655 	/* Process compressed data in the CQE and mini arrays. */
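	/*
	 * zip bookkeeping: "ai" counts mini-CQEs consumed in the current
	 * session (ai & 7 indexes the active array), "ca" is the CQE index
	 * of that array, "na" the index of the next one, and "cq_ci" is the
	 * consumer index to adopt once the whole session has been processed.
	 */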
1656 	if (zip->ai) {
1657 		volatile struct mlx5_mini_cqe8 (*mc)[8] =
1658 			(volatile struct mlx5_mini_cqe8 (*)[8])
1659 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
1660 
1661 		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1662 		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
1663 		if ((++zip->ai & 7) == 0) {
1664 			/* Invalidate consumed CQEs */
1665 			idx = zip->ca;
1666 			end = zip->na;
1667 			while (idx != end) {
1668 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1669 					MLX5_CQE_INVALIDATE;
1670 				++idx;
1671 			}
1672 			/*
1673 			 * Increment consumer index to skip the number of
1674 			 * CQEs consumed. Hardware leaves holes in the CQ
1675 			 * ring for software use.
1676 			 */
1677 			zip->ca = zip->na;
1678 			zip->na += 8;
1679 		}
1680 		if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1681 			/* Invalidate the rest */
1682 			idx = zip->ca;
1683 			end = zip->cq_ci;
1684 
1685 			while (idx != end) {
1686 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1687 					MLX5_CQE_INVALIDATE;
1688 				++idx;
1689 			}
1690 			rxq->cq_ci = zip->cq_ci;
1691 			zip->ai = 0;
1692 		}
1693 	/* No compressed data, get next CQE and verify if it is compressed. */
1694 	} else {
1695 		int ret;
1696 		int8_t op_own;
1697 
1698 		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1699 		if (unlikely(ret == 1))
1700 			return 0;
1701 		++rxq->cq_ci;
1702 		op_own = cqe->op_own;
1703 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1704 			volatile struct mlx5_mini_cqe8 (*mc)[8] =
1705 				(volatile struct mlx5_mini_cqe8 (*)[8])
1706 				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
1707 							  cqe_cnt].pkt_info);
1708 
1709 			/* Fix endianness. */
1710 			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1711 			/*
1712 			 * Current mini array position is the one returned by
1713 			 * check_cqe().
1714 			 *
1715 			 * If completion comprises several mini arrays, as a
1716 			 * special case the second one is located 7 CQEs after
1717 			 * the initial CQE instead of 8 for subsequent ones.
1718 			 */
1719 			zip->ca = rxq->cq_ci;
1720 			zip->na = zip->ca + 7;
1721 			/* Compute the next non compressed CQE. */
1722 			--rxq->cq_ci;
1723 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1724 			/* Get packet size to return. */
1725 			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1726 			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
1727 			zip->ai = 1;
1728 			/* Prefetch all the entries to be invalidated */
1729 			idx = zip->ca;
1730 			end = zip->cq_ci;
1731 			while (idx != end) {
1732 				rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
1733 				++idx;
1734 			}
1735 		} else {
1736 			len = rte_be_to_cpu_32(cqe->byte_cnt);
1737 			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
1738 		}
1739 		/* Error while receiving packet. */
1740 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
1741 			return -1;
1742 	}
1743 	return len;
1744 }
1745 
1746 /**
1747  * Translate RX completion flags to offload flags.
1748  *
1749  * @param[in] rxq
1750  *   Pointer to RX queue structure.
1751  * @param[in] cqe
1752  *   Pointer to CQE.
1753  *
1754  * @return
1755  *   Offload flags (ol_flags) for struct rte_mbuf.
1756  */
1757 static inline uint32_t
1758 rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
1759 {
1760 	uint32_t ol_flags = 0;
1761 	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1762 
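	/*
	 * TRANSPOSE() shifts the bit selected by the CQE validity mask to the
	 * position of the corresponding mbuf ol_flags bit.
	 */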
1763 	ol_flags =
1764 		TRANSPOSE(flags,
1765 			  MLX5_CQE_RX_L3_HDR_VALID,
1766 			  PKT_RX_IP_CKSUM_GOOD) |
1767 		TRANSPOSE(flags,
1768 			  MLX5_CQE_RX_L4_HDR_VALID,
1769 			  PKT_RX_L4_CKSUM_GOOD);
1770 	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
1771 		ol_flags |=
1772 			TRANSPOSE(flags,
1773 				  MLX5_CQE_RX_L3_HDR_VALID,
1774 				  PKT_RX_IP_CKSUM_GOOD) |
1775 			TRANSPOSE(flags,
1776 				  MLX5_CQE_RX_L4_HDR_VALID,
1777 				  PKT_RX_L4_CKSUM_GOOD);
1778 	return ol_flags;
1779 }
1780 
1781 /**
1782  * DPDK callback for RX.
1783  *
1784  * @param dpdk_rxq
1785  *   Generic pointer to RX queue structure.
1786  * @param[out] pkts
1787  *   Array to store received packets.
1788  * @param pkts_n
1789  *   Maximum number of packets in array.
1790  *
1791  * @return
1792  *   Number of packets successfully received (<= pkts_n).
1793  */
1794 uint16_t
1795 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1796 {
1797 	struct mlx5_rxq_data *rxq = dpdk_rxq;
1798 	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1799 	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1800 	const unsigned int sges_n = rxq->sges_n;
1801 	struct rte_mbuf *pkt = NULL;
1802 	struct rte_mbuf *seg = NULL;
1803 	volatile struct mlx5_cqe *cqe =
1804 		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1805 	unsigned int i = 0;
1806 	unsigned int rq_ci = rxq->rq_ci << sges_n;
1807 	int len = 0; /* keep its value across iterations. */
1808 
1809 	while (pkts_n) {
1810 		unsigned int idx = rq_ci & wqe_cnt;
1811 		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
1812 		struct rte_mbuf *rep = (*rxq->elts)[idx];
1813 		uint32_t rss_hash_res = 0;
1814 
1815 		if (pkt)
1816 			NEXT(seg) = rep;
1817 		seg = rep;
1818 		rte_prefetch0(seg);
1819 		rte_prefetch0(cqe);
1820 		rte_prefetch0(wqe);
1821 		rep = rte_mbuf_raw_alloc(rxq->mp);
1822 		if (unlikely(rep == NULL)) {
1823 			++rxq->stats.rx_nombuf;
1824 			if (!pkt) {
1825 				/*
1826 				 * No buffers were available before we even
1827 				 * started; bail out silently.
1828 				 */
1829 				break;
1830 			}
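			/*
			 * An mbuf could not be allocated in the middle of a
			 * packet: release the segments chained so far and
			 * return only the packets already completed.
			 */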
1831 			while (pkt != seg) {
1832 				assert(pkt != (*rxq->elts)[idx]);
1833 				rep = NEXT(pkt);
1834 				NEXT(pkt) = NULL;
1835 				NB_SEGS(pkt) = 1;
1836 				rte_mbuf_raw_free(pkt);
1837 				pkt = rep;
1838 			}
1839 			break;
1840 		}
1841 		if (!pkt) {
1842 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1843 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
1844 					       &rss_hash_res);
1845 			if (!len) {
1846 				rte_mbuf_raw_free(rep);
1847 				break;
1848 			}
1849 			if (unlikely(len == -1)) {
1850 				/* RX error, packet is likely too large. */
1851 				rte_mbuf_raw_free(rep);
1852 				++rxq->stats.idropped;
1853 				goto skip;
1854 			}
1855 			pkt = seg;
1856 			assert(len >= (rxq->crc_present << 2));
1857 			/* Update packet information. */
1858 			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
1859 			pkt->ol_flags = 0;
1860 			if (rss_hash_res && rxq->rss_hash) {
1861 				pkt->hash.rss = rss_hash_res;
1862 				pkt->ol_flags = PKT_RX_RSS_HASH;
1863 			}
1864 			if (rxq->mark &&
1865 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1866 				pkt->ol_flags |= PKT_RX_FDIR;
1867 				if (cqe->sop_drop_qpn !=
1868 				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1869 					uint32_t mark = cqe->sop_drop_qpn;
1870 
1871 					pkt->ol_flags |= PKT_RX_FDIR_ID;
1872 					pkt->hash.fdir.hi =
1873 						mlx5_flow_mark_get(mark);
1874 				}
1875 			}
1876 			if (rxq->csum | rxq->csum_l2tun)
1877 				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
1878 			if (rxq->vlan_strip &&
1879 			    (cqe->hdr_type_etc &
1880 			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1881 				pkt->ol_flags |= PKT_RX_VLAN |
1882 					PKT_RX_VLAN_STRIPPED;
1883 				pkt->vlan_tci =
1884 					rte_be_to_cpu_16(cqe->vlan_info);
1885 			}
1886 			if (rxq->hw_timestamp) {
1887 				pkt->timestamp =
1888 					rte_be_to_cpu_64(cqe->timestamp);
1889 				pkt->ol_flags |= PKT_RX_TIMESTAMP;
1890 			}
1891 			if (rxq->crc_present)
1892 				len -= ETHER_CRC_LEN;
1893 			PKT_LEN(pkt) = len;
1894 		}
1895 		DATA_LEN(rep) = DATA_LEN(seg);
1896 		PKT_LEN(rep) = PKT_LEN(seg);
1897 		SET_DATA_OFF(rep, DATA_OFF(seg));
1898 		PORT(rep) = PORT(seg);
1899 		(*rxq->elts)[idx] = rep;
1900 		/*
1901 		 * Fill NIC descriptor with the new buffer.  The lkey and size
1902 		 * of the buffers are already known, only the buffer address
1903 		 * changes.
1904 		 */
1905 		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
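		/*
		 * The packet does not fit entirely in this segment: leave
		 * this segment full, move to the next SGE of the stride and
		 * chain another mbuf on the next iteration.
		 */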
1906 		if (len > DATA_LEN(seg)) {
1907 			len -= DATA_LEN(seg);
1908 			++NB_SEGS(pkt);
1909 			++rq_ci;
1910 			continue;
1911 		}
1912 		DATA_LEN(seg) = len;
1913 #ifdef MLX5_PMD_SOFT_COUNTERS
1914 		/* Increment bytes counter. */
1915 		rxq->stats.ibytes += PKT_LEN(pkt);
1916 #endif
1917 		/* Return packet. */
1918 		*(pkts++) = pkt;
1919 		pkt = NULL;
1920 		--pkts_n;
1921 		++i;
1922 skip:
1923 		/* Align consumer index to the next stride. */
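		/*
		 * E.g. with sges_n = 2 (1 << 2 = 4 SGEs per stride) and
		 * rq_ci = 9: 9 >> 2 = 2, incremented to 3, then 3 << 2 = 12,
		 * skipping the unused SGEs 10 and 11 of the current stride.
		 */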
1924 		rq_ci >>= sges_n;
1925 		++rq_ci;
1926 		rq_ci <<= sges_n;
1927 	}
1928 	if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1929 		return 0;
1930 	/* Update the consumer index. */
1931 	rxq->rq_ci = rq_ci >> sges_n;
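	/*
	 * The first barrier makes the CQE/WQE updates above visible in memory
	 * before the CQ doorbell record is written; the second one orders the
	 * CQ doorbell record update before the RQ doorbell record update, so
	 * the device does not see newly posted Rx buffers before the
	 * completions consumed here have been released.
	 */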
1932 	rte_io_wmb();
1933 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1934 	rte_io_wmb();
1935 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1936 #ifdef MLX5_PMD_SOFT_COUNTERS
1937 	/* Increment packets counter. */
1938 	rxq->stats.ipackets += i;
1939 #endif
1940 	return i;
1941 }
1942 
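/*
 * Applications do not call mlx5_rx_burst() directly; it is installed as the
 * rx_pkt_burst callback and reached through rte_eth_rx_burst(). A minimal
 * polling loop looks roughly like the sketch below (port_id and queue_id are
 * placeholders):
 *
 *	struct rte_mbuf *bufs[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, bufs, 32);
 *	uint16_t j;
 *
 *	for (j = 0; j < nb; ++j) {
 *		... process bufs[j] ...
 *		rte_pktmbuf_free(bufs[j]);
 *	}
 */
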
1943 /**
1944  * Dummy DPDK callback for TX.
1945  *
1946  * This function is used to temporarily replace the real callback during
1947  * unsafe control operations on the queue, or in case of error.
1948  *
1949  * @param dpdk_txq
1950  *   Generic pointer to TX queue structure.
1951  * @param[in] pkts
1952  *   Packets to transmit.
1953  * @param pkts_n
1954  *   Number of packets in array.
1955  *
1956  * @return
1957  *   Number of packets successfully transmitted (<= pkts_n).
1958  */
1959 uint16_t
1960 removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1961 {
1962 	(void)dpdk_txq;
1963 	(void)pkts;
1964 	(void)pkts_n;
1965 	return 0;
1966 }
1967 
1968 /**
1969  * Dummy DPDK callback for RX.
1970  *
1971  * This function is used to temporarily replace the real callback during
1972  * unsafe control operations on the queue, or in case of error.
1973  *
1974  * @param dpdk_rxq
1975  *   Generic pointer to RX queue structure.
1976  * @param[out] pkts
1977  *   Array to store received packets.
1978  * @param pkts_n
1979  *   Maximum number of packets in array.
1980  *
1981  * @return
1982  *   Number of packets successfully received (<= pkts_n).
1983  */
1984 uint16_t
1985 removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1986 {
1987 	(void)dpdk_rxq;
1988 	(void)pkts;
1989 	(void)pkts_n;
1990 	return 0;
1991 }
1992 
1993 /*
1994  * Vectorized Rx/Tx routines are not compiled in when the required vector
1995  * instructions are not supported on the target architecture. The following
1996  * weak stubs keep linkage intact when the real implementations, which live
1997  * outside of this file (e.g. mlx5_rxtx_vec_sse.c for x86), are not built.
1998  */
1999 
2000 uint16_t __attribute__((weak))
2001 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2002 {
2003 	(void)dpdk_txq;
2004 	(void)pkts;
2005 	(void)pkts_n;
2006 	return 0;
2007 }
2008 
2009 uint16_t __attribute__((weak))
2010 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2011 {
2012 	(void)dpdk_txq;
2013 	(void)pkts;
2014 	(void)pkts_n;
2015 	return 0;
2016 }
2017 
2018 uint16_t __attribute__((weak))
2019 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
2020 {
2021 	(void)dpdk_rxq;
2022 	(void)pkts;
2023 	(void)pkts_n;
2024 	return 0;
2025 }
2026 
2027 int __attribute__((weak))
2028 priv_check_raw_vec_tx_support(struct priv *priv)
2029 {
2030 	(void)priv;
2031 	return -ENOTSUP;
2032 }
2033 
2034 int __attribute__((weak))
2035 priv_check_vec_tx_support(struct priv *priv)
2036 {
2037 	(void)priv;
2038 	return -ENOTSUP;
2039 }
2040 
2041 int __attribute__((weak))
2042 rxq_check_vec_support(struct mlx5_rxq_data *rxq)
2043 {
2044 	(void)rxq;
2045 	return -ENOTSUP;
2046 }
2047 
2048 int __attribute__((weak))
2049 priv_check_vec_rx_support(struct priv *priv)
2050 {
2051 	(void)priv;
2052 	return -ENOTSUP;
2053 }
2054