xref: /dpdk/drivers/net/mlx5/mlx5_rxtx.c (revision ade02f0f3e7fbb5acd98b2fdf5fce0945ad9554a)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <assert.h>
35 #include <stdint.h>
36 #include <string.h>
37 #include <stdlib.h>
38 
39 /* Verbs header. */
40 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
41 #ifdef PEDANTIC
42 #pragma GCC diagnostic ignored "-Wpedantic"
43 #endif
44 #include <infiniband/verbs.h>
45 #include <infiniband/mlx5dv.h>
46 #ifdef PEDANTIC
47 #pragma GCC diagnostic error "-Wpedantic"
48 #endif
49 
50 #include <rte_mbuf.h>
51 #include <rte_mempool.h>
52 #include <rte_prefetch.h>
53 #include <rte_common.h>
54 #include <rte_branch_prediction.h>
55 #include <rte_ether.h>
56 
57 #include "mlx5.h"
58 #include "mlx5_utils.h"
59 #include "mlx5_rxtx.h"
60 #include "mlx5_autoconf.h"
61 #include "mlx5_defs.h"
62 #include "mlx5_prm.h"
63 
64 static __rte_always_inline uint32_t
65 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
66 
67 static __rte_always_inline int
68 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
69 		 uint16_t cqe_cnt, uint32_t *rss_hash);
70 
71 static __rte_always_inline uint32_t
72 rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
73 
74 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
75 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
76 };
77 
78 /**
79  * Build a table to translate Rx completion flags to packet type.
80  *
81  * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
82  */
83 void
84 mlx5_set_ptype_table(void)
85 {
86 	unsigned int i;
87 	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
88 
89 	/* Last entry must not be overwritten, reserved for errored packet. */
90 	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
91 		(*p)[i] = RTE_PTYPE_UNKNOWN;
92 	/*
93 	 * The index to the array should have:
94 	 * bit[1:0] = l3_hdr_type
95 	 * bit[4:2] = l4_hdr_type
96 	 * bit[5] = ip_frag
97 	 * bit[6] = tunneled
98 	 * bit[7] = outer_l3_type
99 	 */
100 	/* L3 */
101 	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
102 		     RTE_PTYPE_L4_NONFRAG;
103 	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
104 		     RTE_PTYPE_L4_NONFRAG;
105 	/* Fragmented */
106 	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
107 		     RTE_PTYPE_L4_FRAG;
108 	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
109 		     RTE_PTYPE_L4_FRAG;
110 	/* TCP */
111 	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
112 		     RTE_PTYPE_L4_TCP;
113 	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
114 		     RTE_PTYPE_L4_TCP;
115 	/* UDP */
116 	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
117 		     RTE_PTYPE_L4_UDP;
118 	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
119 		     RTE_PTYPE_L4_UDP;
120 	/* Repeat with outer_l3_type being set. Just in case. */
121 	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
122 		     RTE_PTYPE_L4_NONFRAG;
123 	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
124 		     RTE_PTYPE_L4_NONFRAG;
125 	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
126 		     RTE_PTYPE_L4_FRAG;
127 	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
128 		     RTE_PTYPE_L4_FRAG;
129 	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
130 		     RTE_PTYPE_L4_TCP;
131 	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
132 		     RTE_PTYPE_L4_TCP;
133 	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
134 		     RTE_PTYPE_L4_UDP;
135 	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
136 		     RTE_PTYPE_L4_UDP;
137 	/* Tunneled - L3 */
138 	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
139 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
140 		     RTE_PTYPE_INNER_L4_NONFRAG;
141 	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
142 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
143 		     RTE_PTYPE_INNER_L4_NONFRAG;
144 	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
145 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
146 		     RTE_PTYPE_INNER_L4_NONFRAG;
147 	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
148 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
149 		     RTE_PTYPE_INNER_L4_NONFRAG;
150 	/* Tunneled - Fragmented */
151 	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
152 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
153 		     RTE_PTYPE_INNER_L4_FRAG;
154 	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
155 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
156 		     RTE_PTYPE_INNER_L4_FRAG;
157 	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
158 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
159 		     RTE_PTYPE_INNER_L4_FRAG;
160 	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
162 		     RTE_PTYPE_INNER_L4_FRAG;
163 	/* Tunneled - TCP */
164 	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
165 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
166 		     RTE_PTYPE_L4_TCP;
167 	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
169 		     RTE_PTYPE_L4_TCP;
170 	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
171 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
172 		     RTE_PTYPE_L4_TCP;
173 	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
174 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
175 		     RTE_PTYPE_L4_TCP;
176 	/* Tunneled - UDP */
177 	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
178 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
179 		     RTE_PTYPE_L4_UDP;
180 	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
181 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
182 		     RTE_PTYPE_L4_UDP;
183 	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
184 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
185 		     RTE_PTYPE_L4_UDP;
186 	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
187 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
188 		     RTE_PTYPE_L4_UDP;
189 }
190 
191 /**
192  * Return the size of tailroom of WQ.
193  *
194  * @param txq
195  *   Pointer to TX queue structure.
196  * @param addr
197  *   Pointer to tail of WQ.
198  *
199  * @return
200  *   Size of tailroom.
201  */
202 static inline size_t
203 tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
204 {
205 	size_t tailroom;
206 	tailroom = (uintptr_t)(txq->wqes) +
207 		   (1 << txq->wqe_n) * MLX5_WQE_SIZE -
208 		   (uintptr_t)addr;
209 	return tailroom;
210 }
211 
212 /**
213  * Copy data to tailroom of circular queue.
214  *
215  * @param dst
216  *   Pointer to destination.
217  * @param src
218  *   Pointer to source.
219  * @param n
220  *   Number of bytes to copy.
221  * @param base
222  *   Pointer to head of queue.
223  * @param tailroom
224  *   Size of tailroom from dst.
225  *
226  * @return
227  *   Pointer after copied data.
228  */
229 static inline void *
230 mlx5_copy_to_wq(void *dst, const void *src, size_t n,
231 		void *base, size_t tailroom)
232 {
233 	void *ret;
234 
235 	if (n > tailroom) {
236 		rte_memcpy(dst, src, tailroom);
237 		rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
238 			   n - tailroom);
239 		ret = (uint8_t *)base + n - tailroom;
240 	} else {
241 		rte_memcpy(dst, src, n);
242 		ret = (n == tailroom) ? base : (uint8_t *)dst + n;
243 	}
244 	return ret;
245 }
246 
247 /**
248  * DPDK callback to check the status of a tx descriptor.
249  *
250  * @param tx_queue
251  *   The tx queue.
252  * @param[in] offset
253  *   The index of the descriptor in the ring.
254  *
255  * @return
256  *   The status of the tx descriptor.
257  */
258 int
259 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
260 {
261 	struct mlx5_txq_data *txq = tx_queue;
262 	uint16_t used;
263 
264 	mlx5_tx_complete(txq);
265 	used = txq->elts_head - txq->elts_tail;
266 	if (offset < used)
267 		return RTE_ETH_TX_DESC_FULL;
268 	return RTE_ETH_TX_DESC_DONE;
269 }
270 
271 /**
272  * DPDK callback to check the status of a rx descriptor.
273  *
274  * @param rx_queue
275  *   The rx queue.
276  * @param[in] offset
277  *   The index of the descriptor in the ring.
278  *
279  * @return
280  *   The status of the tx descriptor.
281  */
282 int
283 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
284 {
285 	struct mlx5_rxq_data *rxq = rx_queue;
286 	struct rxq_zip *zip = &rxq->zip;
287 	volatile struct mlx5_cqe *cqe;
288 	const unsigned int cqe_n = (1 << rxq->cqe_n);
289 	const unsigned int cqe_cnt = cqe_n - 1;
290 	unsigned int cq_ci;
291 	unsigned int used;
292 
293 	/* if we are processing a compressed cqe */
294 	if (zip->ai) {
295 		used = zip->cqe_cnt - zip->ca;
296 		cq_ci = zip->cq_ci;
297 	} else {
298 		used = 0;
299 		cq_ci = rxq->cq_ci;
300 	}
301 	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
302 	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
303 		int8_t op_own;
304 		unsigned int n;
305 
306 		op_own = cqe->op_own;
307 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
308 			n = rte_be_to_cpu_32(cqe->byte_cnt);
309 		else
310 			n = 1;
311 		cq_ci += n;
312 		used += n;
313 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
314 	}
315 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
316 	if (offset < used)
317 		return RTE_ETH_RX_DESC_DONE;
318 	return RTE_ETH_RX_DESC_AVAIL;
319 }
320 
321 /**
322  * DPDK callback for TX.
323  *
324  * @param dpdk_txq
325  *   Generic pointer to TX queue structure.
326  * @param[in] pkts
327  *   Packets to transmit.
328  * @param pkts_n
329  *   Number of packets in array.
330  *
331  * @return
332  *   Number of packets successfully transmitted (<= pkts_n).
333  */
334 uint16_t
335 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
336 {
337 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
338 	uint16_t elts_head = txq->elts_head;
339 	const uint16_t elts_n = 1 << txq->elts_n;
340 	const uint16_t elts_m = elts_n - 1;
341 	unsigned int i = 0;
342 	unsigned int j = 0;
343 	unsigned int k = 0;
344 	uint16_t max_elts;
345 	unsigned int max_inline = txq->max_inline;
346 	const unsigned int inline_en = !!max_inline && txq->inline_en;
347 	uint16_t max_wqe;
348 	unsigned int comp;
349 	volatile struct mlx5_wqe_v *wqe = NULL;
350 	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
351 	unsigned int segs_n = 0;
352 	struct rte_mbuf *buf = NULL;
353 	uint8_t *raw;
354 
355 	if (unlikely(!pkts_n))
356 		return 0;
357 	/* Prefetch first packet cacheline. */
358 	rte_prefetch0(*pkts);
359 	/* Start processing. */
360 	mlx5_tx_complete(txq);
361 	max_elts = (elts_n - (elts_head - txq->elts_tail));
362 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
363 	if (unlikely(!max_wqe))
364 		return 0;
365 	do {
366 		volatile rte_v128u32_t *dseg = NULL;
367 		uint32_t length;
368 		unsigned int ds = 0;
369 		unsigned int sg = 0; /* counter of additional segs attached. */
370 		uintptr_t addr;
371 		uint64_t naddr;
372 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
373 		uint16_t tso_header_sz = 0;
374 		uint16_t ehdr;
375 		uint8_t cs_flags = 0;
376 		uint64_t tso = 0;
377 		uint16_t tso_segsz = 0;
378 #ifdef MLX5_PMD_SOFT_COUNTERS
379 		uint32_t total_length = 0;
380 #endif
381 
382 		/* first_seg */
383 		buf = *pkts;
384 		segs_n = buf->nb_segs;
385 		/*
386 		 * Make sure there is enough room to store this packet and
387 		 * that one ring entry remains unused.
388 		 */
389 		assert(segs_n);
390 		if (max_elts < segs_n)
391 			break;
392 		max_elts -= segs_n;
393 		--segs_n;
394 		if (unlikely(--max_wqe == 0))
395 			break;
396 		wqe = (volatile struct mlx5_wqe_v *)
397 			tx_mlx5_wqe(txq, txq->wqe_ci);
398 		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
399 		if (pkts_n - i > 1)
400 			rte_prefetch0(*(pkts + 1));
401 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
402 		length = DATA_LEN(buf);
403 		ehdr = (((uint8_t *)addr)[1] << 8) |
404 		       ((uint8_t *)addr)[0];
405 #ifdef MLX5_PMD_SOFT_COUNTERS
406 		total_length = length;
407 #endif
408 		if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
409 			txq->stats.oerrors++;
410 			break;
411 		}
412 		/* Update element. */
413 		(*txq->elts)[elts_head & elts_m] = buf;
414 		/* Prefetch next buffer data. */
415 		if (pkts_n - i > 1)
416 			rte_prefetch0(
417 			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
418 		/* Should we enable HW CKSUM offload */
419 		if (buf->ol_flags &
420 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
421 			const uint64_t is_tunneled = buf->ol_flags &
422 						     (PKT_TX_TUNNEL_GRE |
423 						      PKT_TX_TUNNEL_VXLAN);
424 
425 			if (is_tunneled && txq->tunnel_en) {
426 				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
427 					   MLX5_ETH_WQE_L4_INNER_CSUM;
428 				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
429 					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
430 			} else {
431 				cs_flags = MLX5_ETH_WQE_L3_CSUM |
432 					   MLX5_ETH_WQE_L4_CSUM;
433 			}
434 		}
435 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
436 		/* Replace the Ethernet type by the VLAN if necessary. */
437 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
438 			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
439 							 buf->vlan_tci);
440 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;
441 
442 			addr += 2;
443 			length -= 2;
444 			/* Copy Destination and source mac address. */
445 			memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
446 			/* Copy VLAN. */
447 			memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
448 			/* Copy missing two bytes to end the DSeg. */
449 			memcpy((uint8_t *)raw + len + sizeof(vlan),
450 			       ((uint8_t *)addr) + len, 2);
451 			addr += len + 2;
452 			length -= (len + 2);
453 		} else {
454 			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
455 			       MLX5_WQE_DWORD_SIZE);
456 			length -= pkt_inline_sz;
457 			addr += pkt_inline_sz;
458 		}
459 		raw += MLX5_WQE_DWORD_SIZE;
460 		if (txq->tso_en) {
461 			tso = buf->ol_flags & PKT_TX_TCP_SEG;
462 			if (tso) {
463 				uintptr_t end = (uintptr_t)
464 						(((uintptr_t)txq->wqes) +
465 						(1 << txq->wqe_n) *
466 						MLX5_WQE_SIZE);
467 				unsigned int copy_b;
468 				uint8_t vlan_sz = (buf->ol_flags &
469 						  PKT_TX_VLAN_PKT) ? 4 : 0;
470 				const uint64_t is_tunneled =
471 							buf->ol_flags &
472 							(PKT_TX_TUNNEL_GRE |
473 							 PKT_TX_TUNNEL_VXLAN);
474 
475 				tso_header_sz = buf->l2_len + vlan_sz +
476 						buf->l3_len + buf->l4_len;
477 				tso_segsz = buf->tso_segsz;
478 				if (unlikely(tso_segsz == 0)) {
479 					txq->stats.oerrors++;
480 					break;
481 				}
482 				if (is_tunneled	&& txq->tunnel_en) {
483 					tso_header_sz += buf->outer_l2_len +
484 							 buf->outer_l3_len;
485 					cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
486 				} else {
487 					cs_flags |= MLX5_ETH_WQE_L4_CSUM;
488 				}
489 				if (unlikely(tso_header_sz >
490 					     MLX5_MAX_TSO_HEADER)) {
491 					txq->stats.oerrors++;
492 					break;
493 				}
494 				copy_b = tso_header_sz - pkt_inline_sz;
495 				/* First seg must contain all headers. */
496 				assert(copy_b <= length);
497 				if (copy_b &&
498 				   ((end - (uintptr_t)raw) > copy_b)) {
499 					uint16_t n = (MLX5_WQE_DS(copy_b) -
500 						      1 + 3) / 4;
501 
502 					if (unlikely(max_wqe < n))
503 						break;
504 					max_wqe -= n;
505 					rte_memcpy((void *)raw,
506 						   (void *)addr, copy_b);
507 					addr += copy_b;
508 					length -= copy_b;
509 					/* Include padding for TSO header. */
510 					copy_b = MLX5_WQE_DS(copy_b) *
511 						 MLX5_WQE_DWORD_SIZE;
512 					pkt_inline_sz += copy_b;
513 					raw += copy_b;
514 				} else {
515 					/* NOP WQE. */
516 					wqe->ctrl = (rte_v128u32_t){
517 						     rte_cpu_to_be_32(
518 							txq->wqe_ci << 8),
519 						     rte_cpu_to_be_32(
520 							txq->qp_num_8s | 1),
521 						     0,
522 						     0,
523 					};
524 					ds = 1;
525 					total_length = 0;
526 					k++;
527 					goto next_wqe;
528 				}
529 			}
530 		}
531 		/* Inline if enough room. */
532 		if (inline_en || tso) {
533 			uint32_t inl;
534 			uintptr_t end = (uintptr_t)
535 				(((uintptr_t)txq->wqes) +
536 				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
537 			unsigned int inline_room = max_inline *
538 						   RTE_CACHE_LINE_SIZE -
539 						   (pkt_inline_sz - 2) -
540 						   !!tso * sizeof(inl);
541 			uintptr_t addr_end = (addr + inline_room) &
542 					     ~(RTE_CACHE_LINE_SIZE - 1);
543 			unsigned int copy_b = (addr_end > addr) ?
544 				RTE_MIN((addr_end - addr), length) :
545 				0;
546 
547 			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
548 				/*
549 				 * One Dseg remains in the current WQE.  To
550 				 * keep the computation positive, it is
551 				 * removed after the bytes to Dseg conversion.
552 				 */
553 				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
554 
555 				if (unlikely(max_wqe < n))
556 					break;
557 				max_wqe -= n;
558 				if (tso) {
559 					uint32_t inl =
560 					rte_cpu_to_be_32(copy_b |
561 							 MLX5_INLINE_SEG);
562 
563 					pkt_inline_sz =
564 						MLX5_WQE_DS(tso_header_sz) *
565 						MLX5_WQE_DWORD_SIZE;
566 
567 					rte_memcpy((void *)raw,
568 						   (void *)&inl, sizeof(inl));
569 					raw += sizeof(inl);
570 					pkt_inline_sz += sizeof(inl);
571 				}
572 				rte_memcpy((void *)raw, (void *)addr, copy_b);
573 				addr += copy_b;
574 				length -= copy_b;
575 				pkt_inline_sz += copy_b;
576 			}
577 			/*
578 			 * 2 DWORDs consumed by the WQE header + ETH segment +
579 			 * the size of the inline part of the packet.
580 			 */
581 			ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
582 			if (length > 0) {
583 				if (ds % (MLX5_WQE_SIZE /
584 					  MLX5_WQE_DWORD_SIZE) == 0) {
585 					if (unlikely(--max_wqe == 0))
586 						break;
587 					dseg = (volatile rte_v128u32_t *)
588 					       tx_mlx5_wqe(txq, txq->wqe_ci +
589 							   ds / 4);
590 				} else {
591 					dseg = (volatile rte_v128u32_t *)
592 						((uintptr_t)wqe +
593 						 (ds * MLX5_WQE_DWORD_SIZE));
594 				}
595 				goto use_dseg;
596 			} else if (!segs_n) {
597 				goto next_pkt;
598 			} else {
599 				/* dseg will be advance as part of next_seg */
600 				dseg = (volatile rte_v128u32_t *)
601 					((uintptr_t)wqe +
602 					 ((ds - 1) * MLX5_WQE_DWORD_SIZE));
603 				goto next_seg;
604 			}
605 		} else {
606 			/*
607 			 * No inline has been done in the packet, only the
608 			 * Ethernet Header as been stored.
609 			 */
610 			dseg = (volatile rte_v128u32_t *)
611 				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
612 			ds = 3;
613 use_dseg:
614 			/* Add the remaining packet as a simple ds. */
615 			naddr = rte_cpu_to_be_64(addr);
616 			*dseg = (rte_v128u32_t){
617 				rte_cpu_to_be_32(length),
618 				mlx5_tx_mb2mr(txq, buf),
619 				naddr,
620 				naddr >> 32,
621 			};
622 			++ds;
623 			if (!segs_n)
624 				goto next_pkt;
625 		}
626 next_seg:
627 		assert(buf);
628 		assert(ds);
629 		assert(wqe);
630 		/*
631 		 * Spill on next WQE when the current one does not have
632 		 * enough room left. Size of WQE must a be a multiple
633 		 * of data segment size.
634 		 */
635 		assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
636 		if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
637 			if (unlikely(--max_wqe == 0))
638 				break;
639 			dseg = (volatile rte_v128u32_t *)
640 			       tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
641 			rte_prefetch0(tx_mlx5_wqe(txq,
642 						  txq->wqe_ci + ds / 4 + 1));
643 		} else {
644 			++dseg;
645 		}
646 		++ds;
647 		buf = buf->next;
648 		assert(buf);
649 		length = DATA_LEN(buf);
650 #ifdef MLX5_PMD_SOFT_COUNTERS
651 		total_length += length;
652 #endif
653 		/* Store segment information. */
654 		naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
655 		*dseg = (rte_v128u32_t){
656 			rte_cpu_to_be_32(length),
657 			mlx5_tx_mb2mr(txq, buf),
658 			naddr,
659 			naddr >> 32,
660 		};
661 		(*txq->elts)[++elts_head & elts_m] = buf;
662 		++sg;
663 		/* Advance counter only if all segs are successfully posted. */
664 		if (sg < segs_n)
665 			goto next_seg;
666 		else
667 			j += sg;
668 next_pkt:
669 		if (ds > MLX5_DSEG_MAX) {
670 			txq->stats.oerrors++;
671 			break;
672 		}
673 		++elts_head;
674 		++pkts;
675 		++i;
676 		/* Initialize known and common part of the WQE structure. */
677 		if (tso) {
678 			wqe->ctrl = (rte_v128u32_t){
679 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
680 						 MLX5_OPCODE_TSO),
681 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
682 				0,
683 				0,
684 			};
685 			wqe->eseg = (rte_v128u32_t){
686 				0,
687 				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
688 				0,
689 				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
690 			};
691 		} else {
692 			wqe->ctrl = (rte_v128u32_t){
693 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
694 						 MLX5_OPCODE_SEND),
695 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
696 				0,
697 				0,
698 			};
699 			wqe->eseg = (rte_v128u32_t){
700 				0,
701 				cs_flags,
702 				0,
703 				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
704 			};
705 		}
706 next_wqe:
707 		txq->wqe_ci += (ds + 3) / 4;
708 		/* Save the last successful WQE for completion request */
709 		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
710 #ifdef MLX5_PMD_SOFT_COUNTERS
711 		/* Increment sent bytes counter. */
712 		txq->stats.obytes += total_length;
713 #endif
714 	} while (i < pkts_n);
715 	/* Take a shortcut if nothing must be sent. */
716 	if (unlikely((i + k) == 0))
717 		return 0;
718 	txq->elts_head += (i + j);
719 	/* Check whether completion threshold has been reached. */
720 	comp = txq->elts_comp + i + j + k;
721 	if (comp >= MLX5_TX_COMP_THRESH) {
722 		/* Request completion on last WQE. */
723 		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
724 		/* Save elts_head in unused "immediate" field of WQE. */
725 		last_wqe->ctrl3 = txq->elts_head;
726 		txq->elts_comp = 0;
727 	} else {
728 		txq->elts_comp = comp;
729 	}
730 #ifdef MLX5_PMD_SOFT_COUNTERS
731 	/* Increment sent packets counter. */
732 	txq->stats.opackets += i;
733 #endif
734 	/* Ring QP doorbell. */
735 	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
736 	return i;
737 }
738 
739 /**
740  * Open a MPW session.
741  *
742  * @param txq
743  *   Pointer to TX queue structure.
744  * @param mpw
745  *   Pointer to MPW session structure.
746  * @param length
747  *   Packet length.
748  */
749 static inline void
750 mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
751 {
752 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
753 	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
754 		(volatile struct mlx5_wqe_data_seg (*)[])
755 		tx_mlx5_wqe(txq, idx + 1);
756 
757 	mpw->state = MLX5_MPW_STATE_OPENED;
758 	mpw->pkts_n = 0;
759 	mpw->len = length;
760 	mpw->total_len = 0;
761 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
762 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
763 	mpw->wqe->eseg.inline_hdr_sz = 0;
764 	mpw->wqe->eseg.rsvd0 = 0;
765 	mpw->wqe->eseg.rsvd1 = 0;
766 	mpw->wqe->eseg.rsvd2 = 0;
767 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
768 					     (txq->wqe_ci << 8) |
769 					     MLX5_OPCODE_TSO);
770 	mpw->wqe->ctrl[2] = 0;
771 	mpw->wqe->ctrl[3] = 0;
772 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
773 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
774 	mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
775 		(((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
776 	mpw->data.dseg[2] = &(*dseg)[0];
777 	mpw->data.dseg[3] = &(*dseg)[1];
778 	mpw->data.dseg[4] = &(*dseg)[2];
779 }
780 
781 /**
782  * Close a MPW session.
783  *
784  * @param txq
785  *   Pointer to TX queue structure.
786  * @param mpw
787  *   Pointer to MPW session structure.
788  */
789 static inline void
790 mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
791 {
792 	unsigned int num = mpw->pkts_n;
793 
794 	/*
795 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
796 	 * count as 2.
797 	 */
798 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
799 	mpw->state = MLX5_MPW_STATE_CLOSED;
800 	if (num < 3)
801 		++txq->wqe_ci;
802 	else
803 		txq->wqe_ci += 2;
804 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
805 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
806 }
807 
808 /**
809  * DPDK callback for TX with MPW support.
810  *
811  * @param dpdk_txq
812  *   Generic pointer to TX queue structure.
813  * @param[in] pkts
814  *   Packets to transmit.
815  * @param pkts_n
816  *   Number of packets in array.
817  *
818  * @return
819  *   Number of packets successfully transmitted (<= pkts_n).
820  */
821 uint16_t
822 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
823 {
824 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
825 	uint16_t elts_head = txq->elts_head;
826 	const uint16_t elts_n = 1 << txq->elts_n;
827 	const uint16_t elts_m = elts_n - 1;
828 	unsigned int i = 0;
829 	unsigned int j = 0;
830 	uint16_t max_elts;
831 	uint16_t max_wqe;
832 	unsigned int comp;
833 	struct mlx5_mpw mpw = {
834 		.state = MLX5_MPW_STATE_CLOSED,
835 	};
836 
837 	if (unlikely(!pkts_n))
838 		return 0;
839 	/* Prefetch first packet cacheline. */
840 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
841 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
842 	/* Start processing. */
843 	mlx5_tx_complete(txq);
844 	max_elts = (elts_n - (elts_head - txq->elts_tail));
845 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
846 	if (unlikely(!max_wqe))
847 		return 0;
848 	do {
849 		struct rte_mbuf *buf = *(pkts++);
850 		uint32_t length;
851 		unsigned int segs_n = buf->nb_segs;
852 		uint32_t cs_flags = 0;
853 
854 		/*
855 		 * Make sure there is enough room to store this packet and
856 		 * that one ring entry remains unused.
857 		 */
858 		assert(segs_n);
859 		if (max_elts < segs_n)
860 			break;
861 		/* Do not bother with large packets MPW cannot handle. */
862 		if (segs_n > MLX5_MPW_DSEG_MAX) {
863 			txq->stats.oerrors++;
864 			break;
865 		}
866 		max_elts -= segs_n;
867 		--pkts_n;
868 		/* Should we enable HW CKSUM offload */
869 		if (buf->ol_flags &
870 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
871 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
872 		/* Retrieve packet information. */
873 		length = PKT_LEN(buf);
874 		assert(length);
875 		/* Start new session if packet differs. */
876 		if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
877 		    ((mpw.len != length) ||
878 		     (segs_n != 1) ||
879 		     (mpw.wqe->eseg.cs_flags != cs_flags)))
880 			mlx5_mpw_close(txq, &mpw);
881 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
882 			/*
883 			 * Multi-Packet WQE consumes at most two WQE.
884 			 * mlx5_mpw_new() expects to be able to use such
885 			 * resources.
886 			 */
887 			if (unlikely(max_wqe < 2))
888 				break;
889 			max_wqe -= 2;
890 			mlx5_mpw_new(txq, &mpw, length);
891 			mpw.wqe->eseg.cs_flags = cs_flags;
892 		}
893 		/* Multi-segment packets must be alone in their MPW. */
894 		assert((segs_n == 1) || (mpw.pkts_n == 0));
895 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
896 		length = 0;
897 #endif
898 		do {
899 			volatile struct mlx5_wqe_data_seg *dseg;
900 			uintptr_t addr;
901 
902 			assert(buf);
903 			(*txq->elts)[elts_head++ & elts_m] = buf;
904 			dseg = mpw.data.dseg[mpw.pkts_n];
905 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
906 			*dseg = (struct mlx5_wqe_data_seg){
907 				.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
908 				.lkey = mlx5_tx_mb2mr(txq, buf),
909 				.addr = rte_cpu_to_be_64(addr),
910 			};
911 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
912 			length += DATA_LEN(buf);
913 #endif
914 			buf = buf->next;
915 			++mpw.pkts_n;
916 			++j;
917 		} while (--segs_n);
918 		assert(length == mpw.len);
919 		if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
920 			mlx5_mpw_close(txq, &mpw);
921 #ifdef MLX5_PMD_SOFT_COUNTERS
922 		/* Increment sent bytes counter. */
923 		txq->stats.obytes += length;
924 #endif
925 		++i;
926 	} while (pkts_n);
927 	/* Take a shortcut if nothing must be sent. */
928 	if (unlikely(i == 0))
929 		return 0;
930 	/* Check whether completion threshold has been reached. */
931 	/* "j" includes both packets and segments. */
932 	comp = txq->elts_comp + j;
933 	if (comp >= MLX5_TX_COMP_THRESH) {
934 		volatile struct mlx5_wqe *wqe = mpw.wqe;
935 
936 		/* Request completion on last WQE. */
937 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
938 		/* Save elts_head in unused "immediate" field of WQE. */
939 		wqe->ctrl[3] = elts_head;
940 		txq->elts_comp = 0;
941 	} else {
942 		txq->elts_comp = comp;
943 	}
944 #ifdef MLX5_PMD_SOFT_COUNTERS
945 	/* Increment sent packets counter. */
946 	txq->stats.opackets += i;
947 #endif
948 	/* Ring QP doorbell. */
949 	if (mpw.state == MLX5_MPW_STATE_OPENED)
950 		mlx5_mpw_close(txq, &mpw);
951 	mlx5_tx_dbrec(txq, mpw.wqe);
952 	txq->elts_head = elts_head;
953 	return i;
954 }
955 
956 /**
957  * Open a MPW inline session.
958  *
959  * @param txq
960  *   Pointer to TX queue structure.
961  * @param mpw
962  *   Pointer to MPW session structure.
963  * @param length
964  *   Packet length.
965  */
966 static inline void
967 mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
968 		    uint32_t length)
969 {
970 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
971 	struct mlx5_wqe_inl_small *inl;
972 
973 	mpw->state = MLX5_MPW_INL_STATE_OPENED;
974 	mpw->pkts_n = 0;
975 	mpw->len = length;
976 	mpw->total_len = 0;
977 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
978 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
979 					     (txq->wqe_ci << 8) |
980 					     MLX5_OPCODE_TSO);
981 	mpw->wqe->ctrl[2] = 0;
982 	mpw->wqe->ctrl[3] = 0;
983 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
984 	mpw->wqe->eseg.inline_hdr_sz = 0;
985 	mpw->wqe->eseg.cs_flags = 0;
986 	mpw->wqe->eseg.rsvd0 = 0;
987 	mpw->wqe->eseg.rsvd1 = 0;
988 	mpw->wqe->eseg.rsvd2 = 0;
989 	inl = (struct mlx5_wqe_inl_small *)
990 		(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
991 	mpw->data.raw = (uint8_t *)&inl->raw;
992 }
993 
994 /**
995  * Close a MPW inline session.
996  *
997  * @param txq
998  *   Pointer to TX queue structure.
999  * @param mpw
1000  *   Pointer to MPW session structure.
1001  */
1002 static inline void
1003 mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1004 {
1005 	unsigned int size;
1006 	struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
1007 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
1008 
1009 	size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
1010 	/*
1011 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
1012 	 * count as 2.
1013 	 */
1014 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1015 					     MLX5_WQE_DS(size));
1016 	mpw->state = MLX5_MPW_STATE_CLOSED;
1017 	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
1018 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1019 }
1020 
1021 /**
1022  * DPDK callback for TX with MPW inline support.
1023  *
1024  * @param dpdk_txq
1025  *   Generic pointer to TX queue structure.
1026  * @param[in] pkts
1027  *   Packets to transmit.
1028  * @param pkts_n
1029  *   Number of packets in array.
1030  *
1031  * @return
1032  *   Number of packets successfully transmitted (<= pkts_n).
1033  */
1034 uint16_t
1035 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
1036 			 uint16_t pkts_n)
1037 {
1038 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1039 	uint16_t elts_head = txq->elts_head;
1040 	const uint16_t elts_n = 1 << txq->elts_n;
1041 	const uint16_t elts_m = elts_n - 1;
1042 	unsigned int i = 0;
1043 	unsigned int j = 0;
1044 	uint16_t max_elts;
1045 	uint16_t max_wqe;
1046 	unsigned int comp;
1047 	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
1048 	struct mlx5_mpw mpw = {
1049 		.state = MLX5_MPW_STATE_CLOSED,
1050 	};
1051 	/*
1052 	 * Compute the maximum number of WQE which can be consumed by inline
1053 	 * code.
1054 	 * - 2 DSEG for:
1055 	 *   - 1 control segment,
1056 	 *   - 1 Ethernet segment,
1057 	 * - N Dseg from the inline request.
1058 	 */
1059 	const unsigned int wqe_inl_n =
1060 		((2 * MLX5_WQE_DWORD_SIZE +
1061 		  txq->max_inline * RTE_CACHE_LINE_SIZE) +
1062 		 RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
1063 
1064 	if (unlikely(!pkts_n))
1065 		return 0;
1066 	/* Prefetch first packet cacheline. */
1067 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
1068 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
1069 	/* Start processing. */
1070 	mlx5_tx_complete(txq);
1071 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1072 	do {
1073 		struct rte_mbuf *buf = *(pkts++);
1074 		uintptr_t addr;
1075 		uint32_t length;
1076 		unsigned int segs_n = buf->nb_segs;
1077 		uint32_t cs_flags = 0;
1078 
1079 		/*
1080 		 * Make sure there is enough room to store this packet and
1081 		 * that one ring entry remains unused.
1082 		 */
1083 		assert(segs_n);
1084 		if (max_elts < segs_n)
1085 			break;
1086 		/* Do not bother with large packets MPW cannot handle. */
1087 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1088 			txq->stats.oerrors++;
1089 			break;
1090 		}
1091 		max_elts -= segs_n;
1092 		--pkts_n;
1093 		/*
1094 		 * Compute max_wqe in case less WQE were consumed in previous
1095 		 * iteration.
1096 		 */
1097 		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1098 		/* Should we enable HW CKSUM offload */
1099 		if (buf->ol_flags &
1100 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1101 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1102 		/* Retrieve packet information. */
1103 		length = PKT_LEN(buf);
1104 		/* Start new session if packet differs. */
1105 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1106 			if ((mpw.len != length) ||
1107 			    (segs_n != 1) ||
1108 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1109 				mlx5_mpw_close(txq, &mpw);
1110 		} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
1111 			if ((mpw.len != length) ||
1112 			    (segs_n != 1) ||
1113 			    (length > inline_room) ||
1114 			    (mpw.wqe->eseg.cs_flags != cs_flags)) {
1115 				mlx5_mpw_inline_close(txq, &mpw);
1116 				inline_room =
1117 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1118 			}
1119 		}
1120 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
1121 			if ((segs_n != 1) ||
1122 			    (length > inline_room)) {
1123 				/*
1124 				 * Multi-Packet WQE consumes at most two WQE.
1125 				 * mlx5_mpw_new() expects to be able to use
1126 				 * such resources.
1127 				 */
1128 				if (unlikely(max_wqe < 2))
1129 					break;
1130 				max_wqe -= 2;
1131 				mlx5_mpw_new(txq, &mpw, length);
1132 				mpw.wqe->eseg.cs_flags = cs_flags;
1133 			} else {
1134 				if (unlikely(max_wqe < wqe_inl_n))
1135 					break;
1136 				max_wqe -= wqe_inl_n;
1137 				mlx5_mpw_inline_new(txq, &mpw, length);
1138 				mpw.wqe->eseg.cs_flags = cs_flags;
1139 			}
1140 		}
1141 		/* Multi-segment packets must be alone in their MPW. */
1142 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1143 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1144 			assert(inline_room ==
1145 			       txq->max_inline * RTE_CACHE_LINE_SIZE);
1146 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1147 			length = 0;
1148 #endif
1149 			do {
1150 				volatile struct mlx5_wqe_data_seg *dseg;
1151 
1152 				assert(buf);
1153 				(*txq->elts)[elts_head++ & elts_m] = buf;
1154 				dseg = mpw.data.dseg[mpw.pkts_n];
1155 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1156 				*dseg = (struct mlx5_wqe_data_seg){
1157 					.byte_count =
1158 					       rte_cpu_to_be_32(DATA_LEN(buf)),
1159 					.lkey = mlx5_tx_mb2mr(txq, buf),
1160 					.addr = rte_cpu_to_be_64(addr),
1161 				};
1162 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1163 				length += DATA_LEN(buf);
1164 #endif
1165 				buf = buf->next;
1166 				++mpw.pkts_n;
1167 				++j;
1168 			} while (--segs_n);
1169 			assert(length == mpw.len);
1170 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
1171 				mlx5_mpw_close(txq, &mpw);
1172 		} else {
1173 			unsigned int max;
1174 
1175 			assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
1176 			assert(length <= inline_room);
1177 			assert(length == DATA_LEN(buf));
1178 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1179 			(*txq->elts)[elts_head++ & elts_m] = buf;
1180 			/* Maximum number of bytes before wrapping. */
1181 			max = ((((uintptr_t)(txq->wqes)) +
1182 				(1 << txq->wqe_n) *
1183 				MLX5_WQE_SIZE) -
1184 			       (uintptr_t)mpw.data.raw);
1185 			if (length > max) {
1186 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1187 					   (void *)addr,
1188 					   max);
1189 				mpw.data.raw = (volatile void *)txq->wqes;
1190 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1191 					   (void *)(addr + max),
1192 					   length - max);
1193 				mpw.data.raw += length - max;
1194 			} else {
1195 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1196 					   (void *)addr,
1197 					   length);
1198 
1199 				if (length == max)
1200 					mpw.data.raw =
1201 						(volatile void *)txq->wqes;
1202 				else
1203 					mpw.data.raw += length;
1204 			}
1205 			++mpw.pkts_n;
1206 			mpw.total_len += length;
1207 			++j;
1208 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
1209 				mlx5_mpw_inline_close(txq, &mpw);
1210 				inline_room =
1211 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1212 			} else {
1213 				inline_room -= length;
1214 			}
1215 		}
1216 #ifdef MLX5_PMD_SOFT_COUNTERS
1217 		/* Increment sent bytes counter. */
1218 		txq->stats.obytes += length;
1219 #endif
1220 		++i;
1221 	} while (pkts_n);
1222 	/* Take a shortcut if nothing must be sent. */
1223 	if (unlikely(i == 0))
1224 		return 0;
1225 	/* Check whether completion threshold has been reached. */
1226 	/* "j" includes both packets and segments. */
1227 	comp = txq->elts_comp + j;
1228 	if (comp >= MLX5_TX_COMP_THRESH) {
1229 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1230 
1231 		/* Request completion on last WQE. */
1232 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1233 		/* Save elts_head in unused "immediate" field of WQE. */
1234 		wqe->ctrl[3] = elts_head;
1235 		txq->elts_comp = 0;
1236 	} else {
1237 		txq->elts_comp = comp;
1238 	}
1239 #ifdef MLX5_PMD_SOFT_COUNTERS
1240 	/* Increment sent packets counter. */
1241 	txq->stats.opackets += i;
1242 #endif
1243 	/* Ring QP doorbell. */
1244 	if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
1245 		mlx5_mpw_inline_close(txq, &mpw);
1246 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1247 		mlx5_mpw_close(txq, &mpw);
1248 	mlx5_tx_dbrec(txq, mpw.wqe);
1249 	txq->elts_head = elts_head;
1250 	return i;
1251 }
1252 
1253 /**
1254  * Open an Enhanced MPW session.
1255  *
1256  * @param txq
1257  *   Pointer to TX queue structure.
1258  * @param mpw
1259  *   Pointer to MPW session structure.
1260  * @param length
1261  *   Packet length.
1262  */
1263 static inline void
1264 mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
1265 {
1266 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
1267 
1268 	mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
1269 	mpw->pkts_n = 0;
1270 	mpw->total_len = sizeof(struct mlx5_wqe);
1271 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
1272 	mpw->wqe->ctrl[0] =
1273 		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
1274 				 (txq->wqe_ci << 8) |
1275 				 MLX5_OPCODE_ENHANCED_MPSW);
1276 	mpw->wqe->ctrl[2] = 0;
1277 	mpw->wqe->ctrl[3] = 0;
1278 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
1279 	if (unlikely(padding)) {
1280 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
1281 
1282 		/* Pad the first 2 DWORDs with zero-length inline header. */
1283 		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
1284 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
1285 			rte_cpu_to_be_32(MLX5_INLINE_SEG);
1286 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
1287 		/* Start from the next WQEBB. */
1288 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
1289 	} else {
1290 		mpw->data.raw = (volatile void *)(mpw->wqe + 1);
1291 	}
1292 }
1293 
1294 /**
1295  * Close an Enhanced MPW session.
1296  *
1297  * @param txq
1298  *   Pointer to TX queue structure.
1299  * @param mpw
1300  *   Pointer to MPW session structure.
1301  *
1302  * @return
1303  *   Number of consumed WQEs.
1304  */
1305 static inline uint16_t
1306 mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1307 {
1308 	uint16_t ret;
1309 
1310 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
1311 	 * count as 2.
1312 	 */
1313 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1314 					     MLX5_WQE_DS(mpw->total_len));
1315 	mpw->state = MLX5_MPW_STATE_CLOSED;
1316 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1317 	txq->wqe_ci += ret;
1318 	return ret;
1319 }
1320 
1321 /**
1322  * DPDK callback for TX with Enhanced MPW support.
1323  *
1324  * @param dpdk_txq
1325  *   Generic pointer to TX queue structure.
1326  * @param[in] pkts
1327  *   Packets to transmit.
1328  * @param pkts_n
1329  *   Number of packets in array.
1330  *
1331  * @return
1332  *   Number of packets successfully transmitted (<= pkts_n).
1333  */
1334 uint16_t
1335 mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1336 {
1337 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1338 	uint16_t elts_head = txq->elts_head;
1339 	const uint16_t elts_n = 1 << txq->elts_n;
1340 	const uint16_t elts_m = elts_n - 1;
1341 	unsigned int i = 0;
1342 	unsigned int j = 0;
1343 	uint16_t max_elts;
1344 	uint16_t max_wqe;
1345 	unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
1346 	unsigned int mpw_room = 0;
1347 	unsigned int inl_pad = 0;
1348 	uint32_t inl_hdr;
1349 	struct mlx5_mpw mpw = {
1350 		.state = MLX5_MPW_STATE_CLOSED,
1351 	};
1352 
1353 	if (unlikely(!pkts_n))
1354 		return 0;
1355 	/* Start processing. */
1356 	mlx5_tx_complete(txq);
1357 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1358 	/* A CQE slot must always be available. */
1359 	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
1360 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1361 	if (unlikely(!max_wqe))
1362 		return 0;
1363 	do {
1364 		struct rte_mbuf *buf = *(pkts++);
1365 		uintptr_t addr;
1366 		uint64_t naddr;
1367 		unsigned int n;
1368 		unsigned int do_inline = 0; /* Whether inline is possible. */
1369 		uint32_t length;
1370 		unsigned int segs_n = buf->nb_segs;
1371 		uint32_t cs_flags = 0;
1372 
1373 		/*
1374 		 * Make sure there is enough room to store this packet and
1375 		 * that one ring entry remains unused.
1376 		 */
1377 		assert(segs_n);
1378 		if (max_elts - j < segs_n)
1379 			break;
1380 		/* Do not bother with large packets MPW cannot handle. */
1381 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1382 			txq->stats.oerrors++;
1383 			break;
1384 		}
1385 		/* Should we enable HW CKSUM offload. */
1386 		if (buf->ol_flags &
1387 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1388 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1389 		/* Retrieve packet information. */
1390 		length = PKT_LEN(buf);
1391 		/* Start new session if:
1392 		 * - multi-segment packet
1393 		 * - no space left even for a dseg
1394 		 * - next packet can be inlined with a new WQE
1395 		 * - cs_flag differs
1396 		 * It can't be MLX5_MPW_STATE_OPENED as always have a single
1397 		 * segmented packet.
1398 		 */
1399 		if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
1400 			if ((segs_n != 1) ||
1401 			    (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
1402 			      mpw_room) ||
1403 			    (length <= txq->inline_max_packet_sz &&
1404 			     inl_pad + sizeof(inl_hdr) + length >
1405 			      mpw_room) ||
1406 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1407 				max_wqe -= mlx5_empw_close(txq, &mpw);
1408 		}
1409 		if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
1410 			if (unlikely(segs_n != 1)) {
1411 				/* Fall back to legacy MPW.
1412 				 * A MPW session consumes 2 WQEs at most to
1413 				 * include MLX5_MPW_DSEG_MAX pointers.
1414 				 */
1415 				if (unlikely(max_wqe < 2))
1416 					break;
1417 				mlx5_mpw_new(txq, &mpw, length);
1418 			} else {
1419 				/* In Enhanced MPW, inline as much as the budget
1420 				 * is allowed. The remaining space is to be
1421 				 * filled with dsegs. If the title WQEBB isn't
1422 				 * padded, it will have 2 dsegs there.
1423 				 */
1424 				mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
1425 					    (max_inline ? max_inline :
1426 					     pkts_n * MLX5_WQE_DWORD_SIZE) +
1427 					    MLX5_WQE_SIZE);
1428 				if (unlikely(max_wqe * MLX5_WQE_SIZE <
1429 					      mpw_room))
1430 					break;
1431 				/* Don't pad the title WQEBB to not waste WQ. */
1432 				mlx5_empw_new(txq, &mpw, 0);
1433 				mpw_room -= mpw.total_len;
1434 				inl_pad = 0;
1435 				do_inline =
1436 					length <= txq->inline_max_packet_sz &&
1437 					sizeof(inl_hdr) + length <= mpw_room &&
1438 					!txq->mpw_hdr_dseg;
1439 			}
1440 			mpw.wqe->eseg.cs_flags = cs_flags;
1441 		} else {
1442 			/* Evaluate whether the next packet can be inlined.
1443 			 * Inlininig is possible when:
1444 			 * - length is less than configured value
1445 			 * - length fits for remaining space
1446 			 * - not required to fill the title WQEBB with dsegs
1447 			 */
1448 			do_inline =
1449 				length <= txq->inline_max_packet_sz &&
1450 				inl_pad + sizeof(inl_hdr) + length <=
1451 				 mpw_room &&
1452 				(!txq->mpw_hdr_dseg ||
1453 				 mpw.total_len >= MLX5_WQE_SIZE);
1454 		}
1455 		/* Multi-segment packets must be alone in their MPW. */
1456 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1457 		if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
1458 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1459 			length = 0;
1460 #endif
1461 			do {
1462 				volatile struct mlx5_wqe_data_seg *dseg;
1463 
1464 				assert(buf);
1465 				(*txq->elts)[elts_head++ & elts_m] = buf;
1466 				dseg = mpw.data.dseg[mpw.pkts_n];
1467 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1468 				*dseg = (struct mlx5_wqe_data_seg){
1469 					.byte_count = rte_cpu_to_be_32(
1470 								DATA_LEN(buf)),
1471 					.lkey = mlx5_tx_mb2mr(txq, buf),
1472 					.addr = rte_cpu_to_be_64(addr),
1473 				};
1474 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1475 				length += DATA_LEN(buf);
1476 #endif
1477 				buf = buf->next;
1478 				++j;
1479 				++mpw.pkts_n;
1480 			} while (--segs_n);
1481 			/* A multi-segmented packet takes one MPW session.
1482 			 * TODO: Pack more multi-segmented packets if possible.
1483 			 */
1484 			mlx5_mpw_close(txq, &mpw);
1485 			if (mpw.pkts_n < 3)
1486 				max_wqe--;
1487 			else
1488 				max_wqe -= 2;
1489 		} else if (do_inline) {
1490 			/* Inline packet into WQE. */
1491 			unsigned int max;
1492 
1493 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1494 			assert(length == DATA_LEN(buf));
1495 			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
1496 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1497 			mpw.data.raw = (volatile void *)
1498 				((uintptr_t)mpw.data.raw + inl_pad);
1499 			max = tx_mlx5_wq_tailroom(txq,
1500 					(void *)(uintptr_t)mpw.data.raw);
1501 			/* Copy inline header. */
1502 			mpw.data.raw = (volatile void *)
1503 				mlx5_copy_to_wq(
1504 					  (void *)(uintptr_t)mpw.data.raw,
1505 					  &inl_hdr,
1506 					  sizeof(inl_hdr),
1507 					  (void *)(uintptr_t)txq->wqes,
1508 					  max);
1509 			max = tx_mlx5_wq_tailroom(txq,
1510 					(void *)(uintptr_t)mpw.data.raw);
1511 			/* Copy packet data. */
1512 			mpw.data.raw = (volatile void *)
1513 				mlx5_copy_to_wq(
1514 					  (void *)(uintptr_t)mpw.data.raw,
1515 					  (void *)addr,
1516 					  length,
1517 					  (void *)(uintptr_t)txq->wqes,
1518 					  max);
1519 			++mpw.pkts_n;
1520 			mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
1521 			/* No need to get completion as the entire packet is
1522 			 * copied to WQ. Free the buf right away.
1523 			 */
1524 			rte_pktmbuf_free_seg(buf);
1525 			mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
1526 			/* Add pad in the next packet if any. */
1527 			inl_pad = (((uintptr_t)mpw.data.raw +
1528 					(MLX5_WQE_DWORD_SIZE - 1)) &
1529 					~(MLX5_WQE_DWORD_SIZE - 1)) -
1530 				  (uintptr_t)mpw.data.raw;
1531 		} else {
1532 			/* No inline. Load a dseg of packet pointer. */
1533 			volatile rte_v128u32_t *dseg;
1534 
1535 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1536 			assert((inl_pad + sizeof(*dseg)) <= mpw_room);
1537 			assert(length == DATA_LEN(buf));
1538 			if (!tx_mlx5_wq_tailroom(txq,
1539 					(void *)((uintptr_t)mpw.data.raw
1540 						+ inl_pad)))
1541 				dseg = (volatile void *)txq->wqes;
1542 			else
1543 				dseg = (volatile void *)
1544 					((uintptr_t)mpw.data.raw +
1545 					 inl_pad);
1546 			(*txq->elts)[elts_head++ & elts_m] = buf;
1547 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1548 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
1549 				rte_prefetch2((void *)(addr +
1550 						n * RTE_CACHE_LINE_SIZE));
1551 			naddr = rte_cpu_to_be_64(addr);
1552 			*dseg = (rte_v128u32_t) {
1553 				rte_cpu_to_be_32(length),
1554 				mlx5_tx_mb2mr(txq, buf),
1555 				naddr,
1556 				naddr >> 32,
1557 			};
1558 			mpw.data.raw = (volatile void *)(dseg + 1);
1559 			mpw.total_len += (inl_pad + sizeof(*dseg));
1560 			++j;
1561 			++mpw.pkts_n;
1562 			mpw_room -= (inl_pad + sizeof(*dseg));
1563 			inl_pad = 0;
1564 		}
1565 #ifdef MLX5_PMD_SOFT_COUNTERS
1566 		/* Increment sent bytes counter. */
1567 		txq->stats.obytes += length;
1568 #endif
1569 		++i;
1570 	} while (i < pkts_n);
1571 	/* Take a shortcut if nothing must be sent. */
1572 	if (unlikely(i == 0))
1573 		return 0;
1574 	/* Check whether completion threshold has been reached. */
1575 	if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
1576 			(uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
1577 			 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
1578 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1579 
1580 		/* Request completion on last WQE. */
1581 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1582 		/* Save elts_head in unused "immediate" field of WQE. */
1583 		wqe->ctrl[3] = elts_head;
1584 		txq->elts_comp = 0;
1585 		txq->mpw_comp = txq->wqe_ci;
1586 		txq->cq_pi++;
1587 	} else {
1588 		txq->elts_comp += j;
1589 	}
1590 #ifdef MLX5_PMD_SOFT_COUNTERS
1591 	/* Increment sent packets counter. */
1592 	txq->stats.opackets += i;
1593 #endif
1594 	if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
1595 		mlx5_empw_close(txq, &mpw);
1596 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1597 		mlx5_mpw_close(txq, &mpw);
1598 	/* Ring QP doorbell. */
1599 	mlx5_tx_dbrec(txq, mpw.wqe);
1600 	txq->elts_head = elts_head;
1601 	return i;
1602 }
1603 
1604 /**
1605  * Translate RX completion flags to packet type.
1606  *
1607  * @param[in] cqe
1608  *   Pointer to CQE.
1609  *
1610  * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
1611  *
1612  * @return
1613  *   Packet type for struct rte_mbuf.
1614  */
1615 static inline uint32_t
1616 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
1617 {
1618 	uint8_t idx;
1619 	uint8_t pinfo = cqe->pkt_info;
1620 	uint16_t ptype = cqe->hdr_type_etc;
1621 
1622 	/*
1623 	 * The index to the array should have:
1624 	 * bit[1:0] = l3_hdr_type
1625 	 * bit[4:2] = l4_hdr_type
1626 	 * bit[5] = ip_frag
1627 	 * bit[6] = tunneled
1628 	 * bit[7] = outer_l3_type
1629 	 */
1630 	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
1631 	return mlx5_ptype_table[idx];
1632 }
1633 
1634 /**
1635  * Get size of the next packet for a given CQE. For compressed CQEs, the
1636  * consumer index is updated only once all packets of the current one have
1637  * been processed.
1638  *
1639  * @param rxq
1640  *   Pointer to RX queue.
1641  * @param cqe
1642  *   CQE to process.
1643  * @param[out] rss_hash
1644  *   Packet RSS Hash result.
1645  *
1646  * @return
1647  *   Packet size in bytes (0 if there is none), -1 in case of completion
1648  *   with error.
1649  */
1650 static inline int
1651 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1652 		 uint16_t cqe_cnt, uint32_t *rss_hash)
1653 {
1654 	struct rxq_zip *zip = &rxq->zip;
1655 	uint16_t cqe_n = cqe_cnt + 1;
1656 	int len = 0;
1657 	uint16_t idx, end;
1658 
1659 	/* Process compressed data in the CQE and mini arrays. */
1660 	if (zip->ai) {
1661 		volatile struct mlx5_mini_cqe8 (*mc)[8] =
1662 			(volatile struct mlx5_mini_cqe8 (*)[8])
1663 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
1664 
1665 		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1666 		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
1667 		if ((++zip->ai & 7) == 0) {
1668 			/* Invalidate consumed CQEs */
1669 			idx = zip->ca;
1670 			end = zip->na;
1671 			while (idx != end) {
1672 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1673 					MLX5_CQE_INVALIDATE;
1674 				++idx;
1675 			}
1676 			/*
1677 			 * Increment consumer index to skip the number of
1678 			 * CQEs consumed. Hardware leaves holes in the CQ
1679 			 * ring for software use.
1680 			 */
1681 			zip->ca = zip->na;
1682 			zip->na += 8;
1683 		}
1684 		if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1685 			/* Invalidate the rest */
1686 			idx = zip->ca;
1687 			end = zip->cq_ci;
1688 
1689 			while (idx != end) {
1690 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1691 					MLX5_CQE_INVALIDATE;
1692 				++idx;
1693 			}
1694 			rxq->cq_ci = zip->cq_ci;
1695 			zip->ai = 0;
1696 		}
1697 	/* No compressed data, get next CQE and verify if it is compressed. */
1698 	} else {
1699 		int ret;
1700 		int8_t op_own;
1701 
1702 		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1703 		if (unlikely(ret == 1))
1704 			return 0;
1705 		++rxq->cq_ci;
1706 		op_own = cqe->op_own;
1707 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1708 			volatile struct mlx5_mini_cqe8 (*mc)[8] =
1709 				(volatile struct mlx5_mini_cqe8 (*)[8])
1710 				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
1711 							  cqe_cnt].pkt_info);
1712 
1713 			/* Fix endianness. */
1714 			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1715 			/*
1716 			 * Current mini array position is the one returned by
1717 			 * check_cqe64().
1718 			 *
1719 			 * If completion comprises several mini arrays, as a
1720 			 * special case the second one is located 7 CQEs after
1721 			 * the initial CQE instead of 8 for subsequent ones.
1722 			 */
1723 			zip->ca = rxq->cq_ci;
1724 			zip->na = zip->ca + 7;
1725 			/* Compute the next non compressed CQE. */
1726 			--rxq->cq_ci;
1727 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1728 			/* Get packet size to return. */
1729 			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1730 			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
1731 			zip->ai = 1;
1732 			/* Prefetch all the entries to be invalidated */
1733 			idx = zip->ca;
1734 			end = zip->cq_ci;
1735 			while (idx != end) {
1736 				rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
1737 				++idx;
1738 			}
1739 		} else {
1740 			len = rte_be_to_cpu_32(cqe->byte_cnt);
1741 			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
1742 		}
1743 		/* Error while receiving packet. */
1744 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
1745 			return -1;
1746 	}
1747 	return len;
1748 }
1749 
1750 /**
1751  * Translate RX completion flags to offload flags.
1752  *
1753  * @param[in] rxq
1754  *   Pointer to RX queue structure.
1755  * @param[in] cqe
1756  *   Pointer to CQE.
1757  *
1758  * @return
1759  *   Offload flags (ol_flags) for struct rte_mbuf.
1760  */
1761 static inline uint32_t
1762 rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
1763 {
1764 	uint32_t ol_flags = 0;
1765 	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1766 
1767 	ol_flags =
1768 		TRANSPOSE(flags,
1769 			  MLX5_CQE_RX_L3_HDR_VALID,
1770 			  PKT_RX_IP_CKSUM_GOOD) |
1771 		TRANSPOSE(flags,
1772 			  MLX5_CQE_RX_L4_HDR_VALID,
1773 			  PKT_RX_L4_CKSUM_GOOD);
1774 	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
1775 		ol_flags |=
1776 			TRANSPOSE(flags,
1777 				  MLX5_CQE_RX_L3_HDR_VALID,
1778 				  PKT_RX_IP_CKSUM_GOOD) |
1779 			TRANSPOSE(flags,
1780 				  MLX5_CQE_RX_L4_HDR_VALID,
1781 				  PKT_RX_L4_CKSUM_GOOD);
1782 	return ol_flags;
1783 }
1784 
1785 /**
1786  * DPDK callback for RX.
1787  *
1788  * @param dpdk_rxq
1789  *   Generic pointer to RX queue structure.
1790  * @param[out] pkts
1791  *   Array to store received packets.
1792  * @param pkts_n
1793  *   Maximum number of packets in array.
1794  *
1795  * @return
1796  *   Number of packets successfully received (<= pkts_n).
1797  */
1798 uint16_t
1799 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1800 {
1801 	struct mlx5_rxq_data *rxq = dpdk_rxq;
1802 	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1803 	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1804 	const unsigned int sges_n = rxq->sges_n;
1805 	struct rte_mbuf *pkt = NULL;
1806 	struct rte_mbuf *seg = NULL;
1807 	volatile struct mlx5_cqe *cqe =
1808 		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1809 	unsigned int i = 0;
1810 	unsigned int rq_ci = rxq->rq_ci << sges_n;
1811 	int len = 0; /* keep its value across iterations. */
1812 
1813 	while (pkts_n) {
1814 		unsigned int idx = rq_ci & wqe_cnt;
1815 		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
1816 		struct rte_mbuf *rep = (*rxq->elts)[idx];
1817 		uint32_t rss_hash_res = 0;
1818 
1819 		if (pkt)
1820 			NEXT(seg) = rep;
1821 		seg = rep;
1822 		rte_prefetch0(seg);
1823 		rte_prefetch0(cqe);
1824 		rte_prefetch0(wqe);
1825 		rep = rte_mbuf_raw_alloc(rxq->mp);
1826 		if (unlikely(rep == NULL)) {
1827 			++rxq->stats.rx_nombuf;
1828 			if (!pkt) {
1829 				/*
1830 				 * no buffers before we even started,
1831 				 * bail out silently.
1832 				 */
1833 				break;
1834 			}
1835 			while (pkt != seg) {
1836 				assert(pkt != (*rxq->elts)[idx]);
1837 				rep = NEXT(pkt);
1838 				NEXT(pkt) = NULL;
1839 				NB_SEGS(pkt) = 1;
1840 				rte_mbuf_raw_free(pkt);
1841 				pkt = rep;
1842 			}
1843 			break;
1844 		}
1845 		if (!pkt) {
1846 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1847 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
1848 					       &rss_hash_res);
1849 			if (!len) {
1850 				rte_mbuf_raw_free(rep);
1851 				break;
1852 			}
1853 			if (unlikely(len == -1)) {
1854 				/* RX error, packet is likely too large. */
1855 				rte_mbuf_raw_free(rep);
1856 				++rxq->stats.idropped;
1857 				goto skip;
1858 			}
1859 			pkt = seg;
1860 			assert(len >= (rxq->crc_present << 2));
1861 			/* Update packet information. */
1862 			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
1863 			pkt->ol_flags = 0;
1864 			if (rss_hash_res && rxq->rss_hash) {
1865 				pkt->hash.rss = rss_hash_res;
1866 				pkt->ol_flags = PKT_RX_RSS_HASH;
1867 			}
1868 			if (rxq->mark &&
1869 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1870 				pkt->ol_flags |= PKT_RX_FDIR;
1871 				if (cqe->sop_drop_qpn !=
1872 				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1873 					uint32_t mark = cqe->sop_drop_qpn;
1874 
1875 					pkt->ol_flags |= PKT_RX_FDIR_ID;
1876 					pkt->hash.fdir.hi =
1877 						mlx5_flow_mark_get(mark);
1878 				}
1879 			}
1880 			if (rxq->csum | rxq->csum_l2tun)
1881 				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
1882 			if (rxq->vlan_strip &&
1883 			    (cqe->hdr_type_etc &
1884 			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1885 				pkt->ol_flags |= PKT_RX_VLAN_PKT |
1886 					PKT_RX_VLAN_STRIPPED;
1887 				pkt->vlan_tci =
1888 					rte_be_to_cpu_16(cqe->vlan_info);
1889 			}
1890 			if (rxq->hw_timestamp) {
1891 				pkt->timestamp =
1892 					rte_be_to_cpu_64(cqe->timestamp);
1893 				pkt->ol_flags |= PKT_RX_TIMESTAMP;
1894 			}
1895 			if (rxq->crc_present)
1896 				len -= ETHER_CRC_LEN;
1897 			PKT_LEN(pkt) = len;
1898 		}
1899 		DATA_LEN(rep) = DATA_LEN(seg);
1900 		PKT_LEN(rep) = PKT_LEN(seg);
1901 		SET_DATA_OFF(rep, DATA_OFF(seg));
1902 		PORT(rep) = PORT(seg);
1903 		(*rxq->elts)[idx] = rep;
1904 		/*
1905 		 * Fill NIC descriptor with the new buffer.  The lkey and size
1906 		 * of the buffers are already known, only the buffer address
1907 		 * changes.
1908 		 */
1909 		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1910 		if (len > DATA_LEN(seg)) {
1911 			len -= DATA_LEN(seg);
1912 			++NB_SEGS(pkt);
1913 			++rq_ci;
1914 			continue;
1915 		}
1916 		DATA_LEN(seg) = len;
1917 #ifdef MLX5_PMD_SOFT_COUNTERS
1918 		/* Increment bytes counter. */
1919 		rxq->stats.ibytes += PKT_LEN(pkt);
1920 #endif
1921 		/* Return packet. */
1922 		*(pkts++) = pkt;
1923 		pkt = NULL;
1924 		--pkts_n;
1925 		++i;
1926 skip:
1927 		/* Align consumer index to the next stride. */
1928 		rq_ci >>= sges_n;
1929 		++rq_ci;
1930 		rq_ci <<= sges_n;
1931 	}
1932 	if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1933 		return 0;
1934 	/* Update the consumer index. */
1935 	rxq->rq_ci = rq_ci >> sges_n;
1936 	rte_io_wmb();
1937 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1938 	rte_io_wmb();
1939 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1940 #ifdef MLX5_PMD_SOFT_COUNTERS
1941 	/* Increment packets counter. */
1942 	rxq->stats.ipackets += i;
1943 #endif
1944 	return i;
1945 }
1946 
1947 /**
1948  * Dummy DPDK callback for TX.
1949  *
1950  * This function is used to temporarily replace the real callback during
1951  * unsafe control operations on the queue, or in case of error.
1952  *
1953  * @param dpdk_txq
1954  *   Generic pointer to TX queue structure.
1955  * @param[in] pkts
1956  *   Packets to transmit.
1957  * @param pkts_n
1958  *   Number of packets in array.
1959  *
1960  * @return
1961  *   Number of packets successfully transmitted (<= pkts_n).
1962  */
1963 uint16_t
1964 removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1965 {
1966 	(void)dpdk_txq;
1967 	(void)pkts;
1968 	(void)pkts_n;
1969 	return 0;
1970 }
1971 
1972 /**
1973  * Dummy DPDK callback for RX.
1974  *
1975  * This function is used to temporarily replace the real callback during
1976  * unsafe control operations on the queue, or in case of error.
1977  *
1978  * @param dpdk_rxq
1979  *   Generic pointer to RX queue structure.
1980  * @param[out] pkts
1981  *   Array to store received packets.
1982  * @param pkts_n
1983  *   Maximum number of packets in array.
1984  *
1985  * @return
1986  *   Number of packets successfully received (<= pkts_n).
1987  */
1988 uint16_t
1989 removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1990 {
1991 	(void)dpdk_rxq;
1992 	(void)pkts;
1993 	(void)pkts_n;
1994 	return 0;
1995 }
1996 
1997 /*
1998  * Vectorized Rx/Tx routines are not compiled in when required vector
1999  * instructions are not supported on a target architecture. The following null
2000  * stubs are needed for linkage when those are not included outside of this file
2001  * (e.g.  mlx5_rxtx_vec_sse.c for x86).
2002  */
2003 
2004 uint16_t __attribute__((weak))
2005 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2006 {
2007 	(void)dpdk_txq;
2008 	(void)pkts;
2009 	(void)pkts_n;
2010 	return 0;
2011 }
2012 
2013 uint16_t __attribute__((weak))
2014 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2015 {
2016 	(void)dpdk_txq;
2017 	(void)pkts;
2018 	(void)pkts_n;
2019 	return 0;
2020 }
2021 
2022 uint16_t __attribute__((weak))
2023 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
2024 {
2025 	(void)dpdk_rxq;
2026 	(void)pkts;
2027 	(void)pkts_n;
2028 	return 0;
2029 }
2030 
2031 int __attribute__((weak))
2032 priv_check_raw_vec_tx_support(struct priv *priv)
2033 {
2034 	(void)priv;
2035 	return -ENOTSUP;
2036 }
2037 
2038 int __attribute__((weak))
2039 priv_check_vec_tx_support(struct priv *priv)
2040 {
2041 	(void)priv;
2042 	return -ENOTSUP;
2043 }
2044 
2045 int __attribute__((weak))
2046 rxq_check_vec_support(struct mlx5_rxq_data *rxq)
2047 {
2048 	(void)rxq;
2049 	return -ENOTSUP;
2050 }
2051 
2052 int __attribute__((weak))
2053 priv_check_vec_rx_support(struct priv *priv)
2054 {
2055 	(void)priv;
2056 	return -ENOTSUP;
2057 }
2058