xref: /dpdk/drivers/net/mlx5/mlx5_rxtx.c (revision 131a75b6e4df60586103d71defb85dcf9f77fb17)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <assert.h>
35 #include <stdint.h>
36 #include <string.h>
37 #include <stdlib.h>
38 
39 /* Verbs header. */
40 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
41 #ifdef PEDANTIC
42 #pragma GCC diagnostic ignored "-Wpedantic"
43 #endif
44 #include <infiniband/verbs.h>
45 #include <infiniband/mlx5dv.h>
46 #ifdef PEDANTIC
47 #pragma GCC diagnostic error "-Wpedantic"
48 #endif
49 
50 #include <rte_mbuf.h>
51 #include <rte_mempool.h>
52 #include <rte_prefetch.h>
53 #include <rte_common.h>
54 #include <rte_branch_prediction.h>
55 #include <rte_ether.h>
56 
57 #include "mlx5.h"
58 #include "mlx5_utils.h"
59 #include "mlx5_rxtx.h"
60 #include "mlx5_autoconf.h"
61 #include "mlx5_defs.h"
62 #include "mlx5_prm.h"
63 
64 static __rte_always_inline uint32_t
65 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
66 
67 static __rte_always_inline int
68 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
69 		 uint16_t cqe_cnt, uint32_t *rss_hash);
70 
71 static __rte_always_inline uint32_t
72 rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
73 
74 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
75 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
76 };
77 
78 /**
79  * Build a table to translate Rx completion flags to packet type.
80  *
81  * @note: keep mlx5_dev_supported_ptypes_get() in sync with any change here.
82  */
83 void
84 mlx5_set_ptype_table(void)
85 {
86 	unsigned int i;
87 	uint32_t (*p)[RTE_DIM(mlx5_ptype_table)] = &mlx5_ptype_table;
88 
89 	/* Last entry must not be overwritten, reserved for errored packet. */
90 	for (i = 0; i < RTE_DIM(mlx5_ptype_table) - 1; ++i)
91 		(*p)[i] = RTE_PTYPE_UNKNOWN;
92 	/*
93 	 * The index to the array should have:
94 	 * bit[1:0] = l3_hdr_type
95 	 * bit[4:2] = l4_hdr_type
96 	 * bit[5] = ip_frag
97 	 * bit[6] = tunneled
98 	 * bit[7] = outer_l3_type
99 	 */
100 	/* L2 */
101 	(*p)[0x00] = RTE_PTYPE_L2_ETHER;
102 	/* L3 */
103 	(*p)[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
104 		     RTE_PTYPE_L4_NONFRAG;
105 	(*p)[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
106 		     RTE_PTYPE_L4_NONFRAG;
107 	/* Fragmented */
108 	(*p)[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
109 		     RTE_PTYPE_L4_FRAG;
110 	(*p)[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
111 		     RTE_PTYPE_L4_FRAG;
112 	/* TCP */
113 	(*p)[0x05] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
114 		     RTE_PTYPE_L4_TCP;
115 	(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
116 		     RTE_PTYPE_L4_TCP;
117 	/* UDP */
118 	(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
119 		     RTE_PTYPE_L4_UDP;
120 	(*p)[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
121 		     RTE_PTYPE_L4_UDP;
122 	/* Repeat with outer_l3_type being set. Just in case. */
123 	(*p)[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
124 		     RTE_PTYPE_L4_NONFRAG;
125 	(*p)[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
126 		     RTE_PTYPE_L4_NONFRAG;
127 	(*p)[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
128 		     RTE_PTYPE_L4_FRAG;
129 	(*p)[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
130 		     RTE_PTYPE_L4_FRAG;
131 	(*p)[0x85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
132 		     RTE_PTYPE_L4_TCP;
133 	(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
134 		     RTE_PTYPE_L4_TCP;
135 	(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
136 		     RTE_PTYPE_L4_UDP;
137 	(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
138 		     RTE_PTYPE_L4_UDP;
139 	/* Tunneled - L3 */
140 	(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
141 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
142 		     RTE_PTYPE_INNER_L4_NONFRAG;
143 	(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
144 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
145 		     RTE_PTYPE_INNER_L4_NONFRAG;
146 	(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
147 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
148 		     RTE_PTYPE_INNER_L4_NONFRAG;
149 	(*p)[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
150 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
151 		     RTE_PTYPE_INNER_L4_NONFRAG;
152 	/* Tunneled - Fragmented */
153 	(*p)[0x61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
154 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
155 		     RTE_PTYPE_INNER_L4_FRAG;
156 	(*p)[0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
157 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
158 		     RTE_PTYPE_INNER_L4_FRAG;
159 	(*p)[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
160 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
161 		     RTE_PTYPE_INNER_L4_FRAG;
162 	(*p)[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
163 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
164 		     RTE_PTYPE_INNER_L4_FRAG;
165 	/* Tunneled - TCP */
166 	(*p)[0x45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
167 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
168 		     RTE_PTYPE_INNER_L4_TCP;
169 	(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
170 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
171 		     RTE_PTYPE_INNER_L4_TCP;
172 	(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
173 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
174 		     RTE_PTYPE_INNER_L4_TCP;
175 	(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
176 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
177 		     RTE_PTYPE_INNER_L4_TCP;
178 	/* Tunneled - UDP */
179 	(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
180 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
181 		     RTE_PTYPE_INNER_L4_UDP;
182 	(*p)[0x4a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
183 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
184 		     RTE_PTYPE_INNER_L4_UDP;
185 	(*p)[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
186 		     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
187 		     RTE_PTYPE_INNER_L4_UDP;
188 	(*p)[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
189 		     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
190 		     RTE_PTYPE_INNER_L4_UDP;
191 }
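
/*
 * Worked example of the index layout above (illustrative only): a
 * non-tunneled IPv4/TCP completion has l3_hdr_type = 2 and
 * l4_hdr_type = 1, i.e. index 0x06, which resolves to RTE_PTYPE_L2_ETHER
 * | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.  The same packet
 * with the tunneled bit set is index 0x46 and maps to the matching
 * RTE_PTYPE_INNER_* entry.
 */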
192 
193 /**
194  * Return the size of the tailroom left in the WQ.
195  *
196  * @param txq
197  *   Pointer to TX queue structure.
198  * @param addr
199  *   Pointer to tail of WQ.
200  *
201  * @return
202  *   Size of tailroom.
203  */
204 static inline size_t
205 tx_mlx5_wq_tailroom(struct mlx5_txq_data *txq, void *addr)
206 {
207 	size_t tailroom;
208 	tailroom = (uintptr_t)(txq->wqes) +
209 		   (1 << txq->wqe_n) * MLX5_WQE_SIZE -
210 		   (uintptr_t)addr;
211 	return tailroom;
212 }
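
/*
 * Sizing example (sketch, assuming the usual 64-byte MLX5_WQE_SIZE):
 * with txq->wqe_n == 8 the WQ spans 256 * 64 = 16384 bytes, so an addr
 * pointing 16320 bytes past txq->wqes leaves a 64-byte tailroom.
 */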
213 
214 /**
215  * Copy data to tailroom of circular queue.
216  *
217  * @param dst
218  *   Pointer to destination.
219  * @param src
220  *   Pointer to source.
221  * @param n
222  *   Number of bytes to copy.
223  * @param base
224  *   Pointer to head of queue.
225  * @param tailroom
226  *   Size of tailroom from dst.
227  *
228  * @return
229  *   Pointer after copied data.
230  */
231 static inline void *
232 mlx5_copy_to_wq(void *dst, const void *src, size_t n,
233 		void *base, size_t tailroom)
234 {
235 	void *ret;
236 
237 	if (n > tailroom) {
238 		rte_memcpy(dst, src, tailroom);
239 		rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
240 			   n - tailroom);
241 		ret = (uint8_t *)base + n - tailroom;
242 	} else {
243 		rte_memcpy(dst, src, n);
244 		ret = (n == tailroom) ? base : (uint8_t *)dst + n;
245 	}
246 	return ret;
247 }
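
/*
 * Usage sketch: with tailroom == 16 and n == 24, the first 16 bytes go
 * to dst (the tail of the queue) and the remaining 8 bytes to base (its
 * head); the returned pointer is then (uint8_t *)base + 8.  When
 * n == tailroom the copy exactly fills the tail and base is returned,
 * i.e. the write position wraps around to the head of the queue.
 */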
248 
249 /**
250  * DPDK callback to check the status of a tx descriptor.
251  *
252  * @param tx_queue
253  *   The tx queue.
254  * @param[in] offset
255  *   The index of the descriptor in the ring.
256  *
257  * @return
258  *   The status of the tx descriptor.
259  */
260 int
261 mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
262 {
263 	struct mlx5_txq_data *txq = tx_queue;
264 	uint16_t used;
265 
266 	mlx5_tx_complete(txq);
267 	used = txq->elts_head - txq->elts_tail;
268 	if (offset < used)
269 		return RTE_ETH_TX_DESC_FULL;
270 	return RTE_ETH_TX_DESC_DONE;
271 }
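
/*
 * This callback is normally reached through the generic ethdev API; a
 * minimal application-side sketch (port_id, queue_id, offset and
 * reclaim_slot() are placeholders):
 *
 *	if (rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_TX_DESC_DONE)
 *		reclaim_slot(offset);
 *
 * RTE_ETH_TX_DESC_FULL means the descriptor at "offset" has not been
 * completed yet.
 */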
272 
273 /**
274  * DPDK callback to check the status of a rx descriptor.
275  *
276  * @param rx_queue
277  *   The rx queue.
278  * @param[in] offset
279  *   The index of the descriptor in the ring.
280  *
281  * @return
282  *   The status of the rx descriptor.
283  */
284 int
285 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
286 {
287 	struct mlx5_rxq_data *rxq = rx_queue;
288 	struct rxq_zip *zip = &rxq->zip;
289 	volatile struct mlx5_cqe *cqe;
290 	const unsigned int cqe_n = (1 << rxq->cqe_n);
291 	const unsigned int cqe_cnt = cqe_n - 1;
292 	unsigned int cq_ci;
293 	unsigned int used;
294 
295 	/* If we are processing a compressed CQE. */
296 	if (zip->ai) {
297 		used = zip->cqe_cnt - zip->ca;
298 		cq_ci = zip->cq_ci;
299 	} else {
300 		used = 0;
301 		cq_ci = rxq->cq_ci;
302 	}
303 	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
304 	while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
305 		int8_t op_own;
306 		unsigned int n;
307 
308 		op_own = cqe->op_own;
309 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
310 			n = rte_be_to_cpu_32(cqe->byte_cnt);
311 		else
312 			n = 1;
313 		cq_ci += n;
314 		used += n;
315 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
316 	}
317 	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
318 	if (offset < used)
319 		return RTE_ETH_RX_DESC_DONE;
320 	return RTE_ETH_RX_DESC_AVAIL;
321 }
322 
323 /**
324  * DPDK callback for TX.
325  *
326  * @param dpdk_txq
327  *   Generic pointer to TX queue structure.
328  * @param[in] pkts
329  *   Packets to transmit.
330  * @param pkts_n
331  *   Number of packets in array.
332  *
333  * @return
334  *   Number of packets successfully transmitted (<= pkts_n).
335  */
336 uint16_t
337 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
338 {
339 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
340 	uint16_t elts_head = txq->elts_head;
341 	const uint16_t elts_n = 1 << txq->elts_n;
342 	const uint16_t elts_m = elts_n - 1;
343 	unsigned int i = 0;
344 	unsigned int j = 0;
345 	unsigned int k = 0;
346 	uint16_t max_elts;
347 	unsigned int max_inline = txq->max_inline;
348 	const unsigned int inline_en = !!max_inline && txq->inline_en;
349 	uint16_t max_wqe;
350 	unsigned int comp;
351 	volatile struct mlx5_wqe_v *wqe = NULL;
352 	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
353 	unsigned int segs_n = 0;
354 	struct rte_mbuf *buf = NULL;
355 	uint8_t *raw;
356 
357 	if (unlikely(!pkts_n))
358 		return 0;
359 	/* Prefetch first packet cacheline. */
360 	rte_prefetch0(*pkts);
361 	/* Start processing. */
362 	mlx5_tx_complete(txq);
363 	max_elts = (elts_n - (elts_head - txq->elts_tail));
364 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
365 	if (unlikely(!max_wqe))
366 		return 0;
367 	do {
368 		volatile rte_v128u32_t *dseg = NULL;
369 		uint32_t length;
370 		unsigned int ds = 0;
371 		unsigned int sg = 0; /* counter of additional segs attached. */
372 		uintptr_t addr;
373 		uint64_t naddr;
374 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
375 		uint16_t tso_header_sz = 0;
376 		uint16_t ehdr;
377 		uint8_t cs_flags = 0;
378 		uint64_t tso = 0;
379 		uint16_t tso_segsz = 0;
380 #ifdef MLX5_PMD_SOFT_COUNTERS
381 		uint32_t total_length = 0;
382 #endif
383 
384 		/* first_seg */
385 		buf = *pkts;
386 		segs_n = buf->nb_segs;
387 		/*
388 		 * Make sure there is enough room to store this packet and
389 		 * that one ring entry remains unused.
390 		 */
391 		assert(segs_n);
392 		if (max_elts < segs_n)
393 			break;
394 		max_elts -= segs_n;
395 		--segs_n;
396 		if (unlikely(--max_wqe == 0))
397 			break;
398 		wqe = (volatile struct mlx5_wqe_v *)
399 			tx_mlx5_wqe(txq, txq->wqe_ci);
400 		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
401 		if (pkts_n - i > 1)
402 			rte_prefetch0(*(pkts + 1));
403 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
404 		length = DATA_LEN(buf);
405 		ehdr = (((uint8_t *)addr)[1] << 8) |
406 		       ((uint8_t *)addr)[0];
407 #ifdef MLX5_PMD_SOFT_COUNTERS
408 		total_length = length;
409 #endif
410 		if (length < (MLX5_WQE_DWORD_SIZE + 2)) {
411 			txq->stats.oerrors++;
412 			break;
413 		}
414 		/* Update element. */
415 		(*txq->elts)[elts_head & elts_m] = buf;
416 		/* Prefetch next buffer data. */
417 		if (pkts_n - i > 1)
418 			rte_prefetch0(
419 			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
420 		/* Should we enable HW CKSUM offload */
421 		if (buf->ol_flags &
422 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
423 			const uint64_t is_tunneled = buf->ol_flags &
424 						     (PKT_TX_TUNNEL_GRE |
425 						      PKT_TX_TUNNEL_VXLAN);
426 
427 			if (is_tunneled && txq->tunnel_en) {
428 				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
429 					   MLX5_ETH_WQE_L4_INNER_CSUM;
430 				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
431 					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
432 			} else {
433 				cs_flags = MLX5_ETH_WQE_L3_CSUM |
434 					   MLX5_ETH_WQE_L4_CSUM;
435 			}
436 		}
437 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
438 		/* Replace the Ethernet type by the VLAN if necessary. */
439 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
440 			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
441 							 buf->vlan_tci);
442 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;
443 
444 			addr += 2;
445 			length -= 2;
446 			/* Copy Destination and source mac address. */
447 			memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
448 			/* Copy VLAN. */
449 			memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
450 			/* Copy missing two bytes to end the DSeg. */
451 			memcpy((uint8_t *)raw + len + sizeof(vlan),
452 			       ((uint8_t *)addr) + len, 2);
453 			addr += len + 2;
454 			length -= (len + 2);
455 		} else {
456 			memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
457 			       MLX5_WQE_DWORD_SIZE);
458 			length -= pkt_inline_sz;
459 			addr += pkt_inline_sz;
460 		}
461 		raw += MLX5_WQE_DWORD_SIZE;
462 		if (txq->tso_en) {
463 			tso = buf->ol_flags & PKT_TX_TCP_SEG;
464 			if (tso) {
465 				uintptr_t end = (uintptr_t)
466 						(((uintptr_t)txq->wqes) +
467 						(1 << txq->wqe_n) *
468 						MLX5_WQE_SIZE);
469 				unsigned int copy_b;
470 				uint8_t vlan_sz = (buf->ol_flags &
471 						  PKT_TX_VLAN_PKT) ? 4 : 0;
472 				const uint64_t is_tunneled =
473 							buf->ol_flags &
474 							(PKT_TX_TUNNEL_GRE |
475 							 PKT_TX_TUNNEL_VXLAN);
476 
477 				tso_header_sz = buf->l2_len + vlan_sz +
478 						buf->l3_len + buf->l4_len;
479 				tso_segsz = buf->tso_segsz;
480 				if (unlikely(tso_segsz == 0)) {
481 					txq->stats.oerrors++;
482 					break;
483 				}
484 				if (is_tunneled && txq->tunnel_en) {
485 					tso_header_sz += buf->outer_l2_len +
486 							 buf->outer_l3_len;
487 					cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
488 				} else {
489 					cs_flags |= MLX5_ETH_WQE_L4_CSUM;
490 				}
491 				if (unlikely(tso_header_sz >
492 					     MLX5_MAX_TSO_HEADER)) {
493 					txq->stats.oerrors++;
494 					break;
495 				}
496 				copy_b = tso_header_sz - pkt_inline_sz;
497 				/* First seg must contain all headers. */
498 				assert(copy_b <= length);
499 				if (copy_b &&
500 				   ((end - (uintptr_t)raw) > copy_b)) {
501 					uint16_t n = (MLX5_WQE_DS(copy_b) -
502 						      1 + 3) / 4;
503 
504 					if (unlikely(max_wqe < n))
505 						break;
506 					max_wqe -= n;
507 					rte_memcpy((void *)raw,
508 						   (void *)addr, copy_b);
509 					addr += copy_b;
510 					length -= copy_b;
511 					/* Include padding for TSO header. */
512 					copy_b = MLX5_WQE_DS(copy_b) *
513 						 MLX5_WQE_DWORD_SIZE;
514 					pkt_inline_sz += copy_b;
515 					raw += copy_b;
516 				} else {
517 					/* NOP WQE. */
518 					wqe->ctrl = (rte_v128u32_t){
519 						     rte_cpu_to_be_32(
520 							txq->wqe_ci << 8),
521 						     rte_cpu_to_be_32(
522 							txq->qp_num_8s | 1),
523 						     0,
524 						     0,
525 					};
526 					ds = 1;
527 #ifdef MLX5_PMD_SOFT_COUNTERS
528 					total_length = 0;
529 #endif
530 					k++;
531 					goto next_wqe;
532 				}
533 			}
534 		}
535 		/* Inline if enough room. */
536 		if (inline_en || tso) {
537 			uint32_t inl;
538 			uintptr_t end = (uintptr_t)
539 				(((uintptr_t)txq->wqes) +
540 				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
541 			unsigned int inline_room = max_inline *
542 						   RTE_CACHE_LINE_SIZE -
543 						   (pkt_inline_sz - 2) -
544 						   !!tso * sizeof(inl);
545 			uintptr_t addr_end = (addr + inline_room) &
546 					     ~(RTE_CACHE_LINE_SIZE - 1);
547 			unsigned int copy_b = (addr_end > addr) ?
548 				RTE_MIN((addr_end - addr), length) :
549 				0;
550 
551 			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
552 				/*
553 				 * One Dseg remains in the current WQE.  To
554 				 * keep the computation positive, it is
555 				 * removed after the bytes to Dseg conversion.
556 				 */
557 				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
558 
559 				if (unlikely(max_wqe < n))
560 					break;
561 				max_wqe -= n;
562 				if (tso) {
563 					inl = rte_cpu_to_be_32(copy_b |
564 							       MLX5_INLINE_SEG);
565 					rte_memcpy((void *)raw,
566 						   (void *)&inl, sizeof(inl));
567 					raw += sizeof(inl);
568 					pkt_inline_sz += sizeof(inl);
569 				}
570 				rte_memcpy((void *)raw, (void *)addr, copy_b);
571 				addr += copy_b;
572 				length -= copy_b;
573 				pkt_inline_sz += copy_b;
574 			}
575 			/*
576 			 * 2 DWORDs consumed by the WQE header + ETH segment +
577 			 * the size of the inline part of the packet.
578 			 */
579 			ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
580 			if (length > 0) {
581 				if (ds % (MLX5_WQE_SIZE /
582 					  MLX5_WQE_DWORD_SIZE) == 0) {
583 					if (unlikely(--max_wqe == 0))
584 						break;
585 					dseg = (volatile rte_v128u32_t *)
586 					       tx_mlx5_wqe(txq, txq->wqe_ci +
587 							   ds / 4);
588 				} else {
589 					dseg = (volatile rte_v128u32_t *)
590 						((uintptr_t)wqe +
591 						 (ds * MLX5_WQE_DWORD_SIZE));
592 				}
593 				goto use_dseg;
594 			} else if (!segs_n) {
595 				goto next_pkt;
596 			} else {
597 				/* dseg will be advanced as part of next_seg. */
598 				dseg = (volatile rte_v128u32_t *)
599 					((uintptr_t)wqe +
600 					 ((ds - 1) * MLX5_WQE_DWORD_SIZE));
601 				goto next_seg;
602 			}
603 		} else {
604 			/*
605 			 * No inline has been done in the packet, only the
606 			 * Ethernet header has been stored.
607 			 */
608 			dseg = (volatile rte_v128u32_t *)
609 				((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
610 			ds = 3;
611 use_dseg:
612 			/* Add the remaining packet as a simple ds. */
613 			naddr = rte_cpu_to_be_64(addr);
614 			*dseg = (rte_v128u32_t){
615 				rte_cpu_to_be_32(length),
616 				mlx5_tx_mb2mr(txq, buf),
617 				naddr,
618 				naddr >> 32,
619 			};
620 			++ds;
621 			if (!segs_n)
622 				goto next_pkt;
623 		}
624 next_seg:
625 		assert(buf);
626 		assert(ds);
627 		assert(wqe);
628 		/*
629 		 * Spill on next WQE when the current one does not have
630 		 * enough room left. Size of WQE must be a multiple
631 		 * of data segment size.
632 		 */
633 		assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
634 		if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
635 			if (unlikely(--max_wqe == 0))
636 				break;
637 			dseg = (volatile rte_v128u32_t *)
638 			       tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
639 			rte_prefetch0(tx_mlx5_wqe(txq,
640 						  txq->wqe_ci + ds / 4 + 1));
641 		} else {
642 			++dseg;
643 		}
644 		++ds;
645 		buf = buf->next;
646 		assert(buf);
647 		length = DATA_LEN(buf);
648 #ifdef MLX5_PMD_SOFT_COUNTERS
649 		total_length += length;
650 #endif
651 		/* Store segment information. */
652 		naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
653 		*dseg = (rte_v128u32_t){
654 			rte_cpu_to_be_32(length),
655 			mlx5_tx_mb2mr(txq, buf),
656 			naddr,
657 			naddr >> 32,
658 		};
659 		(*txq->elts)[++elts_head & elts_m] = buf;
660 		++sg;
661 		/* Advance counter only if all segs are successfully posted. */
662 		if (sg < segs_n)
663 			goto next_seg;
664 		else
665 			j += sg;
666 next_pkt:
667 		if (ds > MLX5_DSEG_MAX) {
668 			txq->stats.oerrors++;
669 			break;
670 		}
671 		++elts_head;
672 		++pkts;
673 		++i;
674 		/* Initialize known and common part of the WQE structure. */
675 		if (tso) {
676 			wqe->ctrl = (rte_v128u32_t){
677 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
678 						 MLX5_OPCODE_TSO),
679 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
680 				0,
681 				0,
682 			};
683 			wqe->eseg = (rte_v128u32_t){
684 				0,
685 				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
686 				0,
687 				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
688 			};
689 		} else {
690 			wqe->ctrl = (rte_v128u32_t){
691 				rte_cpu_to_be_32((txq->wqe_ci << 8) |
692 						 MLX5_OPCODE_SEND),
693 				rte_cpu_to_be_32(txq->qp_num_8s | ds),
694 				0,
695 				0,
696 			};
697 			wqe->eseg = (rte_v128u32_t){
698 				0,
699 				cs_flags,
700 				0,
701 				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
702 			};
703 		}
704 next_wqe:
705 		txq->wqe_ci += (ds + 3) / 4;
706 		/* Save the last successful WQE for completion request */
707 		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
708 #ifdef MLX5_PMD_SOFT_COUNTERS
709 		/* Increment sent bytes counter. */
710 		txq->stats.obytes += total_length;
711 #endif
712 	} while (i < pkts_n);
713 	/* Take a shortcut if nothing must be sent. */
714 	if (unlikely((i + k) == 0))
715 		return 0;
716 	txq->elts_head += (i + j);
717 	/* Check whether completion threshold has been reached. */
718 	comp = txq->elts_comp + i + j + k;
719 	if (comp >= MLX5_TX_COMP_THRESH) {
720 		/* Request completion on last WQE. */
721 		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
722 		/* Save elts_head in unused "immediate" field of WQE. */
723 		last_wqe->ctrl3 = txq->elts_head;
724 		txq->elts_comp = 0;
725 	} else {
726 		txq->elts_comp = comp;
727 	}
728 #ifdef MLX5_PMD_SOFT_COUNTERS
729 	/* Increment sent packets counter. */
730 	txq->stats.opackets += i;
731 #endif
732 	/* Ring QP doorbell. */
733 	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
734 	return i;
735 }
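
/*
 * Rough layout of a single SEND WQE as built above (sketch, slots are
 * MLX5_WQE_DWORD_SIZE units of 16 bytes):
 *
 *	slot 0      control segment (opcode, WQE index, DS count)
 *	slot 1      Ethernet segment (cs_flags, inline header size and
 *	            the first two bytes of the frame, "ehdr")
 *	slot 2...   16 more inlined header bytes, optional TSO and/or
 *	            max_inline data, then one 16-byte data segment per
 *	            remaining mbuf segment
 *
 * "ds" counts these 16-byte units and is written into ctrl[1]; the WQE
 * spans (ds + 3) / 4 WQEBBs, which is exactly what wqe_ci advances by.
 */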
736 
737 /**
738  * Open a MPW session.
739  *
740  * @param txq
741  *   Pointer to TX queue structure.
742  * @param mpw
743  *   Pointer to MPW session structure.
744  * @param length
745  *   Packet length.
746  */
747 static inline void
748 mlx5_mpw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, uint32_t length)
749 {
750 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
751 	volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
752 		(volatile struct mlx5_wqe_data_seg (*)[])
753 		tx_mlx5_wqe(txq, idx + 1);
754 
755 	mpw->state = MLX5_MPW_STATE_OPENED;
756 	mpw->pkts_n = 0;
757 	mpw->len = length;
758 	mpw->total_len = 0;
759 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
760 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
761 	mpw->wqe->eseg.inline_hdr_sz = 0;
762 	mpw->wqe->eseg.rsvd0 = 0;
763 	mpw->wqe->eseg.rsvd1 = 0;
764 	mpw->wqe->eseg.rsvd2 = 0;
765 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
766 					     (txq->wqe_ci << 8) |
767 					     MLX5_OPCODE_TSO);
768 	mpw->wqe->ctrl[2] = 0;
769 	mpw->wqe->ctrl[3] = 0;
770 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
771 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
772 	mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
773 		(((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
774 	mpw->data.dseg[2] = &(*dseg)[0];
775 	mpw->data.dseg[3] = &(*dseg)[1];
776 	mpw->data.dseg[4] = &(*dseg)[2];
777 }
778 
779 /**
780  * Close a MPW session.
781  *
782  * @param txq
783  *   Pointer to TX queue structure.
784  * @param mpw
785  *   Pointer to MPW session structure.
786  */
787 static inline void
788 mlx5_mpw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
789 {
790 	unsigned int num = mpw->pkts_n;
791 
792 	/*
793 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
794 	 * count as 2.
795 	 */
796 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
797 	mpw->state = MLX5_MPW_STATE_CLOSED;
798 	if (num < 3)
799 		++txq->wqe_ci;
800 	else
801 		txq->wqe_ci += 2;
802 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
803 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
804 }
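
/*
 * Accounting sketch: the title WQEBB of a legacy MPW holds the control
 * and Ethernet segments plus the first two data segments, the next
 * WQEBB holds up to three more (MLX5_MPW_DSEG_MAX).  Closing a session
 * with num == 5 thus reports a DS count of 2 + 5 = 7 sixteen-byte units
 * and consumes two WQEBBs (wqe_ci += 2), while num <= 2 fits entirely
 * in the title WQEBB and consumes only one.
 */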
805 
806 /**
807  * DPDK callback for TX with MPW support.
808  *
809  * @param dpdk_txq
810  *   Generic pointer to TX queue structure.
811  * @param[in] pkts
812  *   Packets to transmit.
813  * @param pkts_n
814  *   Number of packets in array.
815  *
816  * @return
817  *   Number of packets successfully transmitted (<= pkts_n).
818  */
819 uint16_t
820 mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
821 {
822 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
823 	uint16_t elts_head = txq->elts_head;
824 	const uint16_t elts_n = 1 << txq->elts_n;
825 	const uint16_t elts_m = elts_n - 1;
826 	unsigned int i = 0;
827 	unsigned int j = 0;
828 	uint16_t max_elts;
829 	uint16_t max_wqe;
830 	unsigned int comp;
831 	struct mlx5_mpw mpw = {
832 		.state = MLX5_MPW_STATE_CLOSED,
833 	};
834 
835 	if (unlikely(!pkts_n))
836 		return 0;
837 	/* Prefetch first packet cacheline. */
838 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
839 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
840 	/* Start processing. */
841 	mlx5_tx_complete(txq);
842 	max_elts = (elts_n - (elts_head - txq->elts_tail));
843 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
844 	if (unlikely(!max_wqe))
845 		return 0;
846 	do {
847 		struct rte_mbuf *buf = *(pkts++);
848 		uint32_t length;
849 		unsigned int segs_n = buf->nb_segs;
850 		uint32_t cs_flags = 0;
851 
852 		/*
853 		 * Make sure there is enough room to store this packet and
854 		 * that one ring entry remains unused.
855 		 */
856 		assert(segs_n);
857 		if (max_elts < segs_n)
858 			break;
859 		/* Do not bother with large packets MPW cannot handle. */
860 		if (segs_n > MLX5_MPW_DSEG_MAX) {
861 			txq->stats.oerrors++;
862 			break;
863 		}
864 		max_elts -= segs_n;
865 		--pkts_n;
866 		/* Should we enable HW CKSUM offload */
867 		if (buf->ol_flags &
868 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
869 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
870 		/* Retrieve packet information. */
871 		length = PKT_LEN(buf);
872 		assert(length);
873 		/* Start new session if packet differs. */
874 		if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
875 		    ((mpw.len != length) ||
876 		     (segs_n != 1) ||
877 		     (mpw.wqe->eseg.cs_flags != cs_flags)))
878 			mlx5_mpw_close(txq, &mpw);
879 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
880 			/*
881 			 * Multi-Packet WQE consumes at most two WQE.
882 			 * mlx5_mpw_new() expects to be able to use such
883 			 * resources.
884 			 */
885 			if (unlikely(max_wqe < 2))
886 				break;
887 			max_wqe -= 2;
888 			mlx5_mpw_new(txq, &mpw, length);
889 			mpw.wqe->eseg.cs_flags = cs_flags;
890 		}
891 		/* Multi-segment packets must be alone in their MPW. */
892 		assert((segs_n == 1) || (mpw.pkts_n == 0));
893 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
894 		length = 0;
895 #endif
896 		do {
897 			volatile struct mlx5_wqe_data_seg *dseg;
898 			uintptr_t addr;
899 
900 			assert(buf);
901 			(*txq->elts)[elts_head++ & elts_m] = buf;
902 			dseg = mpw.data.dseg[mpw.pkts_n];
903 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
904 			*dseg = (struct mlx5_wqe_data_seg){
905 				.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
906 				.lkey = mlx5_tx_mb2mr(txq, buf),
907 				.addr = rte_cpu_to_be_64(addr),
908 			};
909 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
910 			length += DATA_LEN(buf);
911 #endif
912 			buf = buf->next;
913 			++mpw.pkts_n;
914 			++j;
915 		} while (--segs_n);
916 		assert(length == mpw.len);
917 		if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
918 			mlx5_mpw_close(txq, &mpw);
919 #ifdef MLX5_PMD_SOFT_COUNTERS
920 		/* Increment sent bytes counter. */
921 		txq->stats.obytes += length;
922 #endif
923 		++i;
924 	} while (pkts_n);
925 	/* Take a shortcut if nothing must be sent. */
926 	if (unlikely(i == 0))
927 		return 0;
928 	/* Check whether completion threshold has been reached. */
929 	/* "j" includes both packets and segments. */
930 	comp = txq->elts_comp + j;
931 	if (comp >= MLX5_TX_COMP_THRESH) {
932 		volatile struct mlx5_wqe *wqe = mpw.wqe;
933 
934 		/* Request completion on last WQE. */
935 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
936 		/* Save elts_head in unused "immediate" field of WQE. */
937 		wqe->ctrl[3] = elts_head;
938 		txq->elts_comp = 0;
939 	} else {
940 		txq->elts_comp = comp;
941 	}
942 #ifdef MLX5_PMD_SOFT_COUNTERS
943 	/* Increment sent packets counter. */
944 	txq->stats.opackets += i;
945 #endif
946 	/* Ring QP doorbell. */
947 	if (mpw.state == MLX5_MPW_STATE_OPENED)
948 		mlx5_mpw_close(txq, &mpw);
949 	mlx5_tx_dbrec(txq, mpw.wqe);
950 	txq->elts_head = elts_head;
951 	return i;
952 }
953 
954 /**
955  * Open a MPW inline session.
956  *
957  * @param txq
958  *   Pointer to TX queue structure.
959  * @param mpw
960  *   Pointer to MPW session structure.
961  * @param length
962  *   Packet length.
963  */
964 static inline void
965 mlx5_mpw_inline_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw,
966 		    uint32_t length)
967 {
968 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
969 	struct mlx5_wqe_inl_small *inl;
970 
971 	mpw->state = MLX5_MPW_INL_STATE_OPENED;
972 	mpw->pkts_n = 0;
973 	mpw->len = length;
974 	mpw->total_len = 0;
975 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
976 	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
977 					     (txq->wqe_ci << 8) |
978 					     MLX5_OPCODE_TSO);
979 	mpw->wqe->ctrl[2] = 0;
980 	mpw->wqe->ctrl[3] = 0;
981 	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
982 	mpw->wqe->eseg.inline_hdr_sz = 0;
983 	mpw->wqe->eseg.cs_flags = 0;
984 	mpw->wqe->eseg.rsvd0 = 0;
985 	mpw->wqe->eseg.rsvd1 = 0;
986 	mpw->wqe->eseg.rsvd2 = 0;
987 	inl = (struct mlx5_wqe_inl_small *)
988 		(((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
989 	mpw->data.raw = (uint8_t *)&inl->raw;
990 }
991 
992 /**
993  * Close a MPW inline session.
994  *
995  * @param txq
996  *   Pointer to TX queue structure.
997  * @param mpw
998  *   Pointer to MPW session structure.
999  */
1000 static inline void
1001 mlx5_mpw_inline_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1002 {
1003 	unsigned int size;
1004 	struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
1005 		(((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
1006 
1007 	size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
1008 	/*
1009 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
1010 	 * count as 2.
1011 	 */
1012 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1013 					     MLX5_WQE_DS(size));
1014 	mpw->state = MLX5_MPW_STATE_CLOSED;
1015 	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
1016 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1017 }
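
/*
 * Size accounting sketch: "size" is the inlined payload (mpw->total_len)
 * plus the fixed overhead (MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA), i.e.
 * the control segment, the Ethernet segment and the inline segment
 * header preceding the inlined bytes.  ctrl[1] carries this size in
 * 16-byte units and wqe_ci advances by the number of WQEBBs it spans.
 */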
1018 
1019 /**
1020  * DPDK callback for TX with MPW inline support.
1021  *
1022  * @param dpdk_txq
1023  *   Generic pointer to TX queue structure.
1024  * @param[in] pkts
1025  *   Packets to transmit.
1026  * @param pkts_n
1027  *   Number of packets in array.
1028  *
1029  * @return
1030  *   Number of packets successfully transmitted (<= pkts_n).
1031  */
1032 uint16_t
1033 mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
1034 			 uint16_t pkts_n)
1035 {
1036 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1037 	uint16_t elts_head = txq->elts_head;
1038 	const uint16_t elts_n = 1 << txq->elts_n;
1039 	const uint16_t elts_m = elts_n - 1;
1040 	unsigned int i = 0;
1041 	unsigned int j = 0;
1042 	uint16_t max_elts;
1043 	uint16_t max_wqe;
1044 	unsigned int comp;
1045 	unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
1046 	struct mlx5_mpw mpw = {
1047 		.state = MLX5_MPW_STATE_CLOSED,
1048 	};
1049 	/*
1050 	 * Compute the maximum number of WQEs which can be consumed by inline
1051 	 * code.
1052 	 * - 2 DSEG for:
1053 	 *   - 1 control segment,
1054 	 *   - 1 Ethernet segment,
1055 	 * - N Dseg from the inline request.
1056 	 */
1057 	const unsigned int wqe_inl_n =
1058 		((2 * MLX5_WQE_DWORD_SIZE +
1059 		  txq->max_inline * RTE_CACHE_LINE_SIZE) +
1060 		 RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
1061 
1062 	if (unlikely(!pkts_n))
1063 		return 0;
1064 	/* Prefetch first packet cacheline. */
1065 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
1066 	rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
1067 	/* Start processing. */
1068 	mlx5_tx_complete(txq);
1069 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1070 	do {
1071 		struct rte_mbuf *buf = *(pkts++);
1072 		uintptr_t addr;
1073 		uint32_t length;
1074 		unsigned int segs_n = buf->nb_segs;
1075 		uint32_t cs_flags = 0;
1076 
1077 		/*
1078 		 * Make sure there is enough room to store this packet and
1079 		 * that one ring entry remains unused.
1080 		 */
1081 		assert(segs_n);
1082 		if (max_elts < segs_n)
1083 			break;
1084 		/* Do not bother with large packets MPW cannot handle. */
1085 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1086 			txq->stats.oerrors++;
1087 			break;
1088 		}
1089 		max_elts -= segs_n;
1090 		--pkts_n;
1091 		/*
1092 		 * Compute max_wqe in case fewer WQEs were consumed in the
1093 		 * previous iteration.
1094 		 */
1095 		max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1096 		/* Should we enable HW CKSUM offload */
1097 		if (buf->ol_flags &
1098 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1099 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1100 		/* Retrieve packet information. */
1101 		length = PKT_LEN(buf);
1102 		/* Start new session if packet differs. */
1103 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1104 			if ((mpw.len != length) ||
1105 			    (segs_n != 1) ||
1106 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1107 				mlx5_mpw_close(txq, &mpw);
1108 		} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
1109 			if ((mpw.len != length) ||
1110 			    (segs_n != 1) ||
1111 			    (length > inline_room) ||
1112 			    (mpw.wqe->eseg.cs_flags != cs_flags)) {
1113 				mlx5_mpw_inline_close(txq, &mpw);
1114 				inline_room =
1115 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1116 			}
1117 		}
1118 		if (mpw.state == MLX5_MPW_STATE_CLOSED) {
1119 			if ((segs_n != 1) ||
1120 			    (length > inline_room)) {
1121 				/*
1122 				 * Multi-Packet WQE consumes at most two WQE.
1123 				 * mlx5_mpw_new() expects to be able to use
1124 				 * such resources.
1125 				 */
1126 				if (unlikely(max_wqe < 2))
1127 					break;
1128 				max_wqe -= 2;
1129 				mlx5_mpw_new(txq, &mpw, length);
1130 				mpw.wqe->eseg.cs_flags = cs_flags;
1131 			} else {
1132 				if (unlikely(max_wqe < wqe_inl_n))
1133 					break;
1134 				max_wqe -= wqe_inl_n;
1135 				mlx5_mpw_inline_new(txq, &mpw, length);
1136 				mpw.wqe->eseg.cs_flags = cs_flags;
1137 			}
1138 		}
1139 		/* Multi-segment packets must be alone in their MPW. */
1140 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1141 		if (mpw.state == MLX5_MPW_STATE_OPENED) {
1142 			assert(inline_room ==
1143 			       txq->max_inline * RTE_CACHE_LINE_SIZE);
1144 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1145 			length = 0;
1146 #endif
1147 			do {
1148 				volatile struct mlx5_wqe_data_seg *dseg;
1149 
1150 				assert(buf);
1151 				(*txq->elts)[elts_head++ & elts_m] = buf;
1152 				dseg = mpw.data.dseg[mpw.pkts_n];
1153 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1154 				*dseg = (struct mlx5_wqe_data_seg){
1155 					.byte_count =
1156 					       rte_cpu_to_be_32(DATA_LEN(buf)),
1157 					.lkey = mlx5_tx_mb2mr(txq, buf),
1158 					.addr = rte_cpu_to_be_64(addr),
1159 				};
1160 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1161 				length += DATA_LEN(buf);
1162 #endif
1163 				buf = buf->next;
1164 				++mpw.pkts_n;
1165 				++j;
1166 			} while (--segs_n);
1167 			assert(length == mpw.len);
1168 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX)
1169 				mlx5_mpw_close(txq, &mpw);
1170 		} else {
1171 			unsigned int max;
1172 
1173 			assert(mpw.state == MLX5_MPW_INL_STATE_OPENED);
1174 			assert(length <= inline_room);
1175 			assert(length == DATA_LEN(buf));
1176 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1177 			(*txq->elts)[elts_head++ & elts_m] = buf;
1178 			/* Maximum number of bytes before wrapping. */
1179 			max = ((((uintptr_t)(txq->wqes)) +
1180 				(1 << txq->wqe_n) *
1181 				MLX5_WQE_SIZE) -
1182 			       (uintptr_t)mpw.data.raw);
1183 			if (length > max) {
1184 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1185 					   (void *)addr,
1186 					   max);
1187 				mpw.data.raw = (volatile void *)txq->wqes;
1188 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1189 					   (void *)(addr + max),
1190 					   length - max);
1191 				mpw.data.raw += length - max;
1192 			} else {
1193 				rte_memcpy((void *)(uintptr_t)mpw.data.raw,
1194 					   (void *)addr,
1195 					   length);
1196 
1197 				if (length == max)
1198 					mpw.data.raw =
1199 						(volatile void *)txq->wqes;
1200 				else
1201 					mpw.data.raw += length;
1202 			}
1203 			++mpw.pkts_n;
1204 			mpw.total_len += length;
1205 			++j;
1206 			if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
1207 				mlx5_mpw_inline_close(txq, &mpw);
1208 				inline_room =
1209 					txq->max_inline * RTE_CACHE_LINE_SIZE;
1210 			} else {
1211 				inline_room -= length;
1212 			}
1213 		}
1214 #ifdef MLX5_PMD_SOFT_COUNTERS
1215 		/* Increment sent bytes counter. */
1216 		txq->stats.obytes += length;
1217 #endif
1218 		++i;
1219 	} while (pkts_n);
1220 	/* Take a shortcut if nothing must be sent. */
1221 	if (unlikely(i == 0))
1222 		return 0;
1223 	/* Check whether completion threshold has been reached. */
1224 	/* "j" includes both packets and segments. */
1225 	comp = txq->elts_comp + j;
1226 	if (comp >= MLX5_TX_COMP_THRESH) {
1227 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1228 
1229 		/* Request completion on last WQE. */
1230 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1231 		/* Save elts_head in unused "immediate" field of WQE. */
1232 		wqe->ctrl[3] = elts_head;
1233 		txq->elts_comp = 0;
1234 	} else {
1235 		txq->elts_comp = comp;
1236 	}
1237 #ifdef MLX5_PMD_SOFT_COUNTERS
1238 	/* Increment sent packets counter. */
1239 	txq->stats.opackets += i;
1240 #endif
1241 	/* Ring QP doorbell. */
1242 	if (mpw.state == MLX5_MPW_INL_STATE_OPENED)
1243 		mlx5_mpw_inline_close(txq, &mpw);
1244 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1245 		mlx5_mpw_close(txq, &mpw);
1246 	mlx5_tx_dbrec(txq, mpw.wqe);
1247 	txq->elts_head = elts_head;
1248 	return i;
1249 }
1250 
1251 /**
1252  * Open an Enhanced MPW session.
1253  *
1254  * @param txq
1255  *   Pointer to TX queue structure.
1256  * @param mpw
1257  *   Pointer to MPW session structure.
1258  * @param padding
1259  *   Non-zero to pad the title WQEBB with zero-length inline headers.
1260  */
1261 static inline void
1262 mlx5_empw_new(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw, int padding)
1263 {
1264 	uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
1265 
1266 	mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
1267 	mpw->pkts_n = 0;
1268 	mpw->total_len = sizeof(struct mlx5_wqe);
1269 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
1270 	mpw->wqe->ctrl[0] =
1271 		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
1272 				 (txq->wqe_ci << 8) |
1273 				 MLX5_OPCODE_ENHANCED_MPSW);
1274 	mpw->wqe->ctrl[2] = 0;
1275 	mpw->wqe->ctrl[3] = 0;
1276 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
1277 	if (unlikely(padding)) {
1278 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
1279 
1280 		/* Pad the first 2 DWORDs with zero-length inline header. */
1281 		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
1282 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
1283 			rte_cpu_to_be_32(MLX5_INLINE_SEG);
1284 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
1285 		/* Start from the next WQEBB. */
1286 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
1287 	} else {
1288 		mpw->data.raw = (volatile void *)(mpw->wqe + 1);
1289 	}
1290 }
1291 
1292 /**
1293  * Close an Enhanced MPW session.
1294  *
1295  * @param txq
1296  *   Pointer to TX queue structure.
1297  * @param mpw
1298  *   Pointer to MPW session structure.
1299  *
1300  * @return
1301  *   Number of consumed WQEs.
1302  */
1303 static inline uint16_t
1304 mlx5_empw_close(struct mlx5_txq_data *txq, struct mlx5_mpw *mpw)
1305 {
1306 	uint16_t ret;
1307 
1308 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
1309 	 * count as 2.
1310 	 */
1311 	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
1312 					     MLX5_WQE_DS(mpw->total_len));
1313 	mpw->state = MLX5_MPW_STATE_CLOSED;
1314 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
1315 	txq->wqe_ci += ret;
1316 	return ret;
1317 }
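
/*
 * Example of the arithmetic above (illustrative, assuming the usual
 * 64-byte WQEBB and 16-byte DS size): a session whose total_len grew to
 * 200 bytes reports MLX5_WQE_DS(200) == 13 units in ctrl[1] and returns
 * (200 + 63) / 64 == 4, so wqe_ci advances by four WQEBBs.
 */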
1318 
1319 /**
1320  * DPDK callback for TX with Enhanced MPW support.
1321  *
1322  * @param dpdk_txq
1323  *   Generic pointer to TX queue structure.
1324  * @param[in] pkts
1325  *   Packets to transmit.
1326  * @param pkts_n
1327  *   Number of packets in array.
1328  *
1329  * @return
1330  *   Number of packets successfully transmitted (<= pkts_n).
1331  */
1332 uint16_t
1333 mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1334 {
1335 	struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
1336 	uint16_t elts_head = txq->elts_head;
1337 	const uint16_t elts_n = 1 << txq->elts_n;
1338 	const uint16_t elts_m = elts_n - 1;
1339 	unsigned int i = 0;
1340 	unsigned int j = 0;
1341 	uint16_t max_elts;
1342 	uint16_t max_wqe;
1343 	unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
1344 	unsigned int mpw_room = 0;
1345 	unsigned int inl_pad = 0;
1346 	uint32_t inl_hdr;
1347 	struct mlx5_mpw mpw = {
1348 		.state = MLX5_MPW_STATE_CLOSED,
1349 	};
1350 
1351 	if (unlikely(!pkts_n))
1352 		return 0;
1353 	/* Start processing. */
1354 	mlx5_tx_complete(txq);
1355 	max_elts = (elts_n - (elts_head - txq->elts_tail));
1356 	/* A CQE slot must always be available. */
1357 	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
1358 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
1359 	if (unlikely(!max_wqe))
1360 		return 0;
1361 	do {
1362 		struct rte_mbuf *buf = *(pkts++);
1363 		uintptr_t addr;
1364 		uint64_t naddr;
1365 		unsigned int n;
1366 		unsigned int do_inline = 0; /* Whether inline is possible. */
1367 		uint32_t length;
1368 		unsigned int segs_n = buf->nb_segs;
1369 		uint32_t cs_flags = 0;
1370 
1371 		/*
1372 		 * Make sure there is enough room to store this packet and
1373 		 * that one ring entry remains unused.
1374 		 */
1375 		assert(segs_n);
1376 		if (max_elts - j < segs_n)
1377 			break;
1378 		/* Do not bother with large packets MPW cannot handle. */
1379 		if (segs_n > MLX5_MPW_DSEG_MAX) {
1380 			txq->stats.oerrors++;
1381 			break;
1382 		}
1383 		/* Should we enable HW CKSUM offload. */
1384 		if (buf->ol_flags &
1385 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
1386 			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
1387 		/* Retrieve packet information. */
1388 		length = PKT_LEN(buf);
1389 		/* Start new session if:
1390 		 * - multi-segment packet
1391 		 * - no space left even for a dseg
1392 		 * - next packet can be inlined with a new WQE
1393 		 * - cs_flag differs
1394 		 * It can't be MLX5_MPW_STATE_OPENED since a legacy MPW session
1395 		 * always holds one multi-segmented packet and closes at once.
1396 		 */
1397 		if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
1398 			if ((segs_n != 1) ||
1399 			    (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
1400 			      mpw_room) ||
1401 			    (length <= txq->inline_max_packet_sz &&
1402 			     inl_pad + sizeof(inl_hdr) + length >
1403 			      mpw_room) ||
1404 			    (mpw.wqe->eseg.cs_flags != cs_flags))
1405 				max_wqe -= mlx5_empw_close(txq, &mpw);
1406 		}
1407 		if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
1408 			if (unlikely(segs_n != 1)) {
1409 				/* Fall back to legacy MPW.
1410 				 * A MPW session consumes 2 WQEs at most to
1411 				 * include MLX5_MPW_DSEG_MAX pointers.
1412 				 */
1413 				if (unlikely(max_wqe < 2))
1414 					break;
1415 				mlx5_mpw_new(txq, &mpw, length);
1416 			} else {
1417 				/* In Enhanced MPW, inline as much as the budget
1418 				 * allows. The remaining space is to be
1419 				 * filled with dsegs. If the title WQEBB isn't
1420 				 * padded, it will have 2 dsegs there.
1421 				 */
1422 				mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
1423 					    (max_inline ? max_inline :
1424 					     pkts_n * MLX5_WQE_DWORD_SIZE) +
1425 					    MLX5_WQE_SIZE);
1426 				if (unlikely(max_wqe * MLX5_WQE_SIZE <
1427 					      mpw_room))
1428 					break;
1429 				/* Don't pad the title WQEBB to not waste WQ. */
1430 				mlx5_empw_new(txq, &mpw, 0);
1431 				mpw_room -= mpw.total_len;
1432 				inl_pad = 0;
1433 				do_inline =
1434 					length <= txq->inline_max_packet_sz &&
1435 					sizeof(inl_hdr) + length <= mpw_room &&
1436 					!txq->mpw_hdr_dseg;
1437 			}
1438 			mpw.wqe->eseg.cs_flags = cs_flags;
1439 		} else {
1440 			/* Evaluate whether the next packet can be inlined.
1441 			 * Inlining is possible when:
1442 			 * - length is less than configured value
1443 			 * - length fits in the remaining space
1444 			 * - not required to fill the title WQEBB with dsegs
1445 			 */
1446 			do_inline =
1447 				length <= txq->inline_max_packet_sz &&
1448 				inl_pad + sizeof(inl_hdr) + length <=
1449 				 mpw_room &&
1450 				(!txq->mpw_hdr_dseg ||
1451 				 mpw.total_len >= MLX5_WQE_SIZE);
1452 		}
1453 		/* Multi-segment packets must be alone in their MPW. */
1454 		assert((segs_n == 1) || (mpw.pkts_n == 0));
1455 		if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
1456 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1457 			length = 0;
1458 #endif
1459 			do {
1460 				volatile struct mlx5_wqe_data_seg *dseg;
1461 
1462 				assert(buf);
1463 				(*txq->elts)[elts_head++ & elts_m] = buf;
1464 				dseg = mpw.data.dseg[mpw.pkts_n];
1465 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
1466 				*dseg = (struct mlx5_wqe_data_seg){
1467 					.byte_count = rte_cpu_to_be_32(
1468 								DATA_LEN(buf)),
1469 					.lkey = mlx5_tx_mb2mr(txq, buf),
1470 					.addr = rte_cpu_to_be_64(addr),
1471 				};
1472 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
1473 				length += DATA_LEN(buf);
1474 #endif
1475 				buf = buf->next;
1476 				++j;
1477 				++mpw.pkts_n;
1478 			} while (--segs_n);
1479 			/* A multi-segmented packet takes one MPW session.
1480 			 * TODO: Pack more multi-segmented packets if possible.
1481 			 */
1482 			mlx5_mpw_close(txq, &mpw);
1483 			if (mpw.pkts_n < 3)
1484 				max_wqe--;
1485 			else
1486 				max_wqe -= 2;
1487 		} else if (do_inline) {
1488 			/* Inline packet into WQE. */
1489 			unsigned int max;
1490 
1491 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1492 			assert(length == DATA_LEN(buf));
1493 			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
1494 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1495 			mpw.data.raw = (volatile void *)
1496 				((uintptr_t)mpw.data.raw + inl_pad);
1497 			max = tx_mlx5_wq_tailroom(txq,
1498 					(void *)(uintptr_t)mpw.data.raw);
1499 			/* Copy inline header. */
1500 			mpw.data.raw = (volatile void *)
1501 				mlx5_copy_to_wq(
1502 					  (void *)(uintptr_t)mpw.data.raw,
1503 					  &inl_hdr,
1504 					  sizeof(inl_hdr),
1505 					  (void *)(uintptr_t)txq->wqes,
1506 					  max);
1507 			max = tx_mlx5_wq_tailroom(txq,
1508 					(void *)(uintptr_t)mpw.data.raw);
1509 			/* Copy packet data. */
1510 			mpw.data.raw = (volatile void *)
1511 				mlx5_copy_to_wq(
1512 					  (void *)(uintptr_t)mpw.data.raw,
1513 					  (void *)addr,
1514 					  length,
1515 					  (void *)(uintptr_t)txq->wqes,
1516 					  max);
1517 			++mpw.pkts_n;
1518 			mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
1519 			/* No need to get completion as the entire packet is
1520 			 * copied to WQ. Free the buf right away.
1521 			 */
1522 			rte_pktmbuf_free_seg(buf);
1523 			mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
1524 			/* Add pad in the next packet if any. */
1525 			inl_pad = (((uintptr_t)mpw.data.raw +
1526 					(MLX5_WQE_DWORD_SIZE - 1)) &
1527 					~(MLX5_WQE_DWORD_SIZE - 1)) -
1528 				  (uintptr_t)mpw.data.raw;
1529 		} else {
1530 			/* No inline. Load a dseg of packet pointer. */
1531 			volatile rte_v128u32_t *dseg;
1532 
1533 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
1534 			assert((inl_pad + sizeof(*dseg)) <= mpw_room);
1535 			assert(length == DATA_LEN(buf));
1536 			if (!tx_mlx5_wq_tailroom(txq,
1537 					(void *)((uintptr_t)mpw.data.raw
1538 						+ inl_pad)))
1539 				dseg = (volatile void *)txq->wqes;
1540 			else
1541 				dseg = (volatile void *)
1542 					((uintptr_t)mpw.data.raw +
1543 					 inl_pad);
1544 			(*txq->elts)[elts_head++ & elts_m] = buf;
1545 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
1546 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
1547 				rte_prefetch2((void *)(addr +
1548 						n * RTE_CACHE_LINE_SIZE));
1549 			naddr = rte_cpu_to_be_64(addr);
1550 			*dseg = (rte_v128u32_t) {
1551 				rte_cpu_to_be_32(length),
1552 				mlx5_tx_mb2mr(txq, buf),
1553 				naddr,
1554 				naddr >> 32,
1555 			};
1556 			mpw.data.raw = (volatile void *)(dseg + 1);
1557 			mpw.total_len += (inl_pad + sizeof(*dseg));
1558 			++j;
1559 			++mpw.pkts_n;
1560 			mpw_room -= (inl_pad + sizeof(*dseg));
1561 			inl_pad = 0;
1562 		}
1563 #ifdef MLX5_PMD_SOFT_COUNTERS
1564 		/* Increment sent bytes counter. */
1565 		txq->stats.obytes += length;
1566 #endif
1567 		++i;
1568 	} while (i < pkts_n);
1569 	/* Take a shortcut if nothing must be sent. */
1570 	if (unlikely(i == 0))
1571 		return 0;
1572 	/* Check whether completion threshold has been reached. */
1573 	if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
1574 			(uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
1575 			 (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
1576 		volatile struct mlx5_wqe *wqe = mpw.wqe;
1577 
1578 		/* Request completion on last WQE. */
1579 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
1580 		/* Save elts_head in unused "immediate" field of WQE. */
1581 		wqe->ctrl[3] = elts_head;
1582 		txq->elts_comp = 0;
1583 		txq->mpw_comp = txq->wqe_ci;
1584 		txq->cq_pi++;
1585 	} else {
1586 		txq->elts_comp += j;
1587 	}
1588 #ifdef MLX5_PMD_SOFT_COUNTERS
1589 	/* Increment sent packets counter. */
1590 	txq->stats.opackets += i;
1591 #endif
1592 	if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
1593 		mlx5_empw_close(txq, &mpw);
1594 	else if (mpw.state == MLX5_MPW_STATE_OPENED)
1595 		mlx5_mpw_close(txq, &mpw);
1596 	/* Ring QP doorbell. */
1597 	mlx5_tx_dbrec(txq, mpw.wqe);
1598 	txq->elts_head = elts_head;
1599 	return i;
1600 }
1601 
1602 /**
1603  * Translate RX completion flags to packet type.
1604  *
1605  * @param[in] cqe
1606  *   Pointer to CQE.
1607  *
1608  * @note: keep mlx5_dev_supported_ptypes_get() in sync with any change here.
1609  *
1610  * @return
1611  *   Packet type for struct rte_mbuf.
1612  */
1613 static inline uint32_t
1614 rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
1615 {
1616 	uint8_t idx;
1617 	uint8_t pinfo = cqe->pkt_info;
1618 	uint16_t ptype = cqe->hdr_type_etc;
1619 
1620 	/*
1621 	 * The index to the array should have:
1622 	 * bit[1:0] = l3_hdr_type
1623 	 * bit[4:2] = l4_hdr_type
1624 	 * bit[5] = ip_frag
1625 	 * bit[6] = tunneled
1626 	 * bit[7] = outer_l3_type
1627 	 */
1628 	idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
1629 	return mlx5_ptype_table[idx];
1630 }
1631 
1632 /**
1633  * Get size of the next packet for a given CQE. For compressed CQEs, the
1634  * consumer index is updated only once all packets of the current one have
1635  * been processed.
1636  *
1637  * @param rxq
1638  *   Pointer to RX queue.
1639  * @param cqe
1640  *   CQE to process.
1641  * @param[out] rss_hash
1642  *   Packet RSS Hash result.
1643  *
1644  * @return
1645  *   Packet size in bytes (0 if there is none), -1 in case of completion
1646  *   with error.
1647  */
1648 static inline int
1649 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
1650 		 uint16_t cqe_cnt, uint32_t *rss_hash)
1651 {
1652 	struct rxq_zip *zip = &rxq->zip;
1653 	uint16_t cqe_n = cqe_cnt + 1;
1654 	int len = 0;
1655 	uint16_t idx, end;
1656 
1657 	/* Process compressed data in the CQE and mini arrays. */
1658 	if (zip->ai) {
1659 		volatile struct mlx5_mini_cqe8 (*mc)[8] =
1660 			(volatile struct mlx5_mini_cqe8 (*)[8])
1661 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
1662 
1663 		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
1664 		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
1665 		if ((++zip->ai & 7) == 0) {
1666 			/* Invalidate consumed CQEs */
1667 			idx = zip->ca;
1668 			end = zip->na;
1669 			while (idx != end) {
1670 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1671 					MLX5_CQE_INVALIDATE;
1672 				++idx;
1673 			}
1674 			/*
1675 			 * Increment consumer index to skip the number of
1676 			 * CQEs consumed. Hardware leaves holes in the CQ
1677 			 * ring for software use.
1678 			 */
1679 			zip->ca = zip->na;
1680 			zip->na += 8;
1681 		}
1682 		if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
1683 			/* Invalidate the rest */
1684 			idx = zip->ca;
1685 			end = zip->cq_ci;
1686 
1687 			while (idx != end) {
1688 				(*rxq->cqes)[idx & cqe_cnt].op_own =
1689 					MLX5_CQE_INVALIDATE;
1690 				++idx;
1691 			}
1692 			rxq->cq_ci = zip->cq_ci;
1693 			zip->ai = 0;
1694 		}
1695 	/* No compressed data, get next CQE and verify if it is compressed. */
1696 	} else {
1697 		int ret;
1698 		int8_t op_own;
1699 
1700 		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
1701 		if (unlikely(ret == 1))
1702 			return 0;
1703 		++rxq->cq_ci;
1704 		op_own = cqe->op_own;
1705 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
1706 			volatile struct mlx5_mini_cqe8 (*mc)[8] =
1707 				(volatile struct mlx5_mini_cqe8 (*)[8])
1708 				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
1709 							  cqe_cnt].pkt_info);
1710 
1711 			/* Fix endianness. */
1712 			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
1713 			/*
1714 			 * Current mini array position is the one returned by
1715 			 * check_cqe().
1716 			 *
1717 			 * If completion comprises several mini arrays, as a
1718 			 * special case the second one is located 7 CQEs after
1719 			 * the initial CQE instead of 8 for subsequent ones.
1720 			 */
1721 			zip->ca = rxq->cq_ci;
1722 			zip->na = zip->ca + 7;
1723 			/* Compute the next non-compressed CQE. */
1724 			--rxq->cq_ci;
1725 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
1726 			/* Get packet size to return. */
1727 			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
1728 			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
1729 			zip->ai = 1;
1730 			/* Prefetch all the entries to be invalidated. */
1731 			idx = zip->ca;
1732 			end = zip->cq_ci;
1733 			while (idx != end) {
1734 				rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
1735 				++idx;
1736 			}
1737 		} else {
1738 			len = rte_be_to_cpu_32(cqe->byte_cnt);
1739 			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
1740 		}
1741 		/* Error while receiving packet. */
1742 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
1743 			return -1;
1744 	}
1745 	return len;
1746 }
1747 
1748 /**
1749  * Translate RX completion flags to offload flags.
1750  *
1751  * @param[in] rxq
1752  *   Pointer to RX queue structure.
1753  * @param[in] cqe
1754  *   Pointer to CQE.
1755  *
1756  * @return
1757  *   Offload flags (ol_flags) for struct rte_mbuf.
1758  */
1759 static inline uint32_t
1760 rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
1761 {
1762 	uint32_t ol_flags = 0;
1763 	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
1764 
1765 	ol_flags =
1766 		TRANSPOSE(flags,
1767 			  MLX5_CQE_RX_L3_HDR_VALID,
1768 			  PKT_RX_IP_CKSUM_GOOD) |
1769 		TRANSPOSE(flags,
1770 			  MLX5_CQE_RX_L4_HDR_VALID,
1771 			  PKT_RX_L4_CKSUM_GOOD);
1772 	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
1773 		ol_flags |=
1774 			TRANSPOSE(flags,
1775 				  MLX5_CQE_RX_L3_HDR_VALID,
1776 				  PKT_RX_IP_CKSUM_GOOD) |
1777 			TRANSPOSE(flags,
1778 				  MLX5_CQE_RX_L4_HDR_VALID,
1779 				  PKT_RX_L4_CKSUM_GOOD);
1780 	return ol_flags;
1781 }
1782 
1783 /**
1784  * DPDK callback for RX.
1785  *
1786  * @param dpdk_rxq
1787  *   Generic pointer to RX queue structure.
1788  * @param[out] pkts
1789  *   Array to store received packets.
1790  * @param pkts_n
1791  *   Maximum number of packets in array.
1792  *
1793  * @return
1794  *   Number of packets successfully received (<= pkts_n).
1795  */
1796 uint16_t
1797 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1798 {
1799 	struct mlx5_rxq_data *rxq = dpdk_rxq;
1800 	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
1801 	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
1802 	const unsigned int sges_n = rxq->sges_n;
1803 	struct rte_mbuf *pkt = NULL;
1804 	struct rte_mbuf *seg = NULL;
1805 	volatile struct mlx5_cqe *cqe =
1806 		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1807 	unsigned int i = 0;
1808 	unsigned int rq_ci = rxq->rq_ci << sges_n;
1809 	int len = 0; /* keep its value across iterations. */
1810 
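	/*
	 * The RQ is consumed in strides of 1 << sges_n entries; each packet
	 * starts on a stride boundary (see the "skip" label below).
	 */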
1811 	while (pkts_n) {
1812 		unsigned int idx = rq_ci & wqe_cnt;
1813 		volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
1814 		struct rte_mbuf *rep = (*rxq->elts)[idx];
1815 		uint32_t rss_hash_res = 0;
1816 
1817 		if (pkt)
1818 			NEXT(seg) = rep;
1819 		seg = rep;
1820 		rte_prefetch0(seg);
1821 		rte_prefetch0(cqe);
1822 		rte_prefetch0(wqe);
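		/* Allocate a replacement mbuf to refill this ring entry. */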
1823 		rep = rte_mbuf_raw_alloc(rxq->mp);
1824 		if (unlikely(rep == NULL)) {
1825 			++rxq->stats.rx_nombuf;
1826 			if (!pkt) {
1827 				/*
1828 				 * No buffer could be allocated before we even
1829 				 * started; bail out silently.
1830 				 */
1831 				break;
1832 			}
1833 			while (pkt != seg) {
1834 				assert(pkt != (*rxq->elts)[idx]);
1835 				rep = NEXT(pkt);
1836 				NEXT(pkt) = NULL;
1837 				NB_SEGS(pkt) = 1;
1838 				rte_mbuf_raw_free(pkt);
1839 				pkt = rep;
1840 			}
1841 			break;
1842 		}
1843 		if (!pkt) {
1844 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
1845 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
1846 					       &rss_hash_res);
1847 			if (!len) {
1848 				rte_mbuf_raw_free(rep);
1849 				break;
1850 			}
1851 			if (unlikely(len == -1)) {
1852 				/* RX error, packet is likely too large. */
1853 				rte_mbuf_raw_free(rep);
1854 				++rxq->stats.idropped;
1855 				goto skip;
1856 			}
1857 			pkt = seg;
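			/*
			 * When the CRC is not stripped by hardware, len still
			 * includes it; it is subtracted before setting PKT_LEN.
			 */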
1858 			assert(len >= (rxq->crc_present << 2));
1859 			/* Update packet information. */
1860 			pkt->packet_type = rxq_cq_to_pkt_type(cqe);
1861 			pkt->ol_flags = 0;
1862 			if (rss_hash_res && rxq->rss_hash) {
1863 				pkt->hash.rss = rss_hash_res;
1864 				pkt->ol_flags = PKT_RX_RSS_HASH;
1865 			}
1866 			if (rxq->mark &&
1867 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
1868 				pkt->ol_flags |= PKT_RX_FDIR;
1869 				if (cqe->sop_drop_qpn !=
1870 				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
1871 					uint32_t mark = cqe->sop_drop_qpn;
1872 
1873 					pkt->ol_flags |= PKT_RX_FDIR_ID;
1874 					pkt->hash.fdir.hi =
1875 						mlx5_flow_mark_get(mark);
1876 				}
1877 			}
1878 			if (rxq->csum | rxq->csum_l2tun)
1879 				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
1880 			if (rxq->vlan_strip &&
1881 			    (cqe->hdr_type_etc &
1882 			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
1883 				pkt->ol_flags |= PKT_RX_VLAN |
1884 					PKT_RX_VLAN_STRIPPED;
1885 				pkt->vlan_tci =
1886 					rte_be_to_cpu_16(cqe->vlan_info);
1887 			}
1888 			if (rxq->hw_timestamp) {
1889 				pkt->timestamp =
1890 					rte_be_to_cpu_64(cqe->timestamp);
1891 				pkt->ol_flags |= PKT_RX_TIMESTAMP;
1892 			}
1893 			if (rxq->crc_present)
1894 				len -= ETHER_CRC_LEN;
1895 			PKT_LEN(pkt) = len;
1896 		}
1897 		DATA_LEN(rep) = DATA_LEN(seg);
1898 		PKT_LEN(rep) = PKT_LEN(seg);
1899 		SET_DATA_OFF(rep, DATA_OFF(seg));
1900 		PORT(rep) = PORT(seg);
1901 		(*rxq->elts)[idx] = rep;
1902 		/*
1903 		 * Fill the NIC descriptor with the new buffer. The lkey and
1904 		 * size of the buffers are already known; only the buffer
1905 		 * address changes.
1906 		 */
1907 		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
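		/*
		 * More data than this segment can hold: the packet continues
		 * in the next entry of the same stride.
		 */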
1908 		if (len > DATA_LEN(seg)) {
1909 			len -= DATA_LEN(seg);
1910 			++NB_SEGS(pkt);
1911 			++rq_ci;
1912 			continue;
1913 		}
1914 		DATA_LEN(seg) = len;
1915 #ifdef MLX5_PMD_SOFT_COUNTERS
1916 		/* Increment bytes counter. */
1917 		rxq->stats.ibytes += PKT_LEN(pkt);
1918 #endif
1919 		/* Return packet. */
1920 		*(pkts++) = pkt;
1921 		pkt = NULL;
1922 		--pkts_n;
1923 		++i;
1924 skip:
1925 		/* Align consumer index to the next stride. */
1926 		rq_ci >>= sges_n;
1927 		++rq_ci;
1928 		rq_ci <<= sges_n;
1929 	}
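	/*
	 * Nothing was received and the RQ consumer index did not move:
	 * skip the doorbell update.
	 */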
1930 	if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci)))
1931 		return 0;
1932 	/* Update the consumer index. */
1933 	rxq->rq_ci = rq_ci >> sges_n;
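	/*
	 * Order the CQE/WQE updates above before the CQ doorbell write, and
	 * the CQ doorbell write before the RQ doorbell write.
	 */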
1934 	rte_io_wmb();
1935 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1936 	rte_io_wmb();
1937 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1938 #ifdef MLX5_PMD_SOFT_COUNTERS
1939 	/* Increment packets counter. */
1940 	rxq->stats.ipackets += i;
1941 #endif
1942 	return i;
1943 }
1944 
1945 /**
1946  * Dummy DPDK callback for TX.
1947  *
1948  * This function is used to temporarily replace the real callback during
1949  * unsafe control operations on the queue, or in case of error.
1950  *
1951  * @param dpdk_txq
1952  *   Generic pointer to TX queue structure.
1953  * @param[in] pkts
1954  *   Packets to transmit.
1955  * @param pkts_n
1956  *   Number of packets in array.
1957  *
1958  * @return
1959  *   Number of packets successfully transmitted (<= pkts_n).
1960  */
1961 uint16_t
1962 removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1963 {
1964 	(void)dpdk_txq;
1965 	(void)pkts;
1966 	(void)pkts_n;
1967 	return 0;
1968 }
1969 
1970 /**
1971  * Dummy DPDK callback for RX.
1972  *
1973  * This function is used to temporarily replace the real callback during
1974  * unsafe control operations on the queue, or in case of error.
1975  *
1976  * @param dpdk_rxq
1977  *   Generic pointer to RX queue structure.
1978  * @param[out] pkts
1979  *   Array to store received packets.
1980  * @param pkts_n
1981  *   Maximum number of packets in array.
1982  *
1983  * @return
1984  *   Number of packets successfully received (<= pkts_n).
1985  */
1986 uint16_t
1987 removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1988 {
1989 	(void)dpdk_rxq;
1990 	(void)pkts;
1991 	(void)pkts_n;
1992 	return 0;
1993 }
1994 
1995 /*
1996  * Vectorized Rx/Tx routines are not compiled in when the required vector
1997  * instructions are not supported on the target architecture. The following
1998  * null stubs are needed for linkage when the vectorized implementations
1999  * (e.g. mlx5_rxtx_vec_sse.c for x86) are not compiled in.
2000  */
2001 
2002 uint16_t __attribute__((weak))
2003 mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2004 {
2005 	(void)dpdk_txq;
2006 	(void)pkts;
2007 	(void)pkts_n;
2008 	return 0;
2009 }
2010 
2011 uint16_t __attribute__((weak))
2012 mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
2013 {
2014 	(void)dpdk_txq;
2015 	(void)pkts;
2016 	(void)pkts_n;
2017 	return 0;
2018 }
2019 
2020 uint16_t __attribute__((weak))
2021 mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
2022 {
2023 	(void)dpdk_rxq;
2024 	(void)pkts;
2025 	(void)pkts_n;
2026 	return 0;
2027 }
2028 
2029 int __attribute__((weak))
2030 priv_check_raw_vec_tx_support(struct priv *priv)
2031 {
2032 	(void)priv;
2033 	return -ENOTSUP;
2034 }
2035 
2036 int __attribute__((weak))
2037 priv_check_vec_tx_support(struct priv *priv)
2038 {
2039 	(void)priv;
2040 	return -ENOTSUP;
2041 }
2042 
2043 int __attribute__((weak))
2044 rxq_check_vec_support(struct mlx5_rxq_data *rxq)
2045 {
2046 	(void)rxq;
2047 	return -ENOTSUP;
2048 }
2049 
2050 int __attribute__((weak))
2051 priv_check_vec_rx_support(struct priv *priv)
2052 {
2053 	(void)priv;
2054 	return -ENOTSUP;
2055 }
2056