/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of packets to be filled.
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const __m128i shuf_mask_dseg =
		_mm_set_epi8(8,  9, 10, 11, /* addr, bswap64 */
			    12, 13, 14, 15,
			     7,  6,  5,  4, /* lkey */
			     0,  1,  2,  3  /* length, bswap32 */);
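	/*
	 * A single PSHUFB with the mask above both lays the fields out in
	 * the HW dseg order (byte_count, lkey, addr) and byte-swaps length
	 * and address to big-endian; lkey is already big-endian.
	 */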
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif

	for (pos = 0; pos < n; ++pos, ++dseg) {
		__m128i desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = _mm_set_epi32(addr >> 32,
				     addr,
				     mlx5_tx_mb2mr(txq, pkt),
				     DATA_LEN(pkt));
		desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
		_mm_store_si128(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}

/**
 * Send multi-segmented packets until a single-segment packet is encountered
 * in the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static uint16_t
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const __m128i shuf_mask_ctrl =
			_mm_set_epi8(15, 14, 13, 12,
				      8,  9, 10, 11, /* bswap32 */
				      4,  5,  6,  7, /* bswap32 */
				      0,  1,  2,  3  /* bswap32 */);
		uint8_t cs_flags;
		uint16_t max_elts;
		uint16_t max_wqe;
		__m128i *t_wqe, *dseg;
		__m128i ctrl;

		assert(segs_n);
		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * An MPW session consumes at most 2 WQEs to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		if (segs_n > MLX5_MPW_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		wqe = &((volatile struct mlx5_wqe64 *)
			 txq->wqes)[wqe_ci & wq_mask].hdr;
		cs_flags = txq_ol_cksum_to_cs(buf);
		/* Title WQEBB pointer. */
		t_wqe = (__m128i *)wqe;
		dseg = (__m128i *)(wqe + 1);
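		/*
		 * Write one 16B dseg per segment. Whenever the write pointer
		 * crosses a WQEBB boundary (every nb_dword_per_wqebb dsegs)
		 * it is moved to the next WQEBB slot in the ring.
		 */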
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (__m128i *)
					&((volatile struct mlx5_wqe64 *)
					   txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg++, &buf, 1);
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);
		++wqe_ci;
		/* Fill CTRL in the header. */
		ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
				     MLX5_OPC_MOD_MPW << 24 |
				     txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
		ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
		_mm_store_si128(t_wqe, ctrl);
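		/*
		 * Legacy MPW is coded with the TSO opcode: the total packet
		 * length is carried in the eseg mss field (bytes 6-7 below)
		 * and the checksum flags in byte 4.
		 */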
		/* Fill ESEG in the header. */
		_mm_store_si128(t_wqe + 1,
				_mm_set_epi16(0, 0, 0, 0,
					      rte_cpu_to_be_16(len), cs_flags,
					      0, 0));
		txq->wqe_ci = wqe_ci;
	}
	if (!n)
		return 0;
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}

/**
 * Send a burst of packets with Enhanced MPW. If a multi-segment packet is
 * encountered, it stops and returns so that the packet can be processed by
 * txq_scatter_v(). All packets in the pkts list must be single-segment and
 * carry the same offload flags, as guaranteed by txq_count_contig_single_seg()
 * and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 * @param metadata
 *   Metadata value to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags, rte_be32_t metadata)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n = 0;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req = 0;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const __m128i shuf_mask_ctrl =
		_mm_set_epi8(15, 14, 13, 12,
			      8,  9, 10, 11, /* bswap32 */
			      4,  5,  6,  7, /* bswap32 */
			      0,  1,  2,  3  /* bswap32 */);
	__m128i *t_wqe, *dseg;
	__m128i ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		_mm_storeu_si128((__m128i *)&elts[pos],
				 _mm_loadu_si128((__m128i *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (__m128i *)wqe;
	dseg = (__m128i *)(wqe + 1);
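	/*
	 * The WQE header occupies nb_dword_in_hdr 16B slots of the first
	 * WQEBB, so only (wq_n - wq_idx) * nb_dword_per_wqebb -
	 * nb_dword_in_hdr dsegs fit before the WQE ring wraps.
	 */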
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (__m128i *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
	} else {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = 8;
	}
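	/*
	 * comp_req is written to the fm_ce_se dword of the control segment;
	 * the value 8 sets the completion-event bits (CE = 2) so that HW
	 * posts a CQE once this WQE is done.
	 */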
	/* Fill CTRL in the header. */
	ctrl = _mm_set_epi32(txq->elts_head, comp_req,
			     txq->qp_num_8s | (pkts_n + 2),
			     MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
				txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
	ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
	_mm_store_si128(t_wqe, ctrl);
	/* Fill ESEG in the header. */
	_mm_store_si128(t_wqe + 1, _mm_set_epi32(0, metadata, cs_flags, 0));
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
	/* Ring QP doorbell. */
	mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
	return pkts_n;
}

/**
 * Copy mbufs from the RX SW ring to an array of packets to be returned.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		__m128i mbp;

		mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 */
static inline void
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const __m128i shuf_mask1 =
		_mm_set_epi8(0,  1,  2,  3, /* rss, bswap32 */
			    -1, -1,         /* skip vlan_tci */
			     6,  7,         /* data_len, bswap16 */
			    -1, -1,  6,  7, /* pkt_len, bswap16 */
			    -1, -1, -1, -1  /* skip packet_type */);
	const __m128i shuf_mask2 =
		_mm_set_epi8(8,  9, 10, 11, /* rss, bswap32 */
			    -1, -1,         /* skip vlan_tci */
			    14, 15,         /* data_len, bswap16 */
			    -1, -1, 14, 15, /* pkt_len, bswap16 */
			    -1, -1, -1, -1  /* skip packet_type */);
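	/*
	 * In a compressed session the title CQE's byte count carries the
	 * number of mini-CQEs. It was stored into t_pkt->data_len (minus the
	 * CRC length, if present) when the title mbuf was pre-built, so the
	 * CRC length is added back to recover the exact count.
	 */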
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * ETHER_CRC_LEN);
	const __m128i rearm =
		_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
	const __m128i rxdf =
		_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0, 0);
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     14, 15,  6,  7,
			     10, 11,  2,  3);
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		__m128i mcqe1, mcqe2;
		__m128i rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt, invalid_mask;
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
		mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				  &elts[pos]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				  &elts[pos + 1]->rx_descriptor_fields1,
				 rxdf2);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				  &elts[pos + 2]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				  &elts[pos + 3]->rx_descriptor_fields1,
				 rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
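		/*
		 * Lanes at or beyond the number of remaining mini-CQEs hold
		 * garbage: build an all-ones mask shifted left by
		 * 16 * (mcqe_n - pos) bits and clear those byte counts.
		 */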
		invalid_mask = _mm_set_epi64x(0,
					      (mcqe_n - pos) *
					      sizeof(uint16_t) * 8);
		invalid_mask = _mm_sll_epi64(ones, invalid_mask);
		mcqe1 = _mm_srli_si128(mcqe1, 4);
		byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
		byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
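		/*
		 * A compressed CQE packs eight 8B mini-CQEs
		 * (struct mlx5_mini_cqe8) per 64B slot, so the mini-CQE
		 * array pointer advances every eight entries while the
		 * consumed ring entries are invalidated for the next poll.
		 */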
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)(cq + pos);
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
}

/**
 * Calculate the packet type and offload flags for each mbuf and store them.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16-byte completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
			 __m128i op_err, struct rte_mbuf **pkts)
{
	__m128i pinfo0, pinfo1;
	__m128i pinfo, ptype;
	__m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
					  rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	__m128i cv_flags;
	const __m128i zero = _mm_setzero_si128();
	const __m128i ptype_mask =
		_mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
	const __m128i ptype_ol_mask =
		_mm_set_epi32(0x106, 0x106, 0x106, 0x106);
	const __m128i pinfo_mask =
		_mm_set_epi32(0x3, 0x3, 0x3, 0x3);
	const __m128i cv_flag_sel =
		_mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
			     (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
					PKT_RX_L4_CKSUM_GOOD) >> 1),
			     0,
			     (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
			     0,
			     (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
			     (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
			     0);
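	/*
	 * cv_flag_sel is a PSHUFB lookup table indexed by the merged
	 * (l4_ok, l3_ok, cv) bits below; the checksum flags are stored
	 * pre-shifted right by one so that each fits in a single byte, and
	 * the shift/or sequence after the lookup restores them and merges
	 * in the VLAN flags.
	 */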
	const __m128i cv_mask =
		_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const __m128i mbuf_init =
		_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
	pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
	/* Extract hdr_type_etc field. */
	pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
	ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
	if (rxq->mark) {
		const __m128i pinfo_ft_mask =
			_mm_set_epi32(0xffffff00, 0xffffff00,
				      0xffffff00, 0xffffff00);
		const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
		__m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
		__m128i flow_tag, invalid_mask;

		flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
		/* If flow tag is non-zero, set PKT_RX_FDIR. */
		invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(invalid_mask,
							 fdir_flags));
		/* Mask out invalid entries. */
		fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
		/* Check if flow tag equals MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(
						_mm_cmpeq_epi32(flow_tag,
								pinfo_ft_mask),
						fdir_id_flags));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = _mm_and_si128(ptype, ptype_mask);
	pinfo = _mm_and_si128(pinfo, pinfo_mask);
	pinfo = _mm_slli_epi32(pinfo, 16);
	/* Make pinfo carry the merged fields for the ol_flags calculation. */
	pinfo = _mm_or_si128(ptype, pinfo);
	ptype = _mm_srli_epi32(pinfo, 10);
	ptype = _mm_packs_epi32(ptype, zero);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = _mm_srli_epi16(op_err, 8);
	ptype = _mm_or_si128(ptype, op_err);
	pt_idx0 = _mm_extract_epi8(ptype, 0);
	pt_idx1 = _mm_extract_epi8(ptype, 2);
	pt_idx2 = _mm_extract_epi8(ptype, 4);
	pt_idx3 = _mm_extract_epi8(ptype, 6);
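	/*
	 * Bit 6 of the table index is the 'tunneled' bit; the branchless
	 * multiply below ORs the per-queue tunnel ptype in only for
	 * tunneled packets.
	 */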
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
	/* Fill flags for checksum and VLAN. */
	pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
	pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = _mm_slli_epi32(pinfo, 9);
	cv_flags = _mm_or_si128(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = _mm_srli_epi32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = _mm_and_si128(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = _mm_or_si128(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags. */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
	rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
	/* Write 8B rearm_data and 8B ol_flags. */
	_mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
}

/**
 * Receive a burst of packets. An errored completion also consumes an mbuf,
 * but the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at least
 *   one error packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const __m128i owner_check =
		_mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
	const __m128i opcode_check =
		_mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
	const __m128i format_check =
		_mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
	const __m128i resp_err_check =
		_mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     12, 13,  8,  9,
			      4,  5,  0,  1);
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const __m128i shuf_mask =
		_mm_set_epi8(-1,  3,  2,  1, /* fdir.hi */
			     12, 13, 14, 15, /* rss, bswap32 */
			     10, 11,         /* vlan_tci, bswap16 */
			      4,  5,         /* data_len, bswap16 */
			     -1, -1,         /* zero out 2nd half of pkt_len */
			      4,  5          /* pkt_len, bswap16 */);
	/* Mask to blend from the last Qword to the first DQword. */
	const __m128i blend_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			      0,  0,  0,  0,
			      0,  0,  0, -1);
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0, 0, 0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * ETHER_CRC_LEN);
	const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	/*
	 * Order of indexes:
	 *   rq_ci >= cq_ci >= rq_pi
	 * Definition of indexes:
	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
	 */
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are any unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->rq_pi == rxq->cq_ci);
	/*
	 * A. load first Qword (8 bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    The final 16-byte cqes[] extracted from the original 64-byte
	 *    CQE has the following structure:
	 *        struct {
	 *          uint8_t  pkt_info;
	 *          uint8_t  flow_tag[3];
	 *          uint16_t byte_cnt;
	 *          uint8_t  rsvd4;
	 *          uint8_t  op_own;
	 *          uint16_t hdr_type_etc;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		__m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
		__m128i cqe_tmp1, cqe_tmp2;
		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		__m128i op_own, op_own_tmp1, op_own_tmp2;
		__m128i opcode, owner_mask, invalid_mask;
		__m128i comp_mask;
		__m128i mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt;
#endif
		__m128i mbp1, mbp2;
		__m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}
		/* A.0 do not cross the end of CQ. */
		mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		p = _mm_andnot_si128(mask, p);
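		/*
		 * Lanes of p past the last valid CQE are zeroed so that the
		 * loads below simply re-read cq[pos]; those duplicate
		 * entries are discarded later when this same mask is folded
		 * into invalid_mask (step E.4).
		 */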
		/* A.1 load cqes. */
		p3 = _mm_extract_epi16(p, 3);
		cqes[3] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p3].sop_drop_qpn);
		rte_compiler_barrier();
		p2 = _mm_extract_epi16(p, 2);
		cqes[2] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p2].sop_drop_qpn);
		rte_compiler_barrier();
		/* B.1 load mbuf pointers. */
		mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
		mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
		/* A.1 load a block having op_own. */
		p1 = _mm_extract_epi16(p, 1);
		cqes[1] = _mm_loadl_epi64((__m128i *)
					   &cq[pos + p1].sop_drop_qpn);
		rte_compiler_barrier();
		cqes[0] = _mm_loadl_epi64((__m128i *)
					   &cq[pos].sop_drop_qpn);
		/* B.2 copy mbuf pointers. */
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
		_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
		rte_cio_rmb();
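		/*
		 * The read barrier above keeps the loads of the remaining
		 * CQE fields from being reordered before the qword loads
		 * carrying op_own, so a CQE is never parsed from a mix of
		 * old and new HW data.
		 */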
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
		cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
		pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
		pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
		pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
		_mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
		/* E.1 extract op_own field. */
		op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
		cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
		pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
		pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
		pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
		/* E.1 extract op_own byte. */
		op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
		op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
		_mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = _mm_and_si128(op_own, owner_check);
		if (ownership)
			owner_mask = _mm_xor_si128(owner_mask, owner_check);
		owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
		owner_mask = _mm_packs_epi32(owner_mask, zero);
		/* E.3 get mask for invalidated CQEs. */
		opcode = _mm_and_si128(op_own, opcode_check);
		invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
		invalid_mask = _mm_packs_epi32(invalid_mask, zero);
		/* E.4 mask out beyond boundary. */
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
		/* F.1 find compressed CQE format. */
		comp_mask = _mm_and_si128(op_own, format_check);
		comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
		comp_mask = _mm_packs_epi32(comp_mask, zero);
		/* F.2 mask out invalid entries. */
		comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
		comp_idx = _mm_cvtsi128_si64(comp_mask);
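		/*
		 * Each CQE lane contributes 16 mask bits, so the bit index
		 * of the first set bit divided by 16 is the lane number of
		 * the first compressed CQE.
		 */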
		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ?
				__builtin_ctzll(comp_idx) /
					(sizeof(uint16_t) * 8) :
				MLX5_VPMD_DESCS_PER_LOOP;
		/* E.6 mask out entries after the compressed CQE. */
		mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.7 count non-compressed valid CQEs. */
		n = _mm_cvtsi128_si64(invalid_mask);
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
		opcode = _mm_packs_epi32(opcode, zero);
		opcode = _mm_andnot_si128(invalid_mask, opcode);
		/* D.4 mark if any error is set */
		*err |= _mm_cvtsi128_si64(opcode);
		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there is a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
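	/*
	 * comp_idx == n means the compressed CQE immediately follows the
	 * last valid non-compressed CQE of the final loop iteration, i.e.
	 * it is the next CQE to consume.
	 */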
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->cq_ci - rxq->rq_pi;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
		}
	}
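	/* Update the CQ doorbell record so HW can recycle consumed CQEs. */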
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */