/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <altivec.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
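
/*
 * Note: the pragmas above are needed because the vector code below
 * routinely casts away volatile/const from CQE fields and aliases mbuf
 * and CQE memory as AltiVec vector types; without them, every such load
 * or store would trigger -Wcast-qual/-Wstrict-aliasing warnings.
 */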

/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2; /* Round down to an even count. */

	for (pos = 0; pos < p; pos += 2) {
		vector unsigned char mbp;

		mbp = (vector unsigned char)vec_vsx_ld(0,
				(signed int const *)&elts[pos]);
		*(vector unsigned char *)&pkts[pos] = mbp;
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}
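
/*
 * The loop above copies two 64-bit mbuf pointers per 16-byte VSX
 * load/store, so one vector move replaces two scalar copies; a leftover
 * odd pointer is handled by the scalar tail. For example, with n == 5
 * the vector loop covers elts[0..3] in two iterations and the tail
 * copies elts[4]. This assumes 64-bit pointers, which holds for the
 * supported POWER targets.
 */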

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	const vector unsigned char zero = (vector unsigned char){0};
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const vector unsigned char shuf_mask1 = (vector unsigned char){
			-1, -1, -1, -1,   /* skip packet_type */
			 7,  6, -1, -1,   /* bswap16, pkt_len */
			 7,  6,           /* bswap16, data_len */
			-1, -1,           /* skip vlan_tci */
			 3,  2,  1,  0};  /* bswap32, rss */
	const vector unsigned char shuf_mask2 = (vector unsigned char){
			-1, -1, -1, -1,   /* skip packet_type */
			15, 14, -1, -1,   /* bswap16, pkt_len */
			15, 14,           /* bswap16, data_len */
			-1, -1,           /* skip vlan_tci */
			11, 10,  9,  8};  /* bswap32, rss */
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
		(rxq->crc_present * RTE_ETHER_CRC_LEN);
	const vector unsigned char rearm =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rearm_data);
	const vector unsigned char rxdf =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rx_descriptor_fields1);
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
			0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
			rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0};
	const vector unsigned short rxdf_sel_mask =
		(vector unsigned short){
			0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;

#ifdef MLX5_PMD_SOFT_COUNTERS
	const vector unsigned char ones = vec_splat_u8(-1);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
		 3,  2, 11, 10,
		 7,  6, 15, 14,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
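	/*
	 * Each mini-CQE is 8 bytes and they are packed eight per 64-byte
	 * CQE slot starting at cq + 1, which is why mcq is re-pointed on
	 * every eighth entry below. One loop iteration decompresses
	 * MLX5_VPMD_DESCS_PER_LOOP (4) packets: mcqe1 holds mini-CQEs
	 * [pos % 8, pos % 8 + 1] and mcqe2 holds the following two. All
	 * other mbuf fields are inherited from the pre-built title packet
	 * through the rearm/rxdf vectors loaded above.
	 */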
	for (pos = 0; pos < mcqe_n; ) {
		vector unsigned char mcqe1, mcqe2;
		vector unsigned char rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned short mcqe_sel_mask =
			(vector unsigned short){0, 0, 0xffff, 0xffff,
			0, 0, 0xffff, 0xffff};
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		vector unsigned short left, right;
		vector unsigned char byte_cnt, invalid_mask;
		vector unsigned long lshift;
		__attribute__((altivec(vector__)))
			__attribute__((altivec(bool__)))
			unsigned long long shmask;
		const vector unsigned long shmax = {64, 64};
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));

		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8]);
		mcqe2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8 + 2]);

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 1]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 1]->rx_descriptor_fields1 = rxdf2;

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos + 2]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 3]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 3]->rx_descriptor_fields1 = rxdf2;

#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = (vector unsigned char)(vector unsigned long){
			(mcqe_n - pos) * sizeof(uint16_t) * 8, 0};

		lshift =
			vec_splat((vector unsigned long)invalid_mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		invalid_mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		invalid_mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)invalid_mask, shmask);

		mcqe1 = (vector unsigned char)
			vec_sro((vector unsigned short)mcqe1,
			(vector unsigned char){32});
		byte_cnt = (vector unsigned char)
			vec_sel((vector unsigned short)mcqe1,
			(vector unsigned short)mcqe2, mcqe_sel_mask);
		byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif
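
		/*
		 * The perm/add pairs above are a horizontal reduction:
		 * lower_half/upper_half split the 16-bit lanes into even
		 * and odd halves, vec_add sums them pairwise, and the
		 * second round folds the result again, leaving the total
		 * of the (byte-swapped) byte counts in the low 64-bit
		 * lane read into rcvd_byte.
		 */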

		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}

		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}

	/* Invalidate the rest of the CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;

#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif

	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}
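
/*
 * Usage note (a sketch of the calling convention as used in this file,
 * not a general contract): the caller is expected to have decoded the
 * title CQE into elts[0] before calling, e.g.
 *
 *	elts[0]->data_len = <byte count taken from the title CQE>;
 *	rxq->decompressed = rxq_cq_decompress_v(rxq, cq, elts);
 *
 * which is what rxq_burst_v() below does after finding a compressed CQE.
 * The function advances rxq->cq_ci itself; rq_pi bookkeeping is left to
 * the caller.
 */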

/**
 * Calculate the packet type and offload flags for mbufs and store them.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16-byte completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
		vector unsigned char cqes[4], vector unsigned char op_err,
		struct rte_mbuf **pkts)
{
	vector unsigned char pinfo0, pinfo1;
	vector unsigned char pinfo, ptype;
	vector unsigned char ol_flags = (vector unsigned char)
		(vector unsigned int){
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP};
	vector unsigned char cv_flags;
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ptype_mask =
		(vector unsigned char)(vector unsigned int){
		0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06};
	const vector unsigned char ptype_ol_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000106, 0x00000106, 0x00000106, 0x00000106};
	const vector unsigned char pinfo_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000003, 0x00000003, 0x00000003, 0x00000003};
	const vector unsigned char cv_flag_sel = (vector unsigned char){
		0, (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), 0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), 0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0};
	const vector unsigned char cv_mask =
		(vector unsigned char)(vector unsigned int){
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED};
	const vector unsigned char mbuf_init =
		(vector unsigned char)(vector unsigned long){
		*(__attribute__((__aligned__(8))) unsigned long *)
		&rxq->mbuf_initializer, 0LL};
	const vector unsigned short rearm_sel_mask =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	vector unsigned char rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

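	/*
	 * The merge sequence below behaves like a partial 4x4 32-bit
	 * transpose: vec_mergeh on 32-bit lanes interleaves cqes[0]/cqes[1]
	 * and cqes[2]/cqes[3], then a 64-bit mergeh gathers the matching
	 * 32-bit word of all four CQEs into a single vector, first for the
	 * pkt_info words and then (via vec_mergel) for hdr_type_etc.
	 */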
	/* Extract pkt_info field. */
	pinfo0 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	pinfo = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	/* Extract hdr_type_etc field. */
	pinfo0 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	ptype = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	if (rxq->mark) {
		const vector unsigned char pinfo_ft_mask =
			(vector unsigned char)(vector unsigned int){
			0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
		const vector unsigned char fdir_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR, PKT_RX_FDIR,
			PKT_RX_FDIR, PKT_RX_FDIR};
		vector unsigned char fdir_id_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID,
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID};
		vector unsigned char flow_tag, invalid_mask;

		flow_tag = (vector unsigned char)
			vec_and((vector unsigned long)pinfo,
			(vector unsigned long)pinfo_ft_mask);

		/* If the flow tag is non-zero, set PKT_RX_FDIR. */
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)zero);
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_flags,
			(vector unsigned long)invalid_mask));

		/* Mask out invalid entries. */
		fdir_id_flags = (vector unsigned char)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)invalid_mask);

		/* Check if the flow tag equals MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)pinfo_ft_mask)));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = (vector unsigned char)
		vec_and((vector unsigned long)ptype,
		(vector unsigned long)ptype_mask);
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)pinfo_mask);
	pinfo = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){16, 16, 16, 16});

	/* Make pinfo carry the merged fields for the ol_flags calculation. */
	pinfo = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)pinfo);
	ptype = (vector unsigned char)
		vec_sr((vector unsigned int)pinfo,
		(vector unsigned int){10, 10, 10, 10});
	ptype = (vector unsigned char)
		vec_packs((vector unsigned int)ptype,
		(vector unsigned int)zero);

	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = (vector unsigned char)
		vec_sr((vector unsigned short)op_err,
		(vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
	ptype = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)op_err);

	pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0];
	pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2];
	pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4];
	pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6];

	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
		!!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
		!!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
		!!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
		!!(pt_idx3 & (1 << 6)) * rxq->tunnel;
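
	/*
	 * Bit (1 << 6) of the 8-bit ptype index is the "tunneled" flag
	 * (bit[16] of the merged fields, after the >> 10 shift above), so
	 * !!(pt_idx & (1 << 6)) * rxq->tunnel ORs in the tunnel packet
	 * type without a branch: the multiplier is 1 for tunneled packets
	 * and 0 otherwise.
	 */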

	/* Fill flags for checksum and VLAN. */
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)ptype_ol_mask);
	pinfo = vec_perm(cv_flag_sel, zero, pinfo);

	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){9, 9, 9, 9});
	cv_flags = (vector unsigned char)
		vec_or((vector unsigned long)pinfo,
		(vector unsigned long)cv_flags);

	/* Move back flags to start from byte[0]. */
	cv_flags = (vector unsigned char)
		vec_sr((vector unsigned int)cv_flags,
		(vector unsigned int){8, 8, 8, 8});

	/* Mask out garbage bits. */
	cv_flags = (vector unsigned char)
		vec_and((vector unsigned long)cv_flags,
		(vector unsigned long)cv_mask);

	/* Merge to ol_flags. */
	ol_flags = (vector unsigned char)
		vec_or((vector unsigned long)ol_flags,
		(vector unsigned long)cv_flags);

	/* Merge mbuf_init and ol_flags. */
	rearm0 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){64}), rearm_sel_mask);
	rearm1 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);
	rearm2 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)ol_flags, rearm_sel_mask);
	rearm3 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_sro((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);

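	/*
	 * Each vec_vsx_st below writes 16 bytes starting at rearm_data:
	 * the 8-byte rearm block plus the ol_flags field that immediately
	 * follows it in struct rte_mbuf. rearm_sel_mask splices each
	 * packet's 32-bit flag word, positioned by the vec_slo/vec_sro
	 * shifts above, into that region; the rest comes from mbuf_init.
	 */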
	/* Write 8B rearm_data and 8B ol_flags. */
	vec_vsx_st(rearm0, 0,
		(vector unsigned char *)&pkts[0]->rearm_data);
	vec_vsx_st(rearm1, 0,
		(vector unsigned char *)&pkts[1]->rearm_data);
	vec_vsx_st(rearm2, 0,
		(vector unsigned char *)&pkts[2]->rearm_data);
	vec_vsx_st(rearm3, 0,
		(vector unsigned char *)&pkts[3]->rearm_data);
}


/**
 * Receive burst of packets. An errored completion also consumes an mbuf, but
 * the packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at least
 *   one error packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ones = vec_splat_u8(-1);
	const vector unsigned char owner_check =
		(vector unsigned char)(vector unsigned long){
		0x0100000001000000LL, 0x0100000001000000LL};
	const vector unsigned char opcode_check =
		(vector unsigned char)(vector unsigned long){
		0xf0000000f0000000LL, 0xf0000000f0000000LL};
	const vector unsigned char format_check =
		(vector unsigned char)(vector unsigned long){
		0x0c0000000c000000LL, 0x0c0000000c000000LL};
	const vector unsigned char resp_err_check =
		(vector unsigned char)(vector unsigned long){
		0xe0000000e0000000LL, 0xe0000000e0000000LL};
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
		 1,  0,  5,  4,
		 9,  8, 13, 12,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const vector unsigned char shuf_mask = (vector unsigned char){
		 5,  4,           /* bswap16, pkt_len */
		-1, -1,           /* zero out 2nd half of pkt_len */
		 5,  4,           /* bswap16, data_len */
		11, 10,           /* bswap16, vlan+tci */
		15, 14, 13, 12,   /* bswap32, rss */
		 1,  2,  3, -1};  /* fdir.hi */
	/* Mask to blend from the last Qword to the first DQword. */
	const vector unsigned char blend_mask = (vector unsigned char){
		-1,  0,  0,  0,
		 0,  0,  0,  0,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0};
	const vector unsigned char flow_mark_adj =
		(vector unsigned char)(vector unsigned int){
		0, 0, 0, rxq->mark * (-1)};
	const vector unsigned short cqe_sel_mask1 =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	const vector unsigned short cqe_sel_mask2 =
		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);

	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Do not overflow the pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Do not cross the queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->decompressed == 0);
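
	/*
	 * From here on, each loop iteration processes up to
	 * MLX5_VPMD_DESCS_PER_LOOP (4) CQEs at once. Packets already
	 * decompressed on a previous call were returned above, so the loop
	 * below only handles fresh CQEs; a compressed CQE it encounters
	 * terminates the loop and is expanded by rxq_cq_decompress_v() at
	 * the end of the function.
	 */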

	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *          uint8_t  pkt_info;
	 *          uint8_t  flow_tag[3];
	 *          uint16_t byte_cnt;
	 *          uint8_t  rsvd4;
	 *          uint8_t  op_own;
	 *          uint16_t hdr_type_etc;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
		vector unsigned char cqe_tmp1, cqe_tmp2;
		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
		vector unsigned char opcode, owner_mask, invalid_mask;
		vector unsigned char comp_mask;
		vector unsigned char mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		vector unsigned char byte_cnt;
		vector unsigned short left, right;
#endif
		/* Also used by steps E.6/D.2 when soft counters are off. */
		const vector unsigned long shmax = {64, 64};
		vector unsigned long lshift;
		vector __attribute__((altivec(bool__)))
			unsigned long shmask;
		vector unsigned char mbp1, mbp2;
		vector unsigned char p =
			(vector unsigned char)(vector unsigned short){
				0, 1, 2, 3, 0, 0, 0, 0};
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}

		/* A.0 do not cross the end of CQ. */
		mask = (vector unsigned char)(vector unsigned long){
			(pkts_n - pos) * sizeof(uint16_t) * 8, 0};

		{
			vector unsigned long lshift;
			vector __attribute__((altivec(bool__)))
				unsigned long shmask;
			const vector unsigned long shmax = {64, 64};

			lshift = vec_splat((vector unsigned long)mask, 0);
			shmask = vec_cmpgt(shmax, lshift);
			mask = (vector unsigned char)
				vec_sl((vector unsigned long)ones, lshift);
			mask = (vector unsigned char)
				vec_sel((vector unsigned long)shmask,
				(vector unsigned long)mask, shmask);
		}
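
		/*
		 * The cmpgt/sel pair above guards the variable shift:
		 * each CQE lane occupies 16 bits of the mask word, and VSX
		 * shift counts are taken modulo the element width, so
		 * shifting ones left by >= 64 bits (all four lanes valid)
		 * would wrap instead of producing zero. shmask is all-zero
		 * in that case and vec_sel substitutes it for the bogus
		 * shift result.
		 */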

		p = (vector unsigned char)
			vec_andc((vector unsigned long)p,
			(vector unsigned long)mask);

		/* A.1 load cqes. */
		p3 = (unsigned int)((vector unsigned short)p)[3];
		cqes[3] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p3].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		p2 = (unsigned int)((vector unsigned short)p)[2];
		cqes[2] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p2].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.1 load mbuf pointers. */
		mbp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos]);
		mbp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos + 2]);

		/* A.1 load a block having op_own. */
		p1 = (unsigned int)((vector unsigned short)p)[1];
		cqes[1] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p1].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		cqes[0] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.2 copy mbuf pointers. */
		*(vector unsigned char *)&pkts[pos] = mbp1;
		*(vector unsigned char *)&pkts[pos + 2] = mbp2;
		rte_cio_rmb();

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p3].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos + p2].pkt_info;
		cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p3].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p2].csum);
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p3].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p2].rsvd3[9], 0LL};
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2,
			(vector unsigned short)cqe_sel_mask2);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1,
			(vector unsigned short)cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask);
		pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned short)pkt_mb3 -
			(vector unsigned short)crc_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned short)pkt_mb2 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned int)pkt_mb3 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned int)pkt_mb2 +
			(vector unsigned int)flow_mark_adj);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 3]->pkt_len = pkt_mb3;
		*(vector unsigned char *)
			&pkts[pos + 2]->pkt_len = pkt_mb2;

		/* E.1 extract op_own field. */
		op_own_tmp2 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[2],
			(vector unsigned int)cqes[3]);

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p1].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos].pkt_info;
		cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = vec_sel(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p1].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos].csum);
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p1].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos].rsvd3[9], 0LL};
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask2);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask);
		pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned short)pkt_mb1 -
			(vector unsigned short)crc_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned short)pkt_mb0 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned int)pkt_mb1 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned int)pkt_mb0 +
			(vector unsigned int)flow_mark_adj);

		/* E.1 extract op_own byte. */
		op_own_tmp1 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[0],
			(vector unsigned int)cqes[1]);
		op_own = (vector unsigned char)
			vec_mergel((vector unsigned long)op_own_tmp1,
			(vector unsigned long)op_own_tmp2);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 1]->pkt_len = pkt_mb1;
		*(vector unsigned char *)
			&pkts[pos]->pkt_len = pkt_mb0;

		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)owner_check);
		if (ownership)
			owner_mask = (vector unsigned char)
				vec_xor((vector unsigned long)owner_mask,
				(vector unsigned long)owner_check);
		owner_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)owner_mask,
			(vector unsigned int)owner_check);
		owner_mask = (vector unsigned char)
			vec_packs((vector unsigned int)owner_mask,
			(vector unsigned int)zero);

		/* E.3 get mask for invalidated CQEs. */
		opcode = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)opcode_check);
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)opcode_check,
			(vector unsigned int)opcode);
		invalid_mask = (vector unsigned char)
			vec_packs((vector unsigned int)invalid_mask,
			(vector unsigned int)zero);

		/* E.4 mask out beyond boundary. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)owner_mask);

		/* F.1 find compressed CQE format. */
		comp_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)format_check);
		comp_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)comp_mask,
			(vector unsigned int)format_check);
		comp_mask = (vector unsigned char)
			vec_packs((vector unsigned int)comp_mask,
			(vector unsigned int)zero);

		/* F.2 mask out invalid entries. */
		comp_mask = (vector unsigned char)
			vec_andc((vector unsigned long)comp_mask,
			(vector unsigned long)invalid_mask);
		comp_idx = ((vector unsigned long)comp_mask)[0];

		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ? __builtin_ctzll(comp_idx) /
			(sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP;
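
		/*
		 * comp_idx is derived with a count-trailing-zeros trick:
		 * every CQE lane occupies 16 bits of the packed mask, so
		 * __builtin_ctzll(comp_idx) / 16 yields the index of the
		 * first compressed CQE, or MLX5_VPMD_DESCS_PER_LOOP when
		 * none is found in this batch. E.7 below uses the same
		 * trick on invalid_mask.
		 */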

		/* E.6 mask out entries after the compressed CQE. */
		mask = (vector unsigned char)(vector unsigned long){
			(comp_idx * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.7 count non-compressed valid CQEs. */
		n = ((vector unsigned long)invalid_mask)[0];
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;

		/* D.2 get the final invalid mask. */
		mask = (vector unsigned char)(vector unsigned long){
			(n * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* D.3 check error in opcode. */
		opcode = (vector unsigned char)
			vec_cmpeq((vector unsigned int)resp_err_check,
			(vector unsigned int)opcode);
		opcode = (vector unsigned char)
			vec_packs((vector unsigned int)opcode,
			(vector unsigned int)zero);
		opcode = (vector unsigned char)
			vec_andc((vector unsigned long)opcode,
			(vector unsigned long)invalid_mask);

		/* D.4 mark if any error is set. */
		*err |= ((vector unsigned long)opcode)[0];

		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}
		if (rte_flow_dynf_metadata_avail()) {
			uint64_t flag = rte_flow_dynf_metadata_mask;
			int offs = rte_flow_dynf_metadata_offs;
			uint32_t metadata;

			/* This code is subject to further optimization. */
			metadata = cq[pos].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
								metadata;
			pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 1].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
								metadata;
			pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 2].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
								metadata;
			pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 3].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
								metadata;
			pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		/*
		 * Break the loop unless more valid CQEs are expected, i.e.
		 * when fewer than a full batch of non-compressed CQEs was
		 * consumed (end of valid CQEs or a compressed CQE found).
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQEs were seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed =
			rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
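
	/*
	 * The compiler barrier keeps the CQE processing above from being
	 * reordered past the doorbell record update below: hardware reads
	 * cq_db to learn how far software has consumed the CQ, so the new
	 * consumer index must not be published before the CQEs are done.
	 */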
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ */