/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <altivec.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif

/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

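	/*
	 * mbuf pointers are 8B wide, so each 16B VSX load/store below moves
	 * two of them at once; n & -2 rounds the count down to an even number
	 * and a trailing odd pointer is copied with a plain scalar store.
	 */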
	for (pos = 0; pos < p; pos += 2) {
		vector unsigned char mbp;

		mbp = (vector unsigned char)vec_vsx_ld(0,
				(signed int const *)&elts[pos]);
		*(vector unsigned char *)&pkts[pos] = mbp;
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	const vector unsigned char zero = (vector unsigned char){0};
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const vector unsigned char shuf_mask1 = (vector unsigned char){
			-1, -1, -1, -1,   /* skip packet_type */
			 7,  6, -1, -1,   /* bswap16, pkt_len */
			 7,  6,           /* bswap16, data_len */
			-1, -1,           /* skip vlan_tci */
			 3,  2,  1,  0};  /* bswap32, rss */
	const vector unsigned char shuf_mask2 = (vector unsigned char){
			-1, -1, -1, -1,   /* skip packet_type */
			15, 14, -1, -1,   /* bswap16, pkt_len */
			15, 14,           /* bswap16, data_len */
			-1, -1,           /* skip vlan_tci */
			11, 10,  9,  8};  /* bswap32, rss */
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
		(rxq->crc_present * RTE_ETHER_CRC_LEN);
	const vector unsigned char rearm =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rearm_data);
	const vector unsigned char rxdf =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rx_descriptor_fields1);
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
			0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
			rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0};
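	/*
	 * The selection mask below keeps packet_type and vlan_tci from the
	 * title descriptor; pkt_len, data_len and the RSS hash come from
	 * each mini-CQE.
	 */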
	const vector unsigned short rxdf_sel_mask =
		(vector unsigned short){
			0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;

#ifdef MLX5_PMD_SOFT_COUNTERS
	const vector unsigned char ones = vec_splat_u8(-1);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
		 3,  2, 11, 10,
		 7,  6, 15, 14,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		vector unsigned char mcqe1, mcqe2;
		vector unsigned char rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned short mcqe_sel_mask =
			(vector unsigned short){0, 0, 0xffff, 0xffff,
			0, 0, 0xffff, 0xffff};
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		vector unsigned short left, right;
		vector unsigned char byte_cnt, invalid_mask;
		vector unsigned long lshift;
		__attribute__((altivec(vector__)))
			__attribute__((altivec(bool__)))
			unsigned long long shmask;
		const vector unsigned long shmax = {64, 64};
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));

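		/*
		 * Each 64B CQE slot carries up to eight 8B mini-CQEs (per
		 * mlx5_prm.h, a big-endian rx_hash_result followed by
		 * byte_cnt), hence the pos % 8 indexing below and the slot
		 * refresh/prefetch every eight entries.
		 */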
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8]);
		mcqe2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8 + 2]);

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 1]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 1]->rx_descriptor_fields1 = rxdf2;

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos + 2]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 3]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 3]->rx_descriptor_fields1 = rxdf2;

#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = (vector unsigned char)(vector unsigned long){
			(mcqe_n - pos) * sizeof(uint16_t) * 8, 0};

		lshift =
			vec_splat((vector unsigned long)invalid_mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		invalid_mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		invalid_mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)invalid_mask, shmask);

		mcqe1 = (vector unsigned char)
			vec_sro((vector unsigned short)mcqe1,
			(vector unsigned char){32});
		byte_cnt = (vector unsigned char)
			vec_sel((vector unsigned short)mcqe1,
			(vector unsigned short)mcqe2, mcqe_sel_mask);
		byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
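		/*
		 * Two rounds of shuffle + add below horizontally sum the
		 * per-packet 16-bit byte counts into the low 64 bits of
		 * byte_cnt, which is then accumulated as a scalar.
		 */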
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}

		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}

	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;

#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif

	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16bytes completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
		vector unsigned char cqes[4], vector unsigned char op_err,
		struct rte_mbuf **pkts)
{
	vector unsigned char pinfo0, pinfo1;
	vector unsigned char pinfo, ptype;
	vector unsigned char ol_flags = (vector unsigned char)
		(vector unsigned int){
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP};
	vector unsigned char cv_flags;
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ptype_mask =
		(vector unsigned char)(vector unsigned int){
		0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06};
	const vector unsigned char ptype_ol_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000106, 0x00000106, 0x00000106, 0x00000106};
	const vector unsigned char pinfo_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000003, 0x00000003, 0x00000003, 0x00000003};
	const vector unsigned char cv_flag_sel = (vector unsigned char){
		0, (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), 0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), 0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0};
	const vector unsigned char cv_mask =
		(vector unsigned char)(vector unsigned int){
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED};
	const vector unsigned char mbuf_init =
		(vector unsigned char)vec_vsx_ld
			(0, (vector unsigned char *)&rxq->mbuf_initializer);
	const vector unsigned short rearm_sel_mask =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	vector unsigned char rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	pinfo = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	/* Extract hdr_type_etc field. */
	pinfo0 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	ptype = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	if (rxq->mark) {
		const vector unsigned char pinfo_ft_mask =
			(vector unsigned char)(vector unsigned int){
			0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
		const vector unsigned char fdir_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR, PKT_RX_FDIR,
			PKT_RX_FDIR, PKT_RX_FDIR};
		vector unsigned char fdir_id_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID,
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID};
		vector unsigned char flow_tag, invalid_mask;

		flow_tag = (vector unsigned char)
			vec_and((vector unsigned long)pinfo,
			(vector unsigned long)pinfo_ft_mask);

		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)zero);
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_flags,
			(vector unsigned long)invalid_mask));

		/* Mask out invalid entries. */
		fdir_id_flags = (vector unsigned char)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)invalid_mask);
		/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)pinfo_ft_mask)));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = (vector unsigned char)
		vec_and((vector unsigned long)ptype,
		(vector unsigned long)ptype_mask);
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)pinfo_mask);
	pinfo = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){16, 16, 16, 16});

	/* Merge the fields into pinfo for ol_flags calculation. */
	pinfo = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)pinfo);
	ptype = (vector unsigned char)
		vec_sr((vector unsigned int)pinfo,
		(vector unsigned int){10, 10, 10, 10});
	ptype = (vector unsigned char)
		vec_packs((vector unsigned int)ptype,
		(vector unsigned int)zero);

	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = (vector unsigned char)
		vec_sr((vector unsigned short)op_err,
		(vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
	ptype = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)op_err);

	pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0];
	pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2];
	pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4];
	pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6];

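	/*
	 * Bit 6 of each index is the tunneled bit, so the branchless
	 * !!(pt_idx & (1 << 6)) * rxq->tunnel below ORs in the tunnel packet
	 * type only for tunneled packets on queues with tunnel offload.
	 */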
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
		!!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
		!!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
		!!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
		!!(pt_idx3 & (1 << 6)) * rxq->tunnel;

	/* Fill flags for checksum and VLAN. */
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)ptype_ol_mask);
	pinfo = vec_perm(cv_flag_sel, zero, pinfo);

	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){9, 9, 9, 9});
	cv_flags = (vector unsigned char)
		vec_or((vector unsigned long)pinfo,
		(vector unsigned long)cv_flags);

	/* Move back flags to start from byte[0]. */
	cv_flags = (vector unsigned char)
		vec_sr((vector unsigned int)cv_flags,
		(vector unsigned int){8, 8, 8, 8});

	/* Mask out garbage bits. */
	cv_flags = (vector unsigned char)
		vec_and((vector unsigned long)cv_flags,
		(vector unsigned long)cv_mask);

	/* Merge to ol_flags. */
	ol_flags = (vector unsigned char)
		vec_or((vector unsigned long)ol_flags,
		(vector unsigned long)cv_flags);

	/* Merge mbuf_init and ol_flags. */
	rearm0 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){64}), rearm_sel_mask);
	rearm1 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);
	rearm2 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)ol_flags, rearm_sel_mask);
	rearm3 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_sro((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);

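	/*
	 * vec_slo/vec_sro above shift the vector by whole octets, steering
	 * each packet's 32-bit ol_flags word into the lane that
	 * rearm_sel_mask blends into mbuf_init for the 16B store below.
	 */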
	/* Write 8B rearm_data and 8B ol_flags. */
	vec_vsx_st(rearm0, 0,
		(vector unsigned char *)&pkts[0]->rearm_data);
	vec_vsx_st(rearm1, 0,
		(vector unsigned char *)&pkts[1]->rearm_data);
	vec_vsx_st(rearm2, 0,
		(vector unsigned char *)&pkts[2]->rearm_data);
	vec_vsx_st(rearm3, 0,
		(vector unsigned char *)&pkts[3]->rearm_data);
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but
 * the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ones = vec_splat_u8(-1);
	const vector unsigned char owner_check =
		(vector unsigned char)(vector unsigned long){
		0x0100000001000000LL, 0x0100000001000000LL};
	const vector unsigned char opcode_check =
		(vector unsigned char)(vector unsigned long){
		0xf0000000f0000000LL, 0xf0000000f0000000LL};
	const vector unsigned char format_check =
		(vector unsigned char)(vector unsigned long){
		0x0c0000000c000000LL, 0x0c0000000c000000LL};
	const vector unsigned char resp_err_check =
		(vector unsigned char)(vector unsigned long){
		0xe0000000e0000000LL, 0xe0000000e0000000LL};
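	/*
	 * The four constants above test subfields of the op_own byte
	 * replicated in each 32-bit lane: the owner bit (0x01), the opcode
	 * nibble (0xf0, where 0xf marks an invalidated CQE and 0xe a
	 * responder error) and the compressed-format bits (0x0c).
	 */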
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
		 1,  0,  5,  4,
		 9,  8, 13, 12,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const vector unsigned char shuf_mask = (vector unsigned char){
		 5,  4,           /* bswap16, pkt_len */
		-1, -1,           /* zero out 2nd half of pkt_len */
		 5,  4,           /* bswap16, data_len */
		11, 10,           /* bswap16, vlan+tci */
		15, 14, 13, 12,   /* bswap32, rss */
		 1,  2,  3, -1};  /* fdir.hi */
	/* Mask to blend from the last Qword to the first DQword. */
	const vector unsigned char blend_mask = (vector unsigned char){
		-1,  0,  0,  0,
		 0,  0,  0,  0,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0};
	const vector unsigned char flow_mark_adj =
		(vector unsigned char)(vector unsigned int){
		0, 0, 0, rxq->mark * (-1)};
	const vector unsigned short cqe_sel_mask1 =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	const vector unsigned short cqe_sel_mask2 =
		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};

	MLX5_ASSERT(rxq->sges_n == 0);
	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);

	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);

	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *          uint8_t  pkt_info;
	 *          uint8_t  flow_tag[3];
	 *          uint16_t byte_cnt;
	 *          uint8_t  rsvd4;
	 *          uint8_t  op_own;
	 *          uint16_t hdr_type_etc;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
		vector unsigned char cqe_tmp1, cqe_tmp2;
		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
		vector unsigned char opcode, owner_mask, invalid_mask;
		vector unsigned char comp_mask;
		vector unsigned char mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		const vector unsigned long shmax = {64, 64};
		vector unsigned char byte_cnt;
		vector unsigned short left, right;
		vector unsigned long lshift;
		vector __attribute__((altivec(bool__)))
			unsigned long shmask;
#endif
		vector unsigned char mbp1, mbp2;
		vector unsigned char p =
			(vector unsigned char)(vector unsigned short){
				0, 1, 2, 3, 0, 0, 0, 0};
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}

		/* A.0 do not cross the end of CQ. */
		mask = (vector unsigned char)(vector unsigned long){
			(pkts_n - pos) * sizeof(uint16_t) * 8, 0};

		{
			vector unsigned long lshift;
			vector __attribute__((altivec(bool__)))
				unsigned long shmask;
			const vector unsigned long shmax = {64, 64};

			lshift = vec_splat((vector unsigned long)mask, 0);
			shmask = vec_cmpgt(shmax, lshift);
			mask = (vector unsigned char)
				vec_sl((vector unsigned long)ones, lshift);
			mask = (vector unsigned char)
				vec_sel((vector unsigned long)shmask,
				(vector unsigned long)mask, shmask);
		}
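		/*
		 * The select above guards the shift: vec_sl on 64-bit lanes
		 * takes its shift count modulo 64, so a count of 64 would
		 * wrap to zero; shmask forces the boundary mask to zero in
		 * that case instead.
		 */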

		p = (vector unsigned char)
			vec_andc((vector unsigned long)p,
			(vector unsigned long)mask);

		/* A.1 load cqes. */
		p3 = (unsigned int)((vector unsigned short)p)[3];
		cqes[3] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p3].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		p2 = (unsigned int)((vector unsigned short)p)[2];
		cqes[2] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p2].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.1 load mbuf pointers. */
		mbp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos]);
		mbp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos + 2]);

		/* A.1 load a block having op_own. */
		p1 = (unsigned int)((vector unsigned short)p)[1];
		cqes[1] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p1].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		cqes[0] = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.2 copy mbuf pointers. */
		*(vector unsigned char *)&pkts[pos] = mbp1;
		*(vector unsigned char *)&pkts[pos + 2] = mbp2;
		rte_cio_rmb();
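		/*
		 * The I/O read barrier keeps the op_own loads above, which
		 * establish CQE validity, ordered before the loads of the
		 * remaining CQE fields below.
		 */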

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p3].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos + p2].pkt_info;
		cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p3].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p2].csum);
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p3].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p2].rsvd3[9], 0LL};
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2,
			(vector unsigned short)cqe_sel_mask2);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1,
			(vector unsigned short)cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask);
		pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned short)pkt_mb3 -
			(vector unsigned short)crc_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned short)pkt_mb2 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned int)pkt_mb3 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned int)pkt_mb2 +
			(vector unsigned int)flow_mark_adj);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 3]->pkt_len = pkt_mb3;
		*(vector unsigned char *)
			&pkts[pos + 2]->pkt_len = pkt_mb2;

		/* E.1 extract op_own field. */
		op_own_tmp2 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[2],
			(vector unsigned int)cqes[3]);

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p1].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos].pkt_info;
		cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = vec_sel(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p1].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos].csum);
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos + p1].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__attribute__((__aligned__(8))) unsigned long *)
			&cq[pos].rsvd3[9], 0LL};
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask2);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask);
		pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned short)pkt_mb1 -
			(vector unsigned short)crc_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned short)pkt_mb0 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned int)pkt_mb1 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned int)pkt_mb0 +
			(vector unsigned int)flow_mark_adj);

		/* E.1 extract op_own byte. */
		op_own_tmp1 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[0],
			(vector unsigned int)cqes[1]);
		op_own = (vector unsigned char)
			vec_mergel((vector unsigned long)op_own_tmp1,
			(vector unsigned long)op_own_tmp2);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 1]->pkt_len = pkt_mb1;
		*(vector unsigned char *)
			&pkts[pos]->pkt_len = pkt_mb0;

		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)owner_check);
		if (ownership)
			owner_mask = (vector unsigned char)
				vec_xor((vector unsigned long)owner_mask,
				(vector unsigned long)owner_check);
		owner_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)owner_mask,
			(vector unsigned int)owner_check);
		owner_mask = (vector unsigned char)
			vec_packs((vector unsigned int)owner_mask,
			(vector unsigned int)zero);

		/* E.3 get mask for invalidated CQEs. */
		opcode = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)opcode_check);
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)opcode_check,
			(vector unsigned int)opcode);
		invalid_mask = (vector unsigned char)
			vec_packs((vector unsigned int)invalid_mask,
			(vector unsigned int)zero);

		/* E.4 mask out beyond boundary. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)owner_mask);

		/* F.1 find compressed CQE format. */
		comp_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)format_check);
		comp_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)comp_mask,
			(vector unsigned int)format_check);
		comp_mask = (vector unsigned char)
			vec_packs((vector unsigned int)comp_mask,
			(vector unsigned int)zero);

		/* F.2 mask out invalid entries. */
		comp_mask = (vector unsigned char)
			vec_andc((vector unsigned long)comp_mask,
			(vector unsigned long)invalid_mask);
		comp_idx = ((vector unsigned long)comp_mask)[0];

		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ? __builtin_ctzll(comp_idx) /
			(sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP;
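		/*
		 * After vec_packs each CQE occupies one 16-bit lane of the
		 * mask, so __builtin_ctzll() / 16 on the low 64 bits yields
		 * the index (0..3) of the first matching CQE, or
		 * MLX5_VPMD_DESCS_PER_LOOP when there is none.
		 */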

		/* E.6 mask out entries after the compressed CQE. */
		mask = (vector unsigned char)(vector unsigned long){
			(comp_idx * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.7 count non-compressed valid CQEs. */
		n = ((vector unsigned long)invalid_mask)[0];
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;

		/* D.2 get the final invalid mask. */
		mask = (vector unsigned char)(vector unsigned long){
			(n * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* D.3 check error in opcode. */
		opcode = (vector unsigned char)
			vec_cmpeq((vector unsigned int)resp_err_check,
			(vector unsigned int)opcode);
		opcode = (vector unsigned char)
			vec_packs((vector unsigned int)opcode,
			(vector unsigned int)zero);
		opcode = (vector unsigned char)
			vec_andc((vector unsigned long)opcode,
			(vector unsigned long)invalid_mask);

		/* D.4 mark if any error is set */
		*err |= ((vector unsigned long)opcode)[0];

		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}
		if (rte_flow_dynf_metadata_avail()) {
			uint64_t flag = rte_flow_dynf_metadata_mask;
			int offs = rte_flow_dynf_metadata_offs;
			uint32_t metadata;

			/* This code is subject to further optimization. */
			metadata = cq[pos].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
								metadata;
			pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 1].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
								metadata;
			pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 2].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
								metadata;
			pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 3].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
								metadata;
			pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there is a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed =
			rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}
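
/*
 * For reference, the burst routine above is not called directly by
 * applications; the caller in mlx5_rxtx_vec.c looks roughly like the sketch
 * below (illustrative only, names per that file), leaving errored
 * completions to the error-handling path:
 *
 *	uint16_t
 *	mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
 *			  uint16_t pkts_n)
 *	{
 *		struct mlx5_rxq_data *rxq = dpdk_rxq;
 *		uint16_t nb_rx;
 *		uint64_t err = 0;
 *
 *		nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
 *		if (unlikely(err | rxq->err_state))
 *			nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
 *		return nb_rx;
 *	}
 */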

#endif /* RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ */