/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_altivec.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif

/**
 * Store free buffers to RX SW ring.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be stored.
 * @param n
 *   Number of packets to be stored.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

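	/*
	 * Copy two 8-byte mbuf pointers per 16-byte vector load/store
	 * (assumes a 64-bit build); a trailing odd pointer, if any, is
	 * handled by the scalar tail below.
	 */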
	for (pos = 0; pos < p; pos += 2) {
		vector unsigned char mbp;

		mbp = (vector unsigned char)vec_vsx_ld(0,
				(signed int const *)&elts[pos]);
		*(vector unsigned char *)&pkts[pos] = mbp;
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 *
 * @return
 *   Number of mini-CQEs successfully decompressed.
 */
static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	const vector unsigned char zero = (vector unsigned char){0};
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const vector unsigned char shuf_mask1 = (vector unsigned char){
			-1, -1, -1, -1,   /* skip packet_type */
			 7,  6, -1, -1,   /* bswap16, pkt_len */
			 7,  6,           /* bswap16, data_len */
			-1, -1,           /* skip vlan_tci */
			 3,  2,  1,  0};  /* bswap32, rss */
	const vector unsigned char shuf_mask2 = (vector unsigned char){
			-1, -1, -1, -1,   /* skip packet_type */
			15, 14, -1, -1,   /* bswap16, pkt_len */
			15, 14,           /* bswap16, data_len */
			-1, -1,           /* skip vlan_tci */
			11, 10,  9,  8};  /* bswap32, rss */
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
		(rxq->crc_present * RTE_ETHER_CRC_LEN);
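	/*
	 * The count of coalesced CQEs travels in the title CQE's byte_cnt,
	 * which the pre-built title packet carries in data_len (minus CRC,
	 * re-added above when CRC stripping is off).
	 */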
	const vector unsigned char rearm =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rearm_data);
	const vector unsigned char rxdf =
		(vector unsigned char)vec_vsx_ld(0,
		(signed int const *)&t_pkt->rx_descriptor_fields1);
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
			0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
			rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0};
	const vector unsigned short rxdf_sel_mask =
		(vector unsigned short){
			0xffff, 0xffff, 0, 0, 0, 0xffff, 0, 0};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;

#ifdef MLX5_PMD_SOFT_COUNTERS
	const vector unsigned char ones = vec_splat_u8(-1);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
		 3,  2, 11, 10,
		 7,  6, 15, 14,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		vector unsigned char mcqe1, mcqe2;
		vector unsigned char rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned short mcqe_sel_mask =
			(vector unsigned short){0, 0, 0xffff, 0xffff,
			0, 0, 0xffff, 0xffff};
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13, 16,
			17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		vector unsigned short left, right;
		vector unsigned char byte_cnt, invalid_mask;
		vector unsigned long lshift;
		vector __attribute__((altivec(bool__)))
			unsigned long long shmask;
		const vector unsigned long shmax = {64, 64};
#endif

		for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
			if (likely(pos + i < mcqe_n))
				rte_prefetch0((void *)(cq + pos + i));

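		/*
		 * Eight 8-byte mini-CQEs are packed into each 64-byte CQE
		 * slot, so the two 16-byte loads below fetch four of them;
		 * pos % 8 indexes within the current block of eight.
		 */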
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8]);
		mcqe2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&mcq[pos % 8 + 2]);

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 1]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe1, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe1, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 1]->rx_descriptor_fields1 = rxdf2;

		/* B.1 store rearm data to mbuf. */
		*(vector unsigned char *)
			&elts[pos + 2]->rearm_data = rearm;
		*(vector unsigned char *)
			&elts[pos + 3]->rearm_data = rearm;

		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = vec_perm(mcqe2, zero, shuf_mask1);
		rxdf2 = vec_perm(mcqe2, zero, shuf_mask2);
		rxdf1 = (vector unsigned char)
			((vector unsigned short)rxdf1 -
			(vector unsigned short)crc_adj);
		rxdf2 = (vector unsigned char)
			((vector unsigned short)rxdf2 -
			(vector unsigned short)crc_adj);
		rxdf1 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf1,
			(vector unsigned short)rxdf, rxdf_sel_mask);
		rxdf2 = (vector unsigned char)
			vec_sel((vector unsigned short)rxdf2,
			(vector unsigned short)rxdf, rxdf_sel_mask);

		/* D.1 store rx_descriptor_fields1. */
		*(vector unsigned char *)
			&elts[pos + 2]->rx_descriptor_fields1 = rxdf1;
		*(vector unsigned char *)
			&elts[pos + 3]->rx_descriptor_fields1 = rxdf2;

#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = (vector unsigned char)(vector unsigned long){
			(mcqe_n - pos) * sizeof(uint16_t) * 8, 0};

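		/*
		 * Clamp the shift: vector shift counts are taken modulo the
		 * element width, so a shift of 64 or more has to be forced
		 * to an all-zero lane explicitly via shmask below.
		 */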
		lshift =
			vec_splat((vector unsigned long)invalid_mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		invalid_mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		invalid_mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)invalid_mask, shmask);

		mcqe1 = (vector unsigned char)
			vec_sro((vector unsigned short)mcqe1,
			(vector unsigned char){32});
		byte_cnt = (vector unsigned char)
			vec_sel((vector unsigned short)mcqe1,
			(vector unsigned short)mcqe2, mcqe_sel_mask);
		byte_cnt = vec_perm(byte_cnt, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
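		/*
		 * Two perm/add rounds below fold the eight 16-bit lengths
		 * into a single sum read out of the low doubleword.
		 */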
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		if (rxq->dynf_meta) {
			int32_t offs = rxq->flow_meta_offset;
			const uint32_t meta =
				*RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *);

			/* Check if title packet has valid metadata. */
			if (meta) {
				MLX5_ASSERT(t_pkt->ol_flags &
					    rxq->flow_meta_mask);
				*RTE_MBUF_DYNFIELD(elts[pos], offs,
							uint32_t *) = meta;
				*RTE_MBUF_DYNFIELD(elts[pos + 1], offs,
							uint32_t *) = meta;
				*RTE_MBUF_DYNFIELD(elts[pos + 2], offs,
							uint32_t *) = meta;
				*RTE_MBUF_DYNFIELD(elts[pos + 3], offs,
							uint32_t *) = meta;
			}
		}

		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}

	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;

#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif

	rxq->cq_ci += mcqe_n;
	return mcqe_n;
}

/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16bytes completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
		vector unsigned char cqes[4], vector unsigned char op_err,
		struct rte_mbuf **pkts)
{
	vector unsigned char pinfo0, pinfo1;
	vector unsigned char pinfo, ptype;
	vector unsigned char ol_flags = (vector unsigned char)
		(vector unsigned int){
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP,
			rxq->rss_hash * PKT_RX_RSS_HASH |
				rxq->hw_timestamp * PKT_RX_TIMESTAMP};
	vector unsigned char cv_flags;
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ptype_mask =
		(vector unsigned char)(vector unsigned int){
		0x0000fd06, 0x0000fd06, 0x0000fd06, 0x0000fd06};
	const vector unsigned char ptype_ol_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000106, 0x00000106, 0x00000106, 0x00000106};
	const vector unsigned char pinfo_mask =
		(vector unsigned char)(vector unsigned int){
		0x00000003, 0x00000003, 0x00000003, 0x00000003};
	const vector unsigned char cv_flag_sel = (vector unsigned char){
		0, (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1), 0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1), 0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0};
	const vector unsigned char cv_mask =
		(vector unsigned char)(vector unsigned int){
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
		PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED};
	const vector unsigned char mbuf_init =
		(vector unsigned char)vec_vsx_ld
			(0, (vector unsigned char *)&rxq->mbuf_initializer);
	const vector unsigned short rearm_sel_mask =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	vector unsigned char rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergeh((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	pinfo = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	/* Extract hdr_type_etc field. */
	pinfo0 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[0],
		(vector unsigned int)cqes[1]);
	pinfo1 = (vector unsigned char)
		vec_mergel((vector unsigned int)cqes[2],
		(vector unsigned int)cqes[3]);
	ptype = (vector unsigned char)
		vec_mergeh((vector unsigned long)pinfo0,
		(vector unsigned long)pinfo1);

	if (rxq->mark) {
		const vector unsigned char pinfo_ft_mask =
			(vector unsigned char)(vector unsigned int){
			0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00};
		const vector unsigned char fdir_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR, PKT_RX_FDIR,
			PKT_RX_FDIR, PKT_RX_FDIR};
		vector unsigned char fdir_id_flags =
			(vector unsigned char)(vector unsigned int){
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID,
			PKT_RX_FDIR_ID, PKT_RX_FDIR_ID};
		vector unsigned char flow_tag, invalid_mask;

		flow_tag = (vector unsigned char)
			vec_and((vector unsigned long)pinfo,
			(vector unsigned long)pinfo_ft_mask);

		/* Check if flow tag is non-zero, then set PKT_RX_FDIR. */
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)zero);
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_flags,
			(vector unsigned long)invalid_mask));

		/* Mask out invalid entries. */
		fdir_id_flags = (vector unsigned char)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)invalid_mask);

		/* Drop PKT_RX_FDIR_ID if flow tag is MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = (vector unsigned char)
			vec_or((vector unsigned long)ol_flags,
			(vector unsigned long)
			vec_andc((vector unsigned long)fdir_id_flags,
			(vector unsigned long)
			vec_cmpeq((vector unsigned int)flow_tag,
			(vector unsigned int)pinfo_ft_mask)));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = (vector unsigned char)
		vec_and((vector unsigned long)ptype,
		(vector unsigned long)ptype_mask);
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)pinfo_mask);
	pinfo = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){16, 16, 16, 16});

	/* Merge the fields into pinfo for the ol_flags calculation. */
	pinfo = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)pinfo);
	ptype = (vector unsigned char)
		vec_sr((vector unsigned int)pinfo,
		(vector unsigned int){10, 10, 10, 10});
	ptype = (vector unsigned char)
		vec_packs((vector unsigned int)ptype,
		(vector unsigned int)zero);

	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = (vector unsigned char)
		vec_sr((vector unsigned short)op_err,
		(vector unsigned short){8, 8, 8, 8, 8, 8, 8, 8});
	ptype = (vector unsigned char)
		vec_or((vector unsigned long)ptype,
		(vector unsigned long)op_err);

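	/*
	 * Each 16-bit lane now carries an 8-bit index into
	 * mlx5_ptype_table; bit 6 of the index marks a tunneled packet,
	 * folded in through the rxq->tunnel multiply below.
	 */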
	pt_idx0 = (uint8_t)((vector unsigned char)ptype)[0];
	pt_idx1 = (uint8_t)((vector unsigned char)ptype)[2];
	pt_idx2 = (uint8_t)((vector unsigned char)ptype)[4];
	pt_idx3 = (uint8_t)((vector unsigned char)ptype)[6];

	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
		!!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
		!!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
		!!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
		!!(pt_idx3 & (1 << 6)) * rxq->tunnel;

	/* Fill flags for checksum and VLAN. */
	pinfo = (vector unsigned char)
		vec_and((vector unsigned long)pinfo,
		(vector unsigned long)ptype_ol_mask);
	pinfo = vec_perm(cv_flag_sel, zero, pinfo);

	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = (vector unsigned char)
		vec_sl((vector unsigned int)pinfo,
		(vector unsigned int){9, 9, 9, 9});
	cv_flags = (vector unsigned char)
		vec_or((vector unsigned long)pinfo,
		(vector unsigned long)cv_flags);

	/* Move back flags to start from byte[0]. */
	cv_flags = (vector unsigned char)
		vec_sr((vector unsigned int)cv_flags,
		(vector unsigned int){8, 8, 8, 8});

	/* Mask out garbage bits. */
	cv_flags = (vector unsigned char)
		vec_and((vector unsigned long)cv_flags,
		(vector unsigned long)cv_mask);

	/* Merge to ol_flags. */
	ol_flags = (vector unsigned char)
		vec_or((vector unsigned long)ol_flags,
		(vector unsigned long)cv_flags);

	/* Merge mbuf_init and ol_flags. */
	rearm0 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){64}), rearm_sel_mask);
	rearm1 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_slo((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);
	rearm2 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)ol_flags, rearm_sel_mask);
	rearm3 = (vector unsigned char)
		vec_sel((vector unsigned short)mbuf_init,
		(vector unsigned short)
		vec_sro((vector unsigned short)ol_flags,
		(vector unsigned char){32}), rearm_sel_mask);

	/* Write 8B rearm_data and 8B ol_flags. */
	vec_vsx_st(rearm0, 0,
		(vector unsigned char *)&pkts[0]->rearm_data);
	vec_vsx_st(rearm1, 0,
		(vector unsigned char *)&pkts[1]->rearm_data);
	vec_vsx_st(rearm2, 0,
		(vector unsigned char *)&pkts[2]->rearm_data);
	vec_vsx_st(rearm3, 0,
		(vector unsigned char *)&pkts[3]->rearm_data);
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but
 * the packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to the application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 * @param[out] no_cq
 *   Pointer to a boolean. Set true if no new CQE seen.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err, bool *no_cq)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
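	/*
	 * The expected CQE owner bit flips each time cq_ci wraps the ring;
	 * it is derived from the bit just above the index mask.
	 */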
	const vector unsigned char zero = (vector unsigned char){0};
	const vector unsigned char ones = vec_splat_u8(-1);
	const vector unsigned char owner_check =
		(vector unsigned char)(vector unsigned long){
		0x0100000001000000LL, 0x0100000001000000LL};
	const vector unsigned char opcode_check =
		(vector unsigned char)(vector unsigned long){
		0xf0000000f0000000LL, 0xf0000000f0000000LL};
	const vector unsigned char format_check =
		(vector unsigned char)(vector unsigned long){
		0x0c0000000c000000LL, 0x0c0000000c000000LL};
	const vector unsigned char resp_err_check =
		(vector unsigned char)(vector unsigned long){
		0xe0000000e0000000LL, 0xe0000000e0000000LL};
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const vector unsigned char len_shuf_mask = (vector unsigned char){
		 1,  0,  5,  4,
		 9,  8, 13, 12,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const vector unsigned char shuf_mask = (vector unsigned char){
		 5,  4,           /* bswap16, pkt_len */
		-1, -1,           /* zero out 2nd half of pkt_len */
		 5,  4,           /* bswap16, data_len */
		11, 10,           /* bswap16, vlan+tci */
		15, 14, 13, 12,   /* bswap32, rss */
		 1,  2,  3, -1};  /* fdir.hi */
	/* Mask to blend from the last Qword to the first DQword. */
	const vector unsigned char blend_mask = (vector unsigned char){
		-1,  0,  0,  0,
		 0,  0,  0,  0,
		-1, -1, -1, -1,
		-1, -1, -1, -1};
	const vector unsigned char crc_adj =
		(vector unsigned char)(vector unsigned short){
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0};
	const vector unsigned char flow_mark_adj =
		(vector unsigned char)(vector unsigned int){
		0, 0, 0, rxq->mark * (-1)};
	const vector unsigned short cqe_sel_mask1 =
		(vector unsigned short){0, 0, 0, 0, 0xffff, 0xffff, 0, 0};
	const vector unsigned short cqe_sel_mask2 =
		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};

	MLX5_ASSERT(rxq->sges_n == 0);
	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);

	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from a compressed CQE. */
	rcvd_pkt = rxq->decompressed;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		rxq->decompressed -= rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
	if (!pkts_n) {
		*no_cq = !rcvd_pkt;
		return rcvd_pkt;
	}
	/* At this point, there shouldn't be any remaining packets. */
	MLX5_ASSERT(rxq->decompressed == 0);

	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remaining CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *          uint8_t  pkt_info;
	 *          uint8_t  flow_tag[3];
	 *          uint16_t byte_cnt;
	 *          uint8_t  rsvd4;
	 *          uint8_t  op_own;
	 *          uint16_t hdr_type_etc;
	 *          uint16_t vlan_info;
	 *          uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		vector unsigned char cqes[MLX5_VPMD_DESCS_PER_LOOP];
		vector unsigned char cqe_tmp1, cqe_tmp2;
		vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		vector unsigned char op_own, op_own_tmp1, op_own_tmp2;
		vector unsigned char opcode, owner_mask, invalid_mask;
		vector unsigned char comp_mask;
		vector unsigned char mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		const vector unsigned char lower_half = {
			0, 1, 4, 5, 8, 9, 12, 13,
			16, 17, 20, 21, 24, 25, 28, 29};
		const vector unsigned char upper_half = {
			2, 3, 6, 7, 10, 11, 14, 15,
			18, 19, 22, 23, 26, 27, 30, 31};
		const vector unsigned long shmax = {64, 64};
		vector unsigned char byte_cnt;
		vector unsigned short left, right;
		vector unsigned long lshift;
		vector __attribute__((altivec(bool__)))
			unsigned long shmask;
#endif
		vector unsigned char mbp1, mbp2;
		vector unsigned char p =
			(vector unsigned char)(vector unsigned short){
				0, 1, 2, 3, 0, 0, 0, 0};
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}

		/* A.0 do not cross the end of CQ. */
		mask = (vector unsigned char)(vector unsigned long){
			(pkts_n - pos) * sizeof(uint16_t) * 8, 0};

		{
			vector unsigned long lshift;
			vector __attribute__((altivec(bool__)))
				unsigned long shmask;
			const vector unsigned long shmax = {64, 64};

			lshift = vec_splat((vector unsigned long)mask, 0);
			shmask = vec_cmpgt(shmax, lshift);
			mask = (vector unsigned char)
				vec_sl((vector unsigned long)ones, lshift);
			mask = (vector unsigned char)
				vec_sel((vector unsigned long)shmask,
				(vector unsigned long)mask, shmask);
		}

		p = (vector unsigned char)
			vec_andc((vector unsigned long)p,
			(vector unsigned long)mask);

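		/*
		 * Lanes of p past the burst tail are now zero, so the
		 * corresponding loads below re-read cq[pos] instead of
		 * touching CQEs beyond the valid range.
		 */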
		/* A.1 load cqes. */
		p3 = (unsigned int)((vector unsigned short)p)[3];
		cqes[3] = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos + p3].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		p2 = (unsigned int)((vector unsigned short)p)[2];
		cqes[2] = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos + p2].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.1 load mbuf pointers. */
		mbp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos]);
		mbp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&elts[pos + 2]);

		/* A.1 load a block having op_own. */
		p1 = (unsigned int)((vector unsigned short)p)[1];
		cqes[1] = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos + p1].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		cqes[0] = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos].sop_drop_qpn, 0LL};
		rte_compiler_barrier();

		/* B.2 copy mbuf pointers. */
		*(vector unsigned char *)&pkts[pos] = mbp1;
		*(vector unsigned char *)&pkts[pos + 2] = mbp2;
		rte_cio_rmb();
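		/*
		 * The read barrier above keeps the op_own loads ordered
		 * before the remaining CQE fields are fetched, so no CQE is
		 * parsed before its ownership has been checked.
		 */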

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p3].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos + p2].pkt_info;
		cqes[3] = vec_sel(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = vec_sel(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p3].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p2].csum);
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos + p3].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos + p2].rsvd3[9], 0LL};
		cqes[3] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[3],
			(vector unsigned short)cqe_tmp2,
			(vector unsigned short)cqe_sel_mask2);
		cqes[2] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[2],
			(vector unsigned short)cqe_tmp1,
			(vector unsigned short)cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = vec_perm(cqes[3], zero, shuf_mask);
		pkt_mb2 = vec_perm(cqes[2], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned short)pkt_mb3 -
			(vector unsigned short)crc_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned short)pkt_mb2 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb3 = (vector unsigned char)
			((vector unsigned int)pkt_mb3 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb2 = (vector unsigned char)
			((vector unsigned int)pkt_mb2 +
			(vector unsigned int)flow_mark_adj);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 3]->pkt_len = pkt_mb3;
		*(vector unsigned char *)
			&pkts[pos + 2]->pkt_len = pkt_mb2;

		/* E.1 extract op_own field. */
		op_own_tmp2 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[2],
			(vector unsigned int)cqes[3]);

		/* C.1 load remaining CQE data and extract necessary fields. */
		cqe_tmp2 = *(vector unsigned char *)
			&cq[pos + p1].pkt_info;
		cqe_tmp1 = *(vector unsigned char *)
			&cq[pos].pkt_info;
		cqes[1] = vec_sel(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = vec_sel(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos + p1].csum);
		cqe_tmp1 = (vector unsigned char)vec_vsx_ld(0,
			(signed int const *)&cq[pos].csum);
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask1);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask1);
		cqe_tmp2 = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos + p1].rsvd3[9], 0LL};
		cqe_tmp1 = (vector unsigned char)(vector unsigned long){
			*(__rte_aligned(8) unsigned long *)
			&cq[pos].rsvd3[9], 0LL};
		cqes[1] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[1],
			(vector unsigned short)cqe_tmp2, cqe_sel_mask2);
		cqes[0] = (vector unsigned char)
			vec_sel((vector unsigned short)cqes[0],
			(vector unsigned short)cqe_tmp1, cqe_sel_mask2);

		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = vec_perm(cqes[1], zero, shuf_mask);
		pkt_mb0 = vec_perm(cqes[0], zero, shuf_mask);

		/* C.3 adjust CRC length. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned short)pkt_mb1 -
			(vector unsigned short)crc_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned short)pkt_mb0 -
			(vector unsigned short)crc_adj);

		/* C.4 adjust flow mark. */
		pkt_mb1 = (vector unsigned char)
			((vector unsigned int)pkt_mb1 +
			(vector unsigned int)flow_mark_adj);
		pkt_mb0 = (vector unsigned char)
			((vector unsigned int)pkt_mb0 +
			(vector unsigned int)flow_mark_adj);

		/* E.1 extract op_own byte. */
		op_own_tmp1 = (vector unsigned char)
			vec_mergeh((vector unsigned int)cqes[0],
			(vector unsigned int)cqes[1]);
		op_own = (vector unsigned char)
			vec_mergel((vector unsigned long)op_own_tmp1,
			(vector unsigned long)op_own_tmp2);

		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		*(vector unsigned char *)
			&pkts[pos + 1]->pkt_len = pkt_mb1;
		*(vector unsigned char *)
			&pkts[pos]->pkt_len = pkt_mb0;

		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)owner_check);
		if (ownership)
			owner_mask = (vector unsigned char)
				vec_xor((vector unsigned long)owner_mask,
				(vector unsigned long)owner_check);
		owner_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)owner_mask,
			(vector unsigned int)owner_check);
		owner_mask = (vector unsigned char)
			vec_packs((vector unsigned int)owner_mask,
			(vector unsigned int)zero);

		/* E.3 get mask for invalidated CQEs. */
		opcode = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)opcode_check);
		invalid_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)opcode_check,
			(vector unsigned int)opcode);
		invalid_mask = (vector unsigned char)
			vec_packs((vector unsigned int)invalid_mask,
			(vector unsigned int)zero);

		/* E.4 mask out beyond boundary. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)owner_mask);

		/* F.1 find compressed CQE format. */
		comp_mask = (vector unsigned char)
			vec_and((vector unsigned long)op_own,
			(vector unsigned long)format_check);
		comp_mask = (vector unsigned char)
			vec_cmpeq((vector unsigned int)comp_mask,
			(vector unsigned int)format_check);
		comp_mask = (vector unsigned char)
			vec_packs((vector unsigned int)comp_mask,
			(vector unsigned int)zero);

		/* F.2 mask out invalid entries. */
		comp_mask = (vector unsigned char)
			vec_andc((vector unsigned long)comp_mask,
			(vector unsigned long)invalid_mask);
		comp_idx = ((vector unsigned long)comp_mask)[0];

		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ? __builtin_ctzll(comp_idx) /
			(sizeof(uint16_t) * 8) : MLX5_VPMD_DESCS_PER_LOOP;
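		/*
		 * Each CQE occupies one 16-bit lane in the packed masks, so
		 * the trailing-zero count divided by 16 yields the lane
		 * index of the first compressed CQE.
		 */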

		/* E.6 mask out entries after the compressed CQE. */
		mask = (vector unsigned char)(vector unsigned long){
			(comp_idx * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* E.7 count non-compressed valid CQEs. */
		n = ((vector unsigned long)invalid_mask)[0];
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;

		/* D.2 get the final invalid mask. */
		mask = (vector unsigned char)(vector unsigned long){
			(n * sizeof(uint16_t) * 8), 0};
		lshift = vec_splat((vector unsigned long)mask, 0);
		shmask = vec_cmpgt(shmax, lshift);
		mask = (vector unsigned char)
			vec_sl((vector unsigned long)ones, lshift);
		mask = (vector unsigned char)
			vec_sel((vector unsigned long)shmask,
			(vector unsigned long)mask, shmask);
		invalid_mask = (vector unsigned char)
			vec_or((vector unsigned long)invalid_mask,
			(vector unsigned long)mask);

		/* D.3 check error in opcode. */
		opcode = (vector unsigned char)
			vec_cmpeq((vector unsigned int)resp_err_check,
			(vector unsigned int)opcode);
		opcode = (vector unsigned char)
			vec_packs((vector unsigned int)opcode,
			(vector unsigned int)zero);
		opcode = (vector unsigned char)
			vec_andc((vector unsigned long)opcode,
			(vector unsigned long)invalid_mask);

		/* D.4 mark if any error is set. */
		*err |= ((vector unsigned long)opcode)[0];

		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			if (rxq->rt_timestamp) {
				struct mlx5_dev_ctx_shared *sh = rxq->sh;
				uint64_t ts;

				ts = rte_be_to_cpu_64(cq[pos].timestamp);
				pkts[pos]->timestamp =
					mlx5_txpp_convert_rx_ts(sh, ts);
				ts = rte_be_to_cpu_64(cq[pos + p1].timestamp);
				pkts[pos + 1]->timestamp =
					mlx5_txpp_convert_rx_ts(sh, ts);
				ts = rte_be_to_cpu_64(cq[pos + p2].timestamp);
				pkts[pos + 2]->timestamp =
					mlx5_txpp_convert_rx_ts(sh, ts);
				ts = rte_be_to_cpu_64(cq[pos + p3].timestamp);
				pkts[pos + 3]->timestamp =
					mlx5_txpp_convert_rx_ts(sh, ts);
			} else {
				pkts[pos]->timestamp = rte_be_to_cpu_64
						(cq[pos].timestamp);
				pkts[pos + 1]->timestamp = rte_be_to_cpu_64
						(cq[pos + p1].timestamp);
				pkts[pos + 2]->timestamp = rte_be_to_cpu_64
						(cq[pos + p2].timestamp);
				pkts[pos + 3]->timestamp = rte_be_to_cpu_64
						(cq[pos + p3].timestamp);
			}
		}
		if (rxq->dynf_meta) {
			uint64_t flag = rxq->flow_meta_mask;
			int32_t offs = rxq->flow_meta_offset;
			uint32_t metadata;

			/* This code is subject to further optimization. */
			metadata = cq[pos].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
								metadata;
			pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 1].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
								metadata;
			pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 2].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
								metadata;
			pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
			metadata = cq[pos + 3].flow_table_metadata;
			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
								metadata;
			pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
		byte_cnt = (vector unsigned char)
			vec_andc((vector unsigned long)byte_cnt,
			(vector unsigned long)invalid_mask);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		left = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, lower_half);
		right = vec_perm((vector unsigned short)byte_cnt,
			(vector unsigned short)zero, upper_half);
		byte_cnt = (vector unsigned char)vec_add(left, right);
		rcvd_byte += ((vector unsigned long)byte_cnt)[0];
#endif

		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
		*no_cq = true;
		return rcvd_pkt;
	}
	/* Update the consumer indexes for non-compressed CQEs. */
	MLX5_ASSERT(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
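	/*
	 * comp_idx == n means the compressed CQE directly follows the valid
	 * CQEs consumed above, so it can be expanded right away.
	 */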
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq->decompressed =
			rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->decompressed;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
			rxq->decompressed -= n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	*no_cq = !rcvd_pkt;
	return rcvd_pkt;
}
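
/*
 * Illustrative caller loop (a sketch only; the real entry point lives in
 * mlx5_rxtx_vec.c, and the error-recovery helper name below is an
 * assumption):
 *
 *	uint16_t tn = 0;
 *	uint64_t err = 0;
 *	bool no_cq = false;
 *
 *	do {
 *		uint16_t n = rxq_burst_v(rxq, pkts + tn, pkts_n - tn,
 *					 &err, &no_cq);
 *		if (unlikely(err))
 *			n = rxq_handle_pending_error(rxq, pkts + tn, n);
 *		tn += n;
 *		if (unlikely(no_cq))
 *			break;
 *	} while (tn != pkts_n);
 */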

#endif /* RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_ */