/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#define RTE_PMD_MLX5_RXTX_VEC_NEON_H_

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <arm_neon.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#pragma GCC diagnostic ignored "-Wcast-qual"

/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of packets to be filled.
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const uint8x16_t dseg_shuf_m = {
		3, 2, 1, 0,	/* length, bswap32 */
		4, 5, 6, 7,	/* lkey */
		15, 14, 13, 12,	/* addr, bswap64 */
		11, 10, 9, 8
	};
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif

	for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
		uint8x16_t desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = vreinterpretq_u8_u32((uint32x4_t) {
				DATA_LEN(pkt),
				mlx5_tx_mb2mr(txq, pkt),
				addr,
				addr >> 32 });
		desc = vqtbl1q_u8(desc, dseg_shuf_m);
		vst1q_u8(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}

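/*
 * Illustration of the shuffle above (assuming a little-endian host):
 * for a segment with DATA_LEN() == 0x40 whose data starts at
 * 0x1122334455667788, vqtbl1q_u8() emits the 16B data segment with the
 * length and address converted to network byte order, while the lkey
 * from mlx5_tx_mb2mr() is left unswapped (presumably already in
 * network order):
 *
 *   byte[0..3]  = 00 00 00 40               - byte count, bswap32
 *   byte[4..7]  = lkey                      - stored verbatim
 *   byte[8..15] = 11 22 33 44 55 66 77 88   - address, bswap64
 */
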
/**
 * Send multi-segmented packets until a single-segment packet is
 * encountered in the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static uint16_t
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;
	bool metadata_ol =
		txq->offloads & DEV_TX_OFFLOAD_MATCH_METADATA ?
		true : false;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const uint8x16_t ctrl_shuf_m = {
			3, 2, 1, 0,	/* bswap32 */
			7, 6, 5, 4,	/* bswap32 */
			11, 10, 9, 8,	/* bswap32 */
			12, 13, 14, 15
		};
		uint8_t cs_flags;
		uint16_t max_elts;
		uint16_t max_wqe;
		uint8x16_t *t_wqe;
		uint8_t *dseg;
		uint8x16_t ctrl;
		rte_be32_t metadata =
			metadata_ol && (buf->ol_flags & PKT_TX_METADATA) ?
			buf->tx_metadata : 0;

		assert(segs_n);
		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * An MPW session consumes 2 WQEs at most to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		wqe = &((volatile struct mlx5_wqe64 *)
			 txq->wqes)[wqe_ci & wq_mask].hdr;
		cs_flags = txq_ol_cksum_to_cs(buf);
		/* Title WQEBB pointer. */
		t_wqe = (uint8x16_t *)wqe;
		dseg = (uint8_t *)(wqe + 1);
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (uint8_t *)
					&((volatile struct mlx5_wqe64 *)
					   txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg, &buf, 1);
			dseg += MLX5_WQE_DWORD_SIZE;
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);
		++wqe_ci;
		/* Fill CTRL in the header. */
		ctrl = vreinterpretq_u8_u32((uint32x4_t) {
				MLX5_OPC_MOD_MPW << 24 |
				txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
				txq->qp_num_8s | ds, 0, 0});
		ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
		vst1q_u8((void *)t_wqe, ctrl);
		/* Fill ESEG in the header. */
		vst1q_u32((void *)(t_wqe + 1),
			  ((uint32x4_t){ 0,
					 cs_flags << 16 | rte_cpu_to_be_16(len),
					 metadata, 0 }));
		txq->wqe_ci = wqe_ci;
	}
	if (!n)
		return 0;
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}

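/*
 * Sketch of the completion bookkeeping in txq_scatter_v() above
 * (assuming the value 8 written to ctrl[2] is MLX5_WQE_CTRL_CQ_UPDATE
 * from mlx5_prm.h): a CQE write-back is requested at most once per
 * MLX5_TX_COMP_THRESH packets, and ctrl[3] records elts_head so that
 * the completion path can advance elts_tail for the whole batch at
 * once instead of per packet.
 */
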
/**
 * Send burst of packets with Enhanced MPW. If a multi-segment packet is
 * encountered, it returns early so that the packet can be processed by
 * txq_scatter_v(). All the packets in the pkts list must be single-segment
 * packets with the same offload flags. This must be checked by
 * txq_count_contig_single_seg() and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 * @param metadata
 *   Metadata value to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags, rte_be32_t metadata)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n = 0;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req = 0;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const uint8x16_t ctrl_shuf_m = {
		3, 2, 1, 0,	/* bswap32 */
		7, 6, 5, 4,	/* bswap32 */
		11, 10, 9, 8,	/* bswap32 */
		12, 13, 14, 15
	};
	uint8x16_t *t_wqe;
	uint8_t *dseg;
	uint8x16_t ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (uint8x16_t *)wqe;
	dseg = (uint8_t *)(wqe + 1);
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (uint8_t *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
	} else {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = 8;
	}
	/* Fill CTRL in the header. */
	ctrl = vreinterpretq_u8_u32((uint32x4_t) {
			MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
			txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
			txq->qp_num_8s | (pkts_n + 2),
			comp_req,
			txq->elts_head });
	ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
	vst1q_u8((void *)t_wqe, ctrl);
	/* Fill ESEG in the header. */
	vst1q_u32((void *)(t_wqe + 1),
		  ((uint32x4_t) { 0, cs_flags, metadata, 0 }));
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
	/* Ring QP doorbell. */
	mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
	return pkts_n;
}

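/*
 * Worked example for the wqe_ci advance in txq_burst_v() above
 * (assuming MLX5_WQE_SIZE == 64 and MLX5_WQE_DWORD_SIZE == 16, so
 * nb_dword_per_wqebb == 4, and a 2-dword header of CTRL + ESEG, which
 * is also why the DS count written into CTRL is pkts_n + 2): sending
 * pkts_n == 6 packets consumes 2 + 6 == 8 dwords, i.e.
 * (2 + 6 + 3) / 4 == 2 WQEBBs - a plain ceiling division.
 */
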
/**
 * Copy mbuf pointers from the RX SW ring to the pkts array.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be filled.
 * @param n
 *   Number of mbuf pointers to be copied.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		uint64x2_t mbp;

		mbp = vld1q_u64((void *)&elts[pos]);
		vst1q_u64((void *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

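/*
 * Background for the decompression below (a sketch of the layout, as
 * reflected by struct mlx5_mini_cqe8): a compressed session starts with
 * one full "title" CQE followed by arrays of eight 8B mini-CQEs that
 * carry only the per-packet fields (byte count, RSS hash). The caller
 * pre-builds the title mbuf and stashes the session size in
 * t_pkt->data_len, which is why rxq_cq_decompress_v() restores mcqe_n
 * from it and indexes the mini-CQEs as mcq[pos % 8].
 */
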
/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 */
static inline void
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)&(cq + 1)->pkt_info;
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const uint8x16_t mcqe_shuf_m1 = {
		-1, -1, -1, -1, /* skip packet_type */
		7, 6,		/* pkt_len, bswap16 */
		-1, -1,
		7, 6,		/* data_len, bswap16 */
		-1, -1,		/* skip vlan_tci */
		3, 2, 1, 0	/* hash.rss, bswap32 */
	};
	const uint8x16_t mcqe_shuf_m2 = {
		-1, -1, -1, -1, /* skip packet_type */
		15, 14,		/* pkt_len, bswap16 */
		-1, -1,
		15, 14,		/* data_len, bswap16 */
		-1, -1,		/* skip vlan_tci */
		11, 10, 9, 8	/* hash.rss, bswap32 */
	};
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * RTE_ETHER_CRC_LEN);
	const uint64x2_t rearm =
		vld1q_u64((void *)&t_pkt->rearm_data);
	const uint32x4_t rxdf_mask = {
		0xffffffff, /* packet_type */
		0,	    /* skip pkt_len */
		0xffff0000, /* vlan_tci, skip data_len */
		0,	    /* skip hash.rss */
	};
	const uint8x16_t rxdf =
		vandq_u8(vld1q_u8((void *)&t_pkt->rx_descriptor_fields1),
			 vreinterpretq_u8_u32(rxdf_mask));
	const uint16x8_t crc_adj = {
		0, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
		0, 0
	};
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const uint8x8_t len_shuf_m = {
		7, 6,	/* 1st mCQE */
		15, 14, /* 2nd mCQE */
		23, 22, /* 3rd mCQE */
		31, 30	/* 4th mCQE */
	};

	/*
	 * A. load mCQEs into two 128bit registers.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		uint8_t *p = (void *)&mcq[pos % 8];
		uint8_t *e0 = (void *)&elts[pos]->rearm_data;
		uint8_t *e1 = (void *)&elts[pos + 1]->rearm_data;
		uint8_t *e2 = (void *)&elts[pos + 2]->rearm_data;
		uint8_t *e3 = (void *)&elts[pos + 3]->rearm_data;
		uint16x4_t byte_cnt;
#ifdef MLX5_PMD_SOFT_COUNTERS
		uint16x4_t invalid_mask =
			vcreate_u16(mcqe_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				    -1UL << ((mcqe_n - pos) *
					     sizeof(uint16_t) * 8) : 0);
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		__asm__ volatile (
		/* A.1 load mCQEs into two 128bit registers. */
		"ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e0]] \n\t"
		"add %[e0], %[e0], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e1]] \n\t"
		"add %[e1], %[e1], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v16.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v16.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e0]] \n\t"
		"st1 {v19.2d}, [%[e1]] \n\t"
		/* B.1 store rearm data to mbuf. */
		"st1 {%[rearm].2d}, [%[e2]] \n\t"
		"add %[e2], %[e2], #16 \n\t"
		"st1 {%[rearm].2d}, [%[e3]] \n\t"
		"add %[e3], %[e3], #16 \n\t"
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		"tbl v18.16b, {v17.16b}, %[mcqe_shuf_m1].16b \n\t"
		"tbl v19.16b, {v17.16b}, %[mcqe_shuf_m2].16b \n\t"
		"sub v18.8h, v18.8h, %[crc_adj].8h \n\t"
		"sub v19.8h, v19.8h, %[crc_adj].8h \n\t"
		"orr v18.16b, v18.16b, %[rxdf].16b \n\t"
		"orr v19.16b, v19.16b, %[rxdf].16b \n\t"
		/* D.1 store rx_descriptor_fields1. */
		"st1 {v18.2d}, [%[e2]] \n\t"
		"st1 {v19.2d}, [%[e3]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		"tbl %[byte_cnt].8b, {v16.16b - v17.16b}, %[len_shuf_m].8b \n\t"
#endif
		:[byte_cnt]"=&w"(byte_cnt)
		:[mcq]"r"(p),
		 [rxdf]"w"(rxdf),
		 [rearm]"w"(rearm),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [mcqe_shuf_m1]"w"(mcqe_shuf_m1),
		 [mcqe_shuf_m2]"w"(mcqe_shuf_m2),
		 [crc_adj]"w"(crc_adj),
		 [len_shuf_m]"w"(len_shuf_m)
		:"memory", "v16", "v17", "v18", "v19");
#ifdef MLX5_PMD_SOFT_COUNTERS
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)&(cq + pos)->pkt_info;
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
}

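/*
 * Illustration of the tail masking in rxq_cq_decompress_v() above: byte
 * counts are accumulated four mini-CQEs at a time, so on a final
 * iteration with, e.g., mcqe_n - pos == 3, invalid_mask becomes
 * -1UL << 48 - only the 4th 16-bit lane set - and vbic_u16() zeroes the
 * byte count of the nonexistent 4th mini-CQE before the pairwise adds.
 */
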
/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param ptype_info
 *   Array of four 4-byte packet type info fields extracted from the original
 *   completion descriptor.
 * @param flow_tag
 *   Array of four 4-byte flow IDs extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 2B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
			 uint32x4_t ptype_info, uint32x4_t flow_tag,
			 uint16x4_t op_err, struct rte_mbuf **pkts)
{
	uint16x4_t ptype;
	uint32x4_t pinfo, cv_flags;
	uint32x4_t ol_flags =
		vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
			    rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
	const uint8x16_t cv_flag_sel = {
		0,
		(uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
		0,
		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
		0,
		(uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
		0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	const uint32x4_t cv_mask =
		vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			    PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
	const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
	uint64x2_t rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	if (rxq->mark) {
		const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
		const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
		uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
		uint32x4_t invalid_mask;

		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = vceqzq_u32(flow_tag);
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_flags, invalid_mask));
		/* Mask out invalid entries. */
		fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
		/* Check if flow tag equals MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = vorrq_u32(ol_flags,
				     vbicq_u32(fdir_id_flags,
					       vceqq_u32(flow_tag, ft_def)));
	}
	/*
	 * ptype_info has the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = vshrn_n_u32(ptype_info, 10);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	ptype = vorr_u16(ptype, op_err);
	pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
	pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
	pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
	pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
	/* Fill flags for checksum and VLAN. */
	pinfo = vandq_u32(ptype_info, ptype_ol_mask);
	pinfo = vreinterpretq_u32_u8(
		vqtbl1q_u8(cv_flag_sel, vreinterpretq_u8_u32(pinfo)));
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = vshlq_n_u32(pinfo, 9);
	cv_flags = vorrq_u32(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = vshrq_n_u32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = vandq_u32(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = vorrq_u32(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags, and store. */
	rearm0 = vcombine_u64(mbuf_init,
			      vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32(
					 ol_flags)), 32));
	rearm1 = vcombine_u64(mbuf_init,
			      vand_u64(vget_high_u64(vreinterpretq_u64_u32(
				       ol_flags)), r32_mask));
	rearm2 = vcombine_u64(mbuf_init,
			      vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32(
					 ol_flags)), 32));
	rearm3 = vcombine_u64(mbuf_init,
			      vand_u64(vget_low_u64(vreinterpretq_u64_u32(
				       ol_flags)), r32_mask));
	vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
	vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
	vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
	vst1q_u64((void *)&pkts[3]->rearm_data, rearm3);
}

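/*
 * Lane-order note for the rearm stores above: the CQE vectors are
 * assembled in reverse order ({v3, v2, v1, v0}, see the comment in
 * rxq_burst_v() below), so ol_flags lane 3 belongs to pkts[0] and lane
 * 0 to pkts[3]; rearm0 therefore takes the high half shifted right by
 * 32 bits, while rearm3 masks out all but the low 32 bits of the low
 * half.
 */
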
/**
 * Receive burst of packets. An errored completion also consumes an mbuf, but
 * the packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be
 * freed before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set to a non-zero value if the pkts array has at least
 *   one error packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	const uint16x4_t ownership = vdup_n_u16(!(rxq->cq_ci & (q_mask + 1)));
	const uint16x4_t owner_check = vcreate_u16(0x0001000100010001);
	const uint16x4_t opcode_check = vcreate_u16(0x00f000f000f000f0);
	const uint16x4_t format_check = vcreate_u16(0x000c000c000c000c);
	const uint16x4_t resp_err_check = vcreate_u16(0x00e000e000e000e0);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
#endif
	/* Mask to generate 16B length vector. */
	const uint8x8_t len_shuf_m = {
		52, 53,		/* 4th CQE */
		36, 37,		/* 3rd CQE */
		20, 21,		/* 2nd CQE */
		 4,  5		/* 1st CQE */
	};
	/* Mask to extract 16B data from a 64B CQE. */
	const uint8x16_t cqe_shuf_m = {
		28, 29,		/* hdr_type_etc */
		 0,		/* pkt_info */
		-1,		/* null */
		47, 46,		/* byte_cnt, bswap16 */
		31, 30,		/* vlan_info, bswap16 */
		15, 14, 13, 12,	/* rx_hash_res, bswap32 */
		57, 58, 59,	/* flow_tag */
		63		/* op_own */
	};
	/* Mask to generate 16B data for mbuf. */
	const uint8x16_t mb_shuf_m = {
		 4,  5, -1, -1,	/* pkt_len */
		 4,  5,		/* data_len */
		 6,  7,		/* vlan_tci */
		 8,  9, 10, 11,	/* hash.rss */
		12, 13, 14, -1	/* hash.fdir.hi */
	};
	/* Mask to generate 16B owner vector. */
	const uint8x8_t owner_shuf_m = {
		63, -1,		/* 4th CQE */
		47, -1,		/* 3rd CQE */
		31, -1,		/* 2nd CQE */
		15, -1		/* 1st CQE */
	};
	/* Mask to generate a vector having packet_type/ol_flags. */
	const uint8x16_t ptype_shuf_m = {
		48, 49, 50, -1,	/* 4th CQE */
		32, 33, 34, -1,	/* 3rd CQE */
		16, 17, 18, -1,	/* 2nd CQE */
		 0,  1,  2, -1	/* 1st CQE */
	};
	/* Mask to generate a vector having flow tags. */
	const uint8x16_t ftag_shuf_m = {
		60, 61, 62, -1,	/* 4th CQE */
		44, 45, 46, -1,	/* 3rd CQE */
		28, 29, 30, -1,	/* 2nd CQE */
		12, 13, 14, -1	/* 1st CQE */
	};
	const uint16x8_t crc_adj = {
		0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0
	};
	const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch_non_temporal(cq);
	rte_prefetch_non_temporal(cq + 1);
	rte_prefetch_non_temporal(cq + 2);
	rte_prefetch_non_temporal(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	/*
	 * Order of indexes:
	 *   rq_ci >= cq_ci >= rq_pi
	 * Definition of indexes:
	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
	 */
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= rxq->rq_repl_thresh)
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there are unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
	assert(rxq->rq_pi == rxq->cq_ci);
	/*
	 * Note that vectors have reverse order - {v3, v2, v1, v0}, because
	 * there's no instruction to count trailing zeros. __builtin_clzl() is
	 * used instead.
	 *
	 * A. copy 4 mbuf pointers from elts ring to returning pkts.
	 * B. load 64B CQE and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *                uint16_t hdr_type_etc;
	 *                uint8_t  pkt_info;
	 *                uint8_t  rsvd;
	 *                uint16_t byte_cnt;
	 *                uint16_t vlan_info;
	 *                uint32_t rx_hash_res;
	 *                uint8_t  flow_tag[3];
	 *                uint8_t  op_own;
	 *        } c;
	 * C. fill in mbuf.
	 * D. get valid CQEs.
	 * E. find compressed CQE.
	 */
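	/*
	 * Worked example of the bookkeeping above (hypothetical values,
	 * q_n == 256): with rq_ci == 200, cq_ci == 180 and rq_pi == 170,
	 * 20 buffers are posted to HW, 10 are decompressed but not yet
	 * returned to the app, and repl_n == 256 - (200 - 170) == 226
	 * descriptors can be replenished.
	 */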
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		uint16x4_t op_own;
		uint16x4_t opcode, owner_mask, invalid_mask;
		uint16x4_t comp_mask;
		uint16x4_t mask;
		uint16x4_t byte_cnt;
		uint32x4_t ptype_info, flow_tag;
		register uint64x2_t c0, c1, c2, c3;
		uint8_t *p0, *p1, *p2, *p3;
		uint8_t *e0 = (void *)&elts[pos]->pkt_len;
		uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
		uint8_t *e2 = (void *)&elts[pos + 2]->pkt_len;
		uint8_t *e3 = (void *)&elts[pos + 3]->pkt_len;
		void *elts_p = (void *)&elts[pos];
		void *pkts_p = (void *)&pkts[pos];

		/* A.0 do not cross the end of CQ. */
		mask = vcreate_u16(pkts_n - pos < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> ((pkts_n - pos) *
					    sizeof(uint16_t) * 8) : 0);
		p0 = (void *)&cq[pos].pkt_info;
		p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
		p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
		p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
		/* B.0 (CQE 3) load a block having op_own. */
		c3 = vld1q_u64((uint64_t *)(p3 + 48));
		/* B.0 (CQE 2) load a block having op_own. */
		c2 = vld1q_u64((uint64_t *)(p2 + 48));
		/* B.0 (CQE 1) load a block having op_own. */
		c1 = vld1q_u64((uint64_t *)(p1 + 48));
		/* B.0 (CQE 0) load a block having op_own. */
		c0 = vld1q_u64((uint64_t *)(p0 + 48));
		/* Synchronize for loading the rest of blocks. */
		rte_cio_rmb();
		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;

			rte_prefetch_non_temporal(&cq[next]);
			rte_prefetch_non_temporal(&cq[next + 1]);
			rte_prefetch_non_temporal(&cq[next + 2]);
			rte_prefetch_non_temporal(&cq[next + 3]);
		}
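		/*
		 * Note on ordering: the 16B blocks holding op_own are
		 * loaded first, and rte_cio_rmb() above keeps them ordered
		 * before the loads in the asm block below, so a CQE is
		 * only parsed after its ownership/opcode byte has been
		 * observed as valid.
		 */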
		__asm__ volatile (
		/* B.1 (CQE 3) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
		/* B.2 (CQE 3) move the block having op_own. */
		"mov v19.16b, %[c3].16b \n\t"
		/* B.3 (CQE 3) extract 16B fields. */
		"tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.1 (CQE 2) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
		/* B.4 (CQE 3) adjust CRC length. */
		"sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
		/* C.1 (CQE 3) generate final structure for mbuf. */
		"tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 2) move the block having op_own. */
		"mov v19.16b, %[c2].16b \n\t"
		/* B.3 (CQE 2) extract 16B fields. */
		"tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.1 (CQE 1) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
		/* B.4 (CQE 2) adjust CRC length. */
		"sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
		/* C.1 (CQE 2) generate final structure for mbuf. */
		"tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 1) move the block having op_own. */
		"mov v19.16b, %[c1].16b \n\t"
		/* B.3 (CQE 1) extract 16B fields. */
		"tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.1 (CQE 0) load the rest of blocks. */
		"ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
		/* B.4 (CQE 1) adjust CRC length. */
		"sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
		/* C.1 (CQE 1) generate final structure for mbuf. */
		"tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
		/* B.2 (CQE 0) move the block having op_own. */
		"mov v19.16b, %[c0].16b \n\t"
		/* A.1 load mbuf pointers. */
		"ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
		/* B.3 (CQE 0) extract 16B fields. */
		"tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
		/* B.4 (CQE 0) adjust CRC length. */
		"sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
		/* D.1 extract op_own byte. */
		"tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
		/* C.2 (CQE 3) adjust flow mark. */
		"add v15.4s, v15.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 3) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v15.2d}, [%[e3]] \n\t"
		/* C.2 (CQE 2) adjust flow mark. */
		"add v14.4s, v14.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 2) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v14.2d}, [%[e2]] \n\t"
		/* C.1 (CQE 0) generate final structure for mbuf. */
		"tbl v12.16b, {v20.16b}, %[mb_shuf_m].16b \n\t"
		/* C.2 (CQE 1) adjust flow mark. */
		"add v13.4s, v13.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 1) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v13.2d}, [%[e1]] \n\t"
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Extract byte_cnt. */
		"tbl %[byte_cnt].8b, {v20.16b - v23.16b}, %[len_shuf_m].8b \n\t"
#endif
		/* Extract ptype_info. */
		"tbl %[ptype_info].16b, {v20.16b - v23.16b}, %[ptype_shuf_m].16b \n\t"
		/* Extract flow_tag. */
		"tbl %[flow_tag].16b, {v20.16b - v23.16b}, %[ftag_shuf_m].16b \n\t"
		/* A.2 copy mbuf pointers. */
		"st1 {v24.2d - v25.2d}, [%[pkts_p]] \n\t"
		/* C.2 (CQE 0) adjust flow mark. */
		"add v12.4s, v12.4s, %[flow_mark_adj].4s \n\t"
		/* C.3 (CQE 0) fill in mbuf - rx_descriptor_fields1. */
		"st1 {v12.2d}, [%[e0]] \n\t"
		:[op_own]"=&w"(op_own),
		 [byte_cnt]"=&w"(byte_cnt),
		 [ptype_info]"=&w"(ptype_info),
		 [flow_tag]"=&w"(flow_tag)
		:[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0),
		 [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
		 [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0),
		 [elts_p]"r"(elts_p),
		 [pkts_p]"r"(pkts_p),
		 [cqe_shuf_m]"w"(cqe_shuf_m),
		 [mb_shuf_m]"w"(mb_shuf_m),
		 [owner_shuf_m]"w"(owner_shuf_m),
		 [len_shuf_m]"w"(len_shuf_m),
		 [ptype_shuf_m]"w"(ptype_shuf_m),
		 [ftag_shuf_m]"w"(ftag_shuf_m),
		 [crc_adj]"w"(crc_adj),
		 [flow_mark_adj]"w"(flow_mark_adj)
		:"memory",
		 "v12", "v13", "v14", "v15",
		 "v16", "v17", "v18", "v19",
		 "v20", "v21", "v22", "v23",
		 "v24", "v25");
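		/*
		 * At this point op_own holds the ownership/opcode bytes of
		 * CQE3..CQE0 in lanes 0..3 - reverse order, matching the
		 * {v3, v2, v1, v0} note above - so the first CQEs occupy
		 * the most-significant lanes and __builtin_clzl() below can
		 * count valid entries from CQE 0 onward.
		 */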
		/* D.2 flip owner bit to mark CQEs from last round. */
		owner_mask = vand_u16(op_own, owner_check);
		owner_mask = vceq_u16(owner_mask, ownership);
		/* D.3 get mask for invalidated CQEs. */
		opcode = vand_u16(op_own, opcode_check);
		invalid_mask = vceq_u16(opcode_check, opcode);
		/* E.1 find compressed CQE format. */
		comp_mask = vand_u16(op_own, format_check);
		comp_mask = vceq_u16(comp_mask, format_check);
		/* D.4 mask out beyond boundary. */
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.5 merge invalid_mask with invalid owner. */
		invalid_mask = vorr_u16(invalid_mask, owner_mask);
		/* E.2 mask out invalid entries. */
		comp_mask = vbic_u16(comp_mask, invalid_mask);
		/* E.3 get the first compressed CQE. */
		comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
					  comp_mask), 0)) /
					  (sizeof(uint16_t) * 8);
		/* D.6 mask out entries after the compressed CQE. */
		mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
				   0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.7 count non-compressed valid CQEs. */
		n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
				   invalid_mask), 0)) / (sizeof(uint16_t) * 8);
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
				   -1UL >> (n * sizeof(uint16_t) * 8) : 0);
		invalid_mask = vorr_u16(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = vceq_u16(resp_err_check, opcode);
		opcode = vbic_u16(opcode, invalid_mask);
		/* D.4 mark if any error is set. */
		*err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
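		/*
		 * Example of the CLZ arithmetic above (hypothetical lane
		 * values): CQE0..CQE3 occupy the 16-bit lanes from the MSB
		 * downwards, so if only CQE0 and CQE1 are valid,
		 * invalid_mask is 0x00000000ffffffff, __builtin_clzl()
		 * returns 32, and n == 32 / 16 == 2 valid completions.
		 */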
		/* C.4 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
					 opcode, &elts[pos]);
		if (rxq->hw_timestamp) {
			elts[pos]->timestamp =
				rte_be_to_cpu_64(
					container_of(p0, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 1]->timestamp =
				rte_be_to_cpu_64(
					container_of(p1, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 2]->timestamp =
				rte_be_to_cpu_64(
					container_of(p2, struct mlx5_cqe,
						     pkt_info)->timestamp);
			elts[pos + 3]->timestamp =
				rte_be_to_cpu_64(
					container_of(p3, struct mlx5_cqe,
						     pkt_info)->timestamp);
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
		rcvd_byte += vget_lane_u64(vpaddl_u32(vpaddl_u16(byte_cnt)), 0);
#endif
		/*
		 * Break the loop unless more valid CQEs are expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->cq_ci - rxq->rq_pi;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

#endif /* RTE_PMD_MLX5_RXTX_VEC_NEON_H_ */