/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
#define RTE_PMD_MLX5_RXTX_VEC_SSE_H_

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <smmintrin.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

/**
 * Fill in buffer descriptors in a multi-packet send descriptor.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param dseg
 *   Pointer to buffer descriptor to be written.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param n
 *   Number of buffer descriptors to be filled (one per packet).
 */
static inline void
txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
	      struct rte_mbuf **pkts, unsigned int n)
{
	unsigned int pos;
	uintptr_t addr;
	const __m128i shuf_mask_dseg =
		_mm_set_epi8(8, 9, 10, 11, /* addr, bswap64 */
			     12, 13, 14, 15,
			     7, 6, 5, 4, /* lkey */
			     0, 1, 2, 3 /* length, bswap32 */);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t tx_byte = 0;
#endif

	for (pos = 0; pos < n; ++pos, ++dseg) {
		__m128i desc;
		struct rte_mbuf *pkt = pkts[pos];

		addr = rte_pktmbuf_mtod(pkt, uintptr_t);
		desc = _mm_set_epi32(addr >> 32,
				     addr,
				     mlx5_tx_mb2mr(txq, pkt),
				     DATA_LEN(pkt));
		desc = _mm_shuffle_epi8(desc, shuf_mask_dseg);
		_mm_store_si128(dseg, desc);
#ifdef MLX5_PMD_SOFT_COUNTERS
		tx_byte += DATA_LEN(pkt);
#endif
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.obytes += tx_byte;
#endif
}

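/*
 * Illustrative scalar equivalent of the shuffle/store above (a sketch; the
 * data segment struct and field names are assumptions, not taken from this
 * file): each 16B descriptor ends up holding, in order, the big-endian byte
 * count, the (already big-endian) lkey and the big-endian buffer address.
 *
 *	struct mlx5_wqe_data_seg *d = (struct mlx5_wqe_data_seg *)dseg;
 *
 *	d->byte_count = rte_cpu_to_be_32(DATA_LEN(pkt));
 *	d->lkey = mlx5_tx_mb2mr(txq, pkt);
 *	d->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(pkt, uintptr_t));
 */
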
/**
 * Send multi-segment packets until a single-segment packet is encountered in
 * the pkts list.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static uint16_t
txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
	      uint16_t pkts_n)
{
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n;
	volatile struct mlx5_wqe *wqe = NULL;

	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	if (unlikely(!pkts_n))
		return 0;
	for (n = 0; n < pkts_n; ++n) {
		struct rte_mbuf *buf = pkts[n];
		unsigned int segs_n = buf->nb_segs;
		unsigned int ds = nb_dword_in_hdr;
		unsigned int len = PKT_LEN(buf);
		uint16_t wqe_ci = txq->wqe_ci;
		const __m128i shuf_mask_ctrl =
			_mm_set_epi8(15, 14, 13, 12,
				     8, 9, 10, 11, /* bswap32 */
				     4, 5, 6, 7, /* bswap32 */
				     0, 1, 2, 3 /* bswap32 */);
		uint8_t cs_flags;
		uint16_t max_elts;
		uint16_t max_wqe;
		__m128i *t_wqe, *dseg;
		__m128i ctrl;

		assert(segs_n);
		max_elts = elts_n - (elts_head - txq->elts_tail);
		max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
		/*
		 * A MPW session consumes 2 WQEs at most to
		 * include MLX5_MPW_DSEG_MAX pointers.
		 */
		if (segs_n == 1 ||
		    max_elts < segs_n || max_wqe < 2)
			break;
		if (segs_n > MLX5_MPW_DSEG_MAX) {
			txq->stats.oerrors++;
			break;
		}
		wqe = &((volatile struct mlx5_wqe64 *)
			txq->wqes)[wqe_ci & wq_mask].hdr;
		cs_flags = txq_ol_cksum_to_cs(buf);
		/* Title WQEBB pointer. */
		t_wqe = (__m128i *)wqe;
		dseg = (__m128i *)(wqe + 1);
		do {
			if (!(ds++ % nb_dword_per_wqebb)) {
				dseg = (__m128i *)
					&((volatile struct mlx5_wqe64 *)
					  txq->wqes)[++wqe_ci & wq_mask];
			}
			txq_wr_dseg_v(txq, dseg++, &buf, 1);
			(*txq->elts)[elts_head++ & elts_m] = buf;
			buf = buf->next;
		} while (--segs_n);
		++wqe_ci;
		/* Fill CTRL in the header. */
		ctrl = _mm_set_epi32(0, 0, txq->qp_num_8s | ds,
				     MLX5_OPC_MOD_MPW << 24 |
				     txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
		ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
		_mm_store_si128(t_wqe, ctrl);
		/* Fill ESEG in the header. */
		_mm_store_si128(t_wqe + 1,
				_mm_set_epi16(0, 0, 0, 0,
					      rte_cpu_to_be_16(len), cs_flags,
					      0, 0));
		txq->wqe_ci = wqe_ci;
	}
	if (!n)
		return 0;
	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
	txq->elts_head = elts_head;
	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		wqe->ctrl[2] = rte_cpu_to_be_32(8);
		wqe->ctrl[3] = txq->elts_head;
		txq->elts_comp = 0;
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += n;
#endif
	mlx5_tx_dbrec(txq, wqe);
	return n;
}

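/*
 * Worked example of the WQE geometry used above (a sketch assuming the usual
 * 64-byte WQEBB split into 16-byte dwords):
 *
 *	nb_dword_per_wqebb = MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;	  // 64 / 16 == 4
 *	nb_dword_in_hdr = sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE; // CTRL + ESEG == 2
 *
 * so a 2-WQEBB MPW session leaves 2 * 4 - 2 == 6 slots for data segments,
 * enough for MLX5_MPW_DSEG_MAX pointers as noted in the loop above.
 */
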
/**
 * Send a burst of packets with Enhanced MPW. If a multi-segment packet is
 * encountered, the function returns so that it can be processed by
 * txq_scatter_v(). All the packets in the pkts list must be single-segment
 * packets with the same offload flags, which must be ensured by
 * txq_count_contig_single_seg() and txq_calc_offload().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param pkts
 *   Pointer to array of packets to be sent.
 * @param pkts_n
 *   Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
 * @param cs_flags
 *   Checksum offload flags to be written in the descriptor.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
static inline uint16_t
txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint8_t cs_flags)
{
	struct rte_mbuf **elts;
	uint16_t elts_head = txq->elts_head;
	const uint16_t elts_n = 1 << txq->elts_n;
	const uint16_t elts_m = elts_n - 1;
	const unsigned int nb_dword_per_wqebb =
		MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
	const unsigned int nb_dword_in_hdr =
		sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
	unsigned int n = 0;
	unsigned int pos;
	uint16_t max_elts;
	uint16_t max_wqe;
	uint32_t comp_req = 0;
	const uint16_t wq_n = 1 << txq->wqe_n;
	const uint16_t wq_mask = wq_n - 1;
	uint16_t wq_idx = txq->wqe_ci & wq_mask;
	volatile struct mlx5_wqe64 *wq =
		&((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
	volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
	const __m128i shuf_mask_ctrl =
		_mm_set_epi8(15, 14, 13, 12,
			     8, 9, 10, 11, /* bswap32 */
			     4, 5, 6, 7, /* bswap32 */
			     0, 1, 2, 3 /* bswap32 */);
	__m128i *t_wqe, *dseg;
	__m128i ctrl;

	/* Make sure all packets can fit into a single WQE. */
	assert(elts_n > pkts_n);
	mlx5_tx_complete(txq);
	max_elts = (elts_n - (elts_head - txq->elts_tail));
	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
	assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
	if (unlikely(!pkts_n))
		return 0;
	elts = &(*txq->elts)[elts_head & elts_m];
	/* Loop for available tailroom first. */
	n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
	for (pos = 0; pos < (n & -2); pos += 2)
		_mm_storeu_si128((__m128i *)&elts[pos],
				 _mm_loadu_si128((__m128i *)&pkts[pos]));
	if (n & 1)
		elts[pos] = pkts[pos];
	/* Check if it crosses the end of the queue. */
	if (unlikely(n < pkts_n)) {
		elts = &(*txq->elts)[0];
		for (pos = 0; pos < pkts_n - n; ++pos)
			elts[pos] = pkts[n + pos];
	}
	txq->elts_head += pkts_n;
	/* Save title WQEBB pointer. */
	t_wqe = (__m128i *)wqe;
	dseg = (__m128i *)(wqe + 1);
	/* Calculate the number of entries to the end. */
	n = RTE_MIN(
		(wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
		pkts_n);
	/* Fill DSEGs. */
	txq_wr_dseg_v(txq, dseg, pkts, n);
	/* Check if it crosses the end of the queue. */
	if (n < pkts_n) {
		dseg = (__m128i *)txq->wqes;
		txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
	}
	if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
		txq->elts_comp += pkts_n;
	} else {
		/* A CQE slot must always be available. */
		assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
		/* Request a completion. */
		txq->elts_comp = 0;
		comp_req = 8;
	}
	/* Fill CTRL in the header. */
	ctrl = _mm_set_epi32(txq->elts_head, comp_req,
			     txq->qp_num_8s | (pkts_n + 2),
			     MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
			     txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
	ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
	_mm_store_si128(t_wqe, ctrl);
	/* Fill ESEG in the header. */
	_mm_store_si128(t_wqe + 1,
			_mm_set_epi8(0, 0, 0, 0,
				     0, 0, 0, 0,
				     0, 0, 0, cs_flags,
				     0, 0, 0, 0));
#ifdef MLX5_PMD_SOFT_COUNTERS
	txq->stats.opackets += pkts_n;
#endif
	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
		       nb_dword_per_wqebb;
	/* Ring QP doorbell. */
	mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
	return pkts_n;
}

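/*
 * Illustrative sketch of how a vectorized tx_burst callback can combine the
 * two senders above. This is simplified: the real dispatch lives in
 * mlx5_rxtx_vec.c, the txq_count_contig_single_seg() signature is an
 * assumption, and the real code also validates that offload flags match via
 * txq_calc_offload() instead of taking the first packet's flags.
 *
 *	uint16_t
 *	tx_burst_sketch(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 *			uint16_t pkts_n)
 *	{
 *		uint16_t nb_tx = 0;
 *
 *		while (pkts_n > nb_tx) {
 *			uint16_t n = pkts_n - nb_tx;
 *			uint16_t ret;
 *			uint8_t cs_flags;
 *
 *			// Multi-segment packets go through the scatter path.
 *			if (pkts[nb_tx]->nb_segs > 1) {
 *				ret = txq_scatter_v(txq, &pkts[nb_tx], n);
 *				if (!ret)
 *					break;
 *				nb_tx += ret;
 *				continue;
 *			}
 *			// Count leading single-segment packets, then send them
 *			// in one Enhanced MPW burst.
 *			n = RTE_MIN(n, (uint16_t)MLX5_VPMD_TX_MAX_BURST);
 *			n = txq_count_contig_single_seg(&pkts[nb_tx], n);
 *			cs_flags = txq_ol_cksum_to_cs(pkts[nb_tx]);
 *			ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
 *			nb_tx += ret;
 *			if (ret < n)
 *				break;
 *		}
 *		return nb_tx;
 *	}
 */
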
/**
 * Copy mbuf pointers from the RX SW ring to the pkts array.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param pkts
 *   Pointer to array of packets to be filled.
 * @param n
 *   Number of packets to be copied.
 */
static inline void
rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	const uint16_t q_mask = (1 << rxq->elts_n) - 1;
	struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
	unsigned int pos;
	uint16_t p = n & -2;

	for (pos = 0; pos < p; pos += 2) {
		__m128i mbp;

		mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp);
	}
	if (n & 1)
		pkts[pos] = elts[pos];
}

/**
 * Decompress a compressed completion and fill in mbufs in RX SW ring with data
 * extracted from the title completion descriptor.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cq
 *   Pointer to completion array having a compressed completion at first.
 * @param elts
 *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
 *   the title completion descriptor to be copied to the rest of mbufs.
 */
static inline void
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
		    struct rte_mbuf **elts)
{
	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
	struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
	unsigned int pos;
	unsigned int i;
	unsigned int inv = 0;
	/* Mask to shuffle from extracted mini CQE to mbuf. */
	const __m128i shuf_mask1 =
		_mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
			     -1, -1, /* skip vlan_tci */
			     6, 7, /* data_len, bswap16 */
			     -1, -1, 6, 7, /* pkt_len, bswap16 */
			     -1, -1, -1, -1 /* skip packet_type */);
	const __m128i shuf_mask2 =
		_mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
			     -1, -1, /* skip vlan_tci */
			     14, 15, /* data_len, bswap16 */
			     -1, -1, 14, 15, /* pkt_len, bswap16 */
			     -1, -1, -1, -1 /* skip packet_type */);
	/* Restore the compressed count. Must be 16 bits. */
	const uint16_t mcqe_n = t_pkt->data_len +
				(rxq->crc_present * ETHER_CRC_LEN);
	const __m128i rearm =
		_mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
	const __m128i rxdf =
		_mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0, 0);
	const uint32_t flow_tag = t_pkt->hash.fdir.hi;
#ifdef MLX5_PMD_SOFT_COUNTERS
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     14, 15, 6, 7,
			     10, 11, 2, 3);
#endif

	/*
	 * A. load mCQEs into a 128bit register.
	 * B. store rearm data to mbuf.
	 * C. combine data from mCQEs with rx_descriptor_fields1.
	 * D. store rx_descriptor_fields1.
	 * E. store flow tag (rte_flow mark).
	 */
	for (pos = 0; pos < mcqe_n; ) {
		__m128i mcqe1, mcqe2;
		__m128i rxdf1, rxdf2;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt, invalid_mask;
#endif

		if (!(pos & 0x7) && pos + 8 < mcqe_n)
			rte_prefetch0((void *)(cq + pos + 8));
		/* A.1 load mCQEs into a 128bit register. */
		mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
		mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 1]->rx_descriptor_fields1,
				 rxdf2);
		/* B.1 store rearm data to mbuf. */
		_mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
		_mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
		/* C.1 combine data from mCQEs with rx_descriptor_fields1. */
		rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
		rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
		rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
		rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
		rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
		rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
		/* D.1 store rx_descriptor_fields1. */
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 2]->rx_descriptor_fields1,
				 rxdf1);
		_mm_storeu_si128((__m128i *)
				 &elts[pos + 3]->rx_descriptor_fields1,
				 rxdf2);
#ifdef MLX5_PMD_SOFT_COUNTERS
		invalid_mask = _mm_set_epi64x(0,
					      (mcqe_n - pos) *
					      sizeof(uint16_t) * 8);
		invalid_mask = _mm_sll_epi64(ones, invalid_mask);
		mcqe1 = _mm_srli_si128(mcqe1, 4);
		byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
		byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		if (rxq->mark) {
			/* E.1 store flow tag (rte_flow mark). */
			elts[pos]->hash.fdir.hi = flow_tag;
			elts[pos + 1]->hash.fdir.hi = flow_tag;
			elts[pos + 2]->hash.fdir.hi = flow_tag;
			elts[pos + 3]->hash.fdir.hi = flow_tag;
		}
		pos += MLX5_VPMD_DESCS_PER_LOOP;
		/* Move to next CQE and invalidate consumed CQEs. */
		if (!(pos & 0x7) && pos < mcqe_n) {
			mcq = (void *)(cq + pos);
			for (i = 0; i < 8; ++i)
				cq[inv++].op_own = MLX5_CQE_INVALIDATE;
		}
	}
	/* Invalidate the rest of CQEs. */
	for (; inv < mcqe_n; ++inv)
		cq[inv].op_own = MLX5_CQE_INVALIDATE;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += mcqe_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	rxq->cq_ci += mcqe_n;
}

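/*
 * Note on the decompression above (descriptive only): a compressed session
 * keeps an array of 8-byte mini CQEs right after the title CQE, so every
 * decompressed mbuf inherits the title packet's rearm_data, packet_type,
 * vlan_tci and flow tag, and takes only its own byte count (pkt_len/data_len,
 * minus CRC when present) and RSS hash from the mini CQE, as selected by
 * shuf_mask1/shuf_mask2 and the 0x23 blends.
 */
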
/**
 * Calculate packet type and offload flag for mbuf and store it.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param cqes[4]
 *   Array of four 16bytes completions extracted from the original completion
 *   descriptor.
 * @param op_err
 *   Opcode vector having responder error status. Each field is 4B.
 * @param pkts
 *   Pointer to array of packets to be filled.
 */
static inline void
rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
			 __m128i op_err, struct rte_mbuf **pkts)
{
	__m128i pinfo0, pinfo1;
	__m128i pinfo, ptype;
	__m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH |
					  rxq->hw_timestamp * PKT_RX_TIMESTAMP);
	__m128i cv_flags;
	const __m128i zero = _mm_setzero_si128();
	const __m128i ptype_mask =
		_mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
	const __m128i ptype_ol_mask =
		_mm_set_epi32(0x106, 0x106, 0x106, 0x106);
	const __m128i pinfo_mask =
		_mm_set_epi32(0x3, 0x3, 0x3, 0x3);
	const __m128i cv_flag_sel =
		_mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
			     (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
					PKT_RX_L4_CKSUM_GOOD) >> 1),
			     0,
			     (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
			     0,
			     (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
			     (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
			     0);
	const __m128i cv_mask =
		_mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
			      PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
			      PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
	const __m128i mbuf_init =
		_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;
	uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;

	/* Extract pkt_info field. */
	pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
	pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
	/* Extract hdr_type_etc field. */
	pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
	pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
	ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
	if (rxq->mark) {
		const __m128i pinfo_ft_mask =
			_mm_set_epi32(0xffffff00, 0xffffff00,
				      0xffffff00, 0xffffff00);
		const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
		__m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
		__m128i flow_tag, invalid_mask;

		flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
		invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(invalid_mask,
							 fdir_flags));
		/* Mask out invalid entries. */
		fdir_id_flags = _mm_andnot_si128(invalid_mask, fdir_id_flags);
		/* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
		ol_flags = _mm_or_si128(ol_flags,
					_mm_andnot_si128(
						_mm_cmpeq_epi32(flow_tag,
								pinfo_ft_mask),
						fdir_id_flags));
	}
	/*
	 * Merge the two fields to generate the following:
	 * bit[1]     = l3_ok
	 * bit[2]     = l4_ok
	 * bit[8]     = cv
	 * bit[11:10] = l3_hdr_type
	 * bit[14:12] = l4_hdr_type
	 * bit[15]    = ip_frag
	 * bit[16]    = tunneled
	 * bit[17]    = outer_l3_type
	 */
	ptype = _mm_and_si128(ptype, ptype_mask);
	pinfo = _mm_and_si128(pinfo, pinfo_mask);
	pinfo = _mm_slli_epi32(pinfo, 16);
	/* Make pinfo carry the merged fields for ol_flags calculation. */
	pinfo = _mm_or_si128(ptype, pinfo);
	ptype = _mm_srli_epi32(pinfo, 10);
	ptype = _mm_packs_epi32(ptype, zero);
	/* Errored packets will have RTE_PTYPE_ALL_MASK. */
	op_err = _mm_srli_epi16(op_err, 8);
	ptype = _mm_or_si128(ptype, op_err);
	pt_idx0 = _mm_extract_epi8(ptype, 0);
	pt_idx1 = _mm_extract_epi8(ptype, 2);
	pt_idx2 = _mm_extract_epi8(ptype, 4);
	pt_idx3 = _mm_extract_epi8(ptype, 6);
	pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
			       !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
	pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
			       !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
	pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
			       !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
	pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
			       !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
	/* Fill flags for checksum and VLAN. */
	pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
	pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
	/* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
	cv_flags = _mm_slli_epi32(pinfo, 9);
	cv_flags = _mm_or_si128(pinfo, cv_flags);
	/* Move back flags to start from byte[0]. */
	cv_flags = _mm_srli_epi32(cv_flags, 8);
	/* Mask out garbage bits. */
	cv_flags = _mm_and_si128(cv_flags, cv_mask);
	/* Merge to ol_flags. */
	ol_flags = _mm_or_si128(ol_flags, cv_flags);
	/* Merge mbuf_init and ol_flags. */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
	rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
	/* Write 8B rearm_data and 8B ol_flags. */
	_mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
}

/**
 * Receive burst of packets. An errored completion also consumes a mbuf, but the
 * packet_type is set to be RTE_PTYPE_ALL_MASK. Marked mbufs should be freed
 * before returning to application.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 * @param[out] err
 *   Pointer to a flag. Set non-zero value if pkts array has at least one error
 *   packet to handle.
 *
 * @return
 *   Number of packets received including errors (<= pkts_n).
 */
static inline uint16_t
rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
	    uint64_t *err)
{
	const uint16_t q_n = 1 << rxq->cqe_n;
	const uint16_t q_mask = q_n - 1;
	volatile struct mlx5_cqe *cq;
	struct rte_mbuf **elts;
	unsigned int pos;
	uint64_t n;
	uint16_t repl_n;
	uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
	uint16_t nocmp_n = 0;
	uint16_t rcvd_pkt = 0;
	unsigned int cq_idx = rxq->cq_ci & q_mask;
	unsigned int elts_idx;
	unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
	const __m128i owner_check =
		_mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
	const __m128i opcode_check =
		_mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
	const __m128i format_check =
		_mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
	const __m128i resp_err_check =
		_mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
#ifdef MLX5_PMD_SOFT_COUNTERS
	uint32_t rcvd_byte = 0;
	/* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
	const __m128i len_shuf_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     12, 13, 8, 9,
			     4, 5, 0, 1);
#endif
	/* Mask to shuffle from extracted CQE to mbuf. */
	const __m128i shuf_mask =
		_mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
			     12, 13, 14, 15, /* rss, bswap32 */
			     10, 11, /* vlan_tci, bswap16 */
			     4, 5, /* data_len, bswap16 */
			     -1, -1, /* zero out 2nd half of pkt_len */
			     4, 5 /* pkt_len, bswap16 */);
	/* Mask to blend from the last Qword to the first DQword. */
	const __m128i blend_mask =
		_mm_set_epi8(-1, -1, -1, -1,
			     -1, -1, -1, -1,
			     0, 0, 0, 0,
			     0, 0, 0, -1);
	const __m128i zero = _mm_setzero_si128();
	const __m128i ones = _mm_cmpeq_epi32(zero, zero);
	const __m128i crc_adj =
		_mm_set_epi16(0, 0, 0, 0, 0,
			      rxq->crc_present * ETHER_CRC_LEN,
			      0,
			      rxq->crc_present * ETHER_CRC_LEN);
	const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);

	assert(rxq->sges_n == 0);
	assert(rxq->cqe_n == rxq->elts_n);
	cq = &(*rxq->cqes)[cq_idx];
	rte_prefetch0(cq);
	rte_prefetch0(cq + 1);
	rte_prefetch0(cq + 2);
	rte_prefetch0(cq + 3);
	pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
	/*
	 * Order of indexes:
	 *   rq_ci >= cq_ci >= rq_pi
	 * Definition of indexes:
	 *   rq_ci - cq_ci := # of buffers owned by HW (posted).
	 *   cq_ci - rq_pi := # of buffers not returned to app (decompressed).
	 *   N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
	 */
	repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
	/* See if there're unreturned mbufs from compressed CQE. */
	rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
	if (rcvd_pkt > 0) {
		rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
		rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
		rxq->rq_pi += rcvd_pkt;
		pkts += rcvd_pkt;
	}
	elts_idx = rxq->rq_pi & q_mask;
	elts = &(*rxq->elts)[elts_idx];
	/* Not to overflow pkts array. */
	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
	/* Not to cross queue end. */
	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
	if (!pkts_n)
		return rcvd_pkt;
	/* At this point, there shouldn't be any remained packets. */
	assert(rxq->rq_pi == rxq->cq_ci);
	/*
	 * A. load first Qword (8bytes) in one loop.
	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
	 * C. load remained CQE data and extract necessary fields.
	 *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
	 *    following structure:
	 *        struct {
	 *                uint8_t  pkt_info;
	 *                uint8_t  flow_tag[3];
	 *                uint16_t byte_cnt;
	 *                uint8_t  rsvd4;
	 *                uint8_t  op_own;
	 *                uint16_t hdr_type_etc;
	 *                uint16_t vlan_info;
	 *                uint32_t rx_has_res;
	 *        } c;
	 * D. fill in mbuf.
	 * E. get valid CQEs.
	 * F. find compressed CQE.
	 */
	for (pos = 0;
	     pos < pkts_n;
	     pos += MLX5_VPMD_DESCS_PER_LOOP) {
		__m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
		__m128i cqe_tmp1, cqe_tmp2;
		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
		__m128i op_own, op_own_tmp1, op_own_tmp2;
		__m128i opcode, owner_mask, invalid_mask;
		__m128i comp_mask;
		__m128i mask;
#ifdef MLX5_PMD_SOFT_COUNTERS
		__m128i byte_cnt;
#endif
		__m128i mbp1, mbp2;
		__m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
		unsigned int p1, p2, p3;

		/* Prefetch next 4 CQEs. */
		if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
			rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
		}
		/* A.0 do not cross the end of CQ. */
		mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		p = _mm_andnot_si128(mask, p);
		/* A.1 load cqes. */
		p3 = _mm_extract_epi16(p, 3);
		cqes[3] = _mm_loadl_epi64((__m128i *)
					  &cq[pos + p3].sop_drop_qpn);
		rte_compiler_barrier();
		p2 = _mm_extract_epi16(p, 2);
		cqes[2] = _mm_loadl_epi64((__m128i *)
					  &cq[pos + p2].sop_drop_qpn);
		rte_compiler_barrier();
		/* B.1 load mbuf pointers. */
		mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
		mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
		/* A.1 load a block having op_own. */
		p1 = _mm_extract_epi16(p, 1);
		cqes[1] = _mm_loadl_epi64((__m128i *)
					  &cq[pos + p1].sop_drop_qpn);
		rte_compiler_barrier();
		cqes[0] = _mm_loadl_epi64((__m128i *)
					  &cq[pos].sop_drop_qpn);
		/* B.2 copy mbuf pointers. */
		_mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
		_mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
		rte_cio_rmb();
		/* C.1 load remained CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
		cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
		cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
		cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
		cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
		pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
		pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
		pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
		_mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
		/* E.1 extract op_own field. */
		op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
		/* C.1 load remained CQE data and extract necessary fields. */
		cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
		cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
		cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
		cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
		cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
		cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
		cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
		cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
		cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
		cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
		/* C.2 generate final structure for mbuf with swapping bytes. */
		pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
		pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
		/* C.3 adjust CRC length. */
		pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
		pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
		/* C.4 adjust flow mark. */
		pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
		pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
		/* E.1 extract op_own byte. */
		op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
		op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
		/* D.1 fill in mbuf - rx_descriptor_fields1. */
		_mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
		_mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
		/* E.2 flip owner bit to mark CQEs from last round. */
		owner_mask = _mm_and_si128(op_own, owner_check);
		if (ownership)
			owner_mask = _mm_xor_si128(owner_mask, owner_check);
		owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
		owner_mask = _mm_packs_epi32(owner_mask, zero);
		/* E.3 get mask for invalidated CQEs. */
		opcode = _mm_and_si128(op_own, opcode_check);
		invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
		invalid_mask = _mm_packs_epi32(invalid_mask, zero);
		/* E.4 mask out beyond boundary. */
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.5 merge invalid_mask with invalid owner. */
		invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
		/* F.1 find compressed CQE format. */
		comp_mask = _mm_and_si128(op_own, format_check);
		comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
		comp_mask = _mm_packs_epi32(comp_mask, zero);
		/* F.2 mask out invalid entries. */
		comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
		comp_idx = _mm_cvtsi128_si64(comp_mask);
		/* F.3 get the first compressed CQE. */
		comp_idx = comp_idx ?
			   __builtin_ctzll(comp_idx) /
				(sizeof(uint16_t) * 8) :
			   MLX5_VPMD_DESCS_PER_LOOP;
		/* E.6 mask out entries after the compressed CQE. */
		mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* E.7 count non-compressed valid CQEs. */
		n = _mm_cvtsi128_si64(invalid_mask);
		n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
			MLX5_VPMD_DESCS_PER_LOOP;
		nocmp_n += n;
		/* D.2 get the final invalid mask. */
		mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
		mask = _mm_sll_epi64(ones, mask);
		invalid_mask = _mm_or_si128(invalid_mask, mask);
		/* D.3 check error in opcode. */
		opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
		opcode = _mm_packs_epi32(opcode, zero);
		opcode = _mm_andnot_si128(invalid_mask, opcode);
		/* D.4 mark if any error is set */
		*err |= _mm_cvtsi128_si64(opcode);
		/* D.5 fill in mbuf - rearm_data and packet_type. */
		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
		if (rxq->hw_timestamp) {
			pkts[pos]->timestamp =
				rte_be_to_cpu_64(cq[pos].timestamp);
			pkts[pos + 1]->timestamp =
				rte_be_to_cpu_64(cq[pos + p1].timestamp);
			pkts[pos + 2]->timestamp =
				rte_be_to_cpu_64(cq[pos + p2].timestamp);
			pkts[pos + 3]->timestamp =
				rte_be_to_cpu_64(cq[pos + p3].timestamp);
		}
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Add up received bytes count. */
		byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
		byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
		byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
		rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
#endif
		/*
		 * Break the loop unless more valid CQE is expected, or if
		 * there's a compressed CQE.
		 */
		if (n != MLX5_VPMD_DESCS_PER_LOOP)
			break;
	}
	/* If no new CQE seen, return without updating cq_db. */
	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
		return rcvd_pkt;
	/* Update the consumer indexes for non-compressed CQEs. */
	assert(nocmp_n <= pkts_n);
	rxq->cq_ci += nocmp_n;
	rxq->rq_pi += nocmp_n;
	rcvd_pkt += nocmp_n;
#ifdef MLX5_PMD_SOFT_COUNTERS
	rxq->stats.ipackets += nocmp_n;
	rxq->stats.ibytes += rcvd_byte;
#endif
	/* Decompress the last CQE if compressed. */
	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
		rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
		/* Return more packets if needed. */
		if (nocmp_n < pkts_n) {
			uint16_t n = rxq->cq_ci - rxq->rq_pi;

			n = RTE_MIN(n, pkts_n - nocmp_n);
			rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
			rxq->rq_pi += n;
			rcvd_pkt += n;
		}
	}
	rte_compiler_barrier();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	return rcvd_pkt;
}

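/*
 * Illustrative sketch of the caller-side contract documented above (the real
 * wrapper lives in mlx5_rxtx_vec.c; everything below except rxq_burst_v() is
 * only for illustration): errored completions still return an mbuf, marked
 * with packet_type == RTE_PTYPE_ALL_MASK, which must be freed before handing
 * the burst to the application.
 *
 *	uint64_t err = 0;
 *	uint16_t i, nb_rx, nb_ok = 0;
 *
 *	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
 *	if (unlikely(err)) {
 *		for (i = 0; i < nb_rx; ++i) {
 *			if (pkts[i]->packet_type == RTE_PTYPE_ALL_MASK)
 *				rte_pktmbuf_free_seg(pkts[i]);
 *			else
 *				pkts[nb_ok++] = pkts[i];
 *		}
 *		nb_rx = nb_ok;
 *	}
 *	return nb_rx;
 */
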
#endif /* RTE_PMD_MLX5_RXTX_VEC_SSE_H_ */