/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IAVF_RXTX_VEC_COMMON_H_
#define _IAVF_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "../common/rx.h"
#include "iavf.h"
#include "iavf_rxtx.h"

static __rte_always_inline uint16_t
reassemble_packets(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[IAVF_VPMD_RX_MAX_BURST];
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}
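/*
 * Free mbufs for Tx descriptors that the hardware has completed.
 * Checks the DD bit on the tx_next_dd threshold descriptor and, when it is
 * set, returns tx_rs_thresh mbufs to their mempool(s), bulking the frees
 * while consecutive mbufs come from the same pool.
 * Returns the number of descriptors freed, or 0 if the DD bit is not set.
 */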
static __rte_always_inline int
iavf_tx_free_bufs(struct iavf_tx_queue *txq)
{
	struct ci_tx_entry *txep;
	uint32_t n;
	uint32_t i;
	int nb_free = 0;
	struct rte_mbuf *m, *free[IAVF_VPMD_TX_MAX_FREE_BUF];

	/* check DD bits on threshold descriptor */
	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE))
		return 0;

	n = txq->tx_rs_thresh;

	/* first buffer to free from S/W ring is at index
	 * tx_next_dd - (tx_rs_thresh - 1)
	 */
	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
	if (likely(m != NULL)) {
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
							(void *)free,
							nb_free);
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
	} else {
		for (i = 1; i < n; i++) {
			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
			if (m)
				rte_mempool_put(m->pool, m);
		}
	}

	/* buffers were freed, update counters */
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
	if (txq->tx_next_dd >= txq->nb_tx_desc)
		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);

	return txq->tx_rs_thresh;
}

static inline void
_iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i])
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
		}
	} else {
		for (i = rxq->rx_tail;
				i != rxq->rxrearm_start;
				i = (i + 1) & mask) {
			if (rxq->sw_ring[i])
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

static inline void
_iavf_tx_queue_release_mbufs_vec(struct iavf_tx_queue *txq)
{
	unsigned int i;
	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);

	if (!txq->sw_ring || txq->nb_tx_free == max_desc)
		return;

	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
	while (i != txq->tx_tail) {
		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
		txq->sw_ring[i].mbuf = NULL;
		if (++i == txq->nb_tx_desc)
			i = 0;
	}
}

static inline int
iavf_rxq_vec_setup_default(struct iavf_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}
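/*
 * Check whether an Rx queue can use the vector datapath: the ring size must
 * be a power of two and a multiple of rx_free_thresh, rx_free_thresh must be
 * at least IAVF_VPMD_RX_MAX_BURST, and protocol extraction must be disabled.
 * Returns -1 if the vector path cannot be used, otherwise IAVF_VECTOR_PATH
 * or IAVF_VECTOR_OFFLOAD_PATH depending on the queue's offloads.
 */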
static inline int
iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;

	if (rxq->rx_free_thresh < IAVF_VPMD_RX_MAX_BURST)
		return -1;

	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;

	if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
		return -1;

	if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH;

	return IAVF_VECTOR_PATH;
}

static inline int
iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->tx_rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
			txq->tx_rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
		return -1;

	if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1;

	if (rte_pmd_iavf_tx_lldp_dynfield_offset > 0) {
		txq->use_ctx = 1;
		return IAVF_VECTOR_CTX_PATH;
	}

	/**
	 * The VLAN TCI needs to be inserted via a context descriptor if
	 * vlan_flag is L2TAG2. Tunneling parameters and other fields also
	 * need to be configured in a context descriptor if the outer
	 * checksum offload is enabled.
	 */
	if (txq->offloads & (IAVF_TX_VECTOR_OFFLOAD | IAVF_TX_VECTOR_OFFLOAD_CTX)) {
		if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD_CTX) {
			if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
				txq->use_ctx = 1;
				return IAVF_VECTOR_CTX_OFFLOAD_PATH;
			} else {
				return -1;
			}
		} else {
			return IAVF_VECTOR_OFFLOAD_PATH;
		}
	} else {
		return IAVF_VECTOR_PATH;
	}
}

static inline int
iavf_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct iavf_rx_queue *rxq;
	int ret;
	int result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = iavf_rx_vec_queue_default(rxq);

		if (ret < 0)
			return -1;
		if (ret > result)
			result = ret;
	}

	return result;
}

static inline int
iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct iavf_tx_queue *txq;
	int ret;
	int result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = iavf_tx_vec_queue_default(txq);

		if (ret < 0)
			return -1;
		if (ret > result)
			result = ret;
	}

	return result;
}

/******************************************************************************
 * If the user knows that a specific offload is not enabled by the
 * application, the corresponding macro can be commented out to save cycles
 * in the fast path.
 * Currently the following two features are supported in the Tx path:
 * 1. checksum offload
 * 2. VLAN/QINQ insertion
 ******************************************************************************/
#define IAVF_TX_CSUM_OFFLOAD
#define IAVF_TX_VLAN_QINQ_OFFLOAD
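/*
 * Fill the offload-related fields of the Tx descriptor's high qword:
 * the MACLEN/IPLEN/L4LEN offsets and checksum commands when
 * IAVF_TX_CSUM_OFFLOAD is defined, and the L2TAG1 insertion command and
 * VLAN TCI when IAVF_TX_VLAN_QINQ_OFFLOAD is defined.
 */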
static __rte_always_inline void
iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt,
			uint64_t *txd_hi)
{
#if defined(IAVF_TX_CSUM_OFFLOAD) || defined(IAVF_TX_VLAN_QINQ_OFFLOAD)
	uint64_t ol_flags = tx_pkt->ol_flags;
#endif
	uint32_t td_cmd = 0;
#ifdef IAVF_TX_CSUM_OFFLOAD
	uint32_t td_offset = 0;
#endif

#ifdef IAVF_TX_CSUM_OFFLOAD
	/* Set MACLEN */
	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
		td_offset |= (tx_pkt->outer_l2_len >> 1)
			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
	else
		td_offset |= (tx_pkt->l2_len >> 1)
			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L3 checksum offloads */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
			td_offset |= (tx_pkt->l3_len >> 2) <<
					IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
		}
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
				IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << IAVF_TXD_QW1_OFFSET_SHIFT;
#endif

#ifdef IAVF_TX_VLAN_QINQ_OFFLOAD
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
				IAVF_TXD_QW1_L2TAG1_SHIFT);
	}
#endif

	*txd_hi |= ((uint64_t)td_cmd) << IAVF_TXD_QW1_CMD_SHIFT;
}
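/*
 * Refill IAVF_RXQ_REARM_THRESH Rx descriptors with fresh mbufs from the
 * queue's mempool and advance the hardware tail pointer. If the bulk
 * allocation fails, the rx_mbuf_alloc_failed counter is bumped and, when the
 * ring is nearly exhausted, a few descriptors are pointed at the fake mbuf
 * so the vector Rx loop stays safe.
 */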
#ifdef RTE_ARCH_X86
static __rte_always_inline void
iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
{
	int i;
	uint16_t rx_id;
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				(void *)rxp,
				IAVF_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
				rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
				rxp[i] = &rxq->fake_mbuf;
				_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i].read),
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			IAVF_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxp[0];
		mb1 = rxp[1];

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr0);
		_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr1);
	}
#else
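	/*
	 * With 16-byte descriptors, each descriptor's read half is only
	 * 16 bytes, so one 512-bit (AVX512) or 256-bit (AVX2) store can
	 * rearm four or two descriptors at a time respectively.
	 */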
#ifdef CC_AVX512_SUPPORT
	if (avx512) {
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
		__m512i dma_addr0_3, dma_addr4_7;
		__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 8 mbufs in one loop */
		for (i = 0; i < IAVF_RXQ_REARM_THRESH;
				i += 8, rxp += 8, rxdp += 8) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m128i vaddr4, vaddr5, vaddr6, vaddr7;
			__m256i vaddr0_1, vaddr2_3;
			__m256i vaddr4_5, vaddr6_7;
			__m512i vaddr0_3, vaddr4_7;

			mb0 = rxp[0];
			mb1 = rxp[1];
			mb2 = rxp[2];
			mb3 = rxp[3];
			mb4 = rxp[4];
			mb5 = rxp[5];
			mb6 = rxp[6];
			mb7 = rxp[7];

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
			vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
			vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
			vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
			vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3, and so on.
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);
			vaddr4_5 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
							vaddr5, 1);
			vaddr6_7 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
							vaddr7, 1);
			vaddr0_3 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
						vaddr2_3, 1);
			vaddr4_7 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
						vaddr6_7, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
			dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);

			/* add headroom to pa values */
			dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
			dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);

			/* flush desc with pa dma_addr */
			_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
			_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
		}
	} else
#endif
	{
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		__m256i dma_addr0_1, dma_addr2_3;
		__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 4 mbufs in one loop */
		for (i = 0; i < IAVF_RXQ_REARM_THRESH;
				i += 4, rxp += 4, rxdp += 4) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m256i vaddr0_1, vaddr2_3;

			mb0 = rxp[0];
			mb1 = rxp[1];
			mb2 = rxp[2];
			mb3 = rxp[3];

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
			dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

			/* add headroom to pa values */
			dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
			dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

			/* flush desc with pa dma_addr */
			_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
			_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
		}
	}

#endif

	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
#endif

#endif