/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _IAVF_RXTX_VEC_COMMON_H_
#define _IAVF_RXTX_VEC_COMMON_H_
#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "../common/rx.h"
#include "iavf.h"
#include "iavf_rxtx.h"

/*
 * Stitch scattered receptions back into complete packets: a non-zero
 * split_flags entry means more segments of the same packet follow, and a
 * partially assembled chain is carried across bursts via rxq->pkt_first_seg
 * and rxq->pkt_last_seg. Completed packets are compacted back into rx_bufs[]
 * and their count is returned.
 */
static __rte_always_inline uint16_t
reassemble_packets(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
		   uint16_t nb_bufs, uint8_t *split_flags)
{
	struct rte_mbuf *pkts[IAVF_VPMD_RX_MAX_BURST];
	struct rte_mbuf *start = rxq->pkt_first_seg;
	struct rte_mbuf *end = rxq->pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last packet of the set */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= rxq->crc_len;
				if (end->data_len > rxq->crc_len) {
					end->data_len -= rxq->crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (rxq->crc_len -
							end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			end = start = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += rxq->crc_len;
			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
		}
	}

	/* save the partial packet for next time */
	rxq->pkt_first_seg = start;
	rxq->pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

static inline int
iavf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
{
	return (txq->iavf_tx_ring[idx].cmd_type_offset_bsz &
			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) ==
			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
}

static __rte_always_inline int
iavf_tx_free_bufs(struct ci_tx_queue *txq)
{
	return ci_tx_free_bufs(txq, iavf_tx_desc_done);
}

static inline void
_iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
{
	const unsigned int mask = rxq->nb_rx_desc - 1;
	unsigned int i;

	if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc)
		return;

	/* free all mbufs that are valid in the ring */
	if (rxq->rxrearm_nb == 0) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i])
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
		}
	} else {
		for (i = rxq->rx_tail;
		     i != rxq->rxrearm_start;
		     i = (i + 1) & mask) {
			if (rxq->sw_ring[i])
				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
		}
	}

	rxq->rxrearm_nb = rxq->nb_rx_desc;

	/* set all entries to NULL */
	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
}

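/*
 * Build the 64-bit mbuf "rearm" template used by the vector Rx paths: the
 * constant per-queue fields (data_off, refcnt, nb_segs, port) are written
 * into a zeroed template mbuf and the packed rearm_data word is cached in
 * rxq->mbuf_initializer, so freshly allocated mbufs can be initialized with
 * a single 8-byte store.
 */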
static inline int
iavf_rxq_vec_setup_default(struct iavf_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	return 0;
}

static inline int
iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
{
	if (!rxq)
		return -1;

	if (!rte_is_power_of_2(rxq->nb_rx_desc))
		return -1;

	if (rxq->rx_free_thresh < IAVF_VPMD_RX_MAX_BURST)
		return -1;

	if (rxq->nb_rx_desc % rxq->rx_free_thresh)
		return -1;

	if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
		return -1;

	if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
		return IAVF_VECTOR_OFFLOAD_PATH;

	return IAVF_VECTOR_PATH;
}

static inline int
iavf_tx_vec_queue_default(struct ci_tx_queue *txq)
{
	if (!txq)
		return -1;

	if (txq->tx_rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
	    txq->tx_rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
		return -1;

	if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
		return -1;

	if (rte_pmd_iavf_tx_lldp_dynfield_offset > 0) {
		txq->use_ctx = 1;
		return IAVF_VECTOR_CTX_PATH;
	}

	/**
	 * The VLAN tci must be inserted via a context descriptor when
	 * vlan_flag is L2TAG2. Tunneling parameters and other fields also
	 * need to be configured in a context descriptor when the outer
	 * checksum offload is enabled.
	 */
	if (txq->offloads & (IAVF_TX_VECTOR_OFFLOAD | IAVF_TX_VECTOR_OFFLOAD_CTX)) {
		if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD_CTX) {
			if (txq->vlan_flag == IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
				txq->use_ctx = 1;
				return IAVF_VECTOR_CTX_OFFLOAD_PATH;
			} else {
				return -1;
			}
		} else {
			return IAVF_VECTOR_OFFLOAD_PATH;
		}
	} else {
		return IAVF_VECTOR_PATH;
	}
}

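/*
 * Device-level checks: run the per-queue check on every configured queue.
 * Any queue that cannot use a vector path disables vector Rx/Tx for the
 * whole port (-1); otherwise the most demanding per-queue result (e.g. the
 * offload or context-descriptor path) is returned.
 */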
static inline int
iavf_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct iavf_rx_queue *rxq;
	int ret;
	int result = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		ret = iavf_rx_vec_queue_default(rxq);

		if (ret < 0)
			return -1;
		if (ret > result)
			result = ret;
	}

	return result;
}

static inline int
iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
	int i;
	struct ci_tx_queue *txq;
	int ret;
	int result = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		ret = iavf_tx_vec_queue_default(txq);

		if (ret < 0)
			return -1;
		if (ret > result)
			result = ret;
	}

	return result;
}

/******************************************************************************
 * If the application is known not to enable a specific offload, the
 * corresponding macro can be commented out to skip that work in the fast path.
 * Currently the following two features are supported in the TX path:
 * 1. checksum offload
 * 2. VLAN/QINQ insertion
 ******************************************************************************/
#define IAVF_TX_CSUM_OFFLOAD
#define IAVF_TX_VLAN_QINQ_OFFLOAD

static __rte_always_inline void
iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt,
			uint64_t *txd_hi)
{
#if defined(IAVF_TX_CSUM_OFFLOAD) || defined(IAVF_TX_VLAN_QINQ_OFFLOAD)
	uint64_t ol_flags = tx_pkt->ol_flags;
#endif
	uint32_t td_cmd = 0;
#ifdef IAVF_TX_CSUM_OFFLOAD
	uint32_t td_offset = 0;
#endif

#ifdef IAVF_TX_CSUM_OFFLOAD
	/* Set MACLEN */
	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
		td_offset |= (tx_pkt->outer_l2_len >> 1)
			     << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
	else
		td_offset |= (tx_pkt->l2_len >> 1)
			     << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L3 checksum offloads */
	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
			td_offset |= (tx_pkt->l3_len >> 2) <<
				     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
		}
	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
		td_offset |= (tx_pkt->l3_len >> 2) <<
			     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
	}

	/* Enable L4 checksum offloads */
	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
	case RTE_MBUF_F_TX_TCP_CKSUM:
		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
			     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case RTE_MBUF_F_TX_SCTP_CKSUM:
		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
			     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case RTE_MBUF_F_TX_UDP_CKSUM:
		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
			     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}

	*txd_hi |= ((uint64_t)td_offset) << IAVF_TXD_QW1_OFFSET_SHIFT;
#endif

#ifdef IAVF_TX_VLAN_QINQ_OFFLOAD
	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
			    IAVF_TXD_QW1_L2TAG1_SHIFT);
	}
#endif

	*txd_hi |= ((uint64_t)td_cmd) << IAVF_TXD_QW1_CMD_SHIFT;
}

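/*
 * Common descriptor-rearm helper for the x86 vector Rx paths: allocate
 * IAVF_RXQ_REARM_THRESH mbufs from the queue's mempool, write their
 * IOVA + headroom into the Rx descriptors starting at rxq->rxrearm_start
 * (using 128-, 256- or 512-bit stores depending on descriptor layout and
 * the avx512 flag), then advance rxrearm_start and update the queue tail
 * register. If the mempool allocation fails, the alloc-failure counter is
 * increased and, when the ring is nearly exhausted, a few descriptors are
 * pointed at the queue's fake_mbuf.
 */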
#ifdef RTE_ARCH_X86
static __rte_always_inline void
iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
{
	int i;
	uint16_t rx_id;
	volatile union iavf_rx_desc *rxdp;
	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxp,
				 IAVF_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			__m128i dma_addr0;

			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
				rxp[i] = &rxq->fake_mbuf;
				_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i].read),
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			IAVF_RXQ_REARM_THRESH;
		return;
	}

#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
	struct rte_mbuf *mb0, *mb1;
	__m128i dma_addr0, dma_addr1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
					  RTE_PKTMBUF_HEADROOM);
	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxp += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxp[0];
		mb1 = rxp[1];

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				 offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr0);
		_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr1);
	}
#else
#ifdef CC_AVX512_SUPPORT
	if (avx512) {
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
		__m512i dma_addr0_3, dma_addr4_7;
		__m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 8 mbufs in one loop */
		for (i = 0; i < IAVF_RXQ_REARM_THRESH;
				i += 8, rxp += 8, rxdp += 8) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m128i vaddr4, vaddr5, vaddr6, vaddr7;
			__m256i vaddr0_1, vaddr2_3;
			__m256i vaddr4_5, vaddr6_7;
			__m512i vaddr0_3, vaddr4_7;

			mb0 = rxp[0];
			mb1 = rxp[1];
			mb2 = rxp[2];
			mb3 = rxp[3];
			mb4 = rxp[4];
			mb5 = rxp[5];
			mb6 = rxp[6];
			mb7 = rxp[7];

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					 offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
			vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
			vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
			vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
			vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3, and so on.
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);
			vaddr4_5 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
							vaddr5, 1);
			vaddr6_7 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
							vaddr7, 1);
			vaddr0_3 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
						   vaddr2_3, 1);
			vaddr4_7 =
				_mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
						   vaddr6_7, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, vaddr0_3);
			dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, vaddr4_7);

			/* add headroom to pa values */
			dma_addr0_3 = _mm512_add_epi64(dma_addr0_3, hdr_room);
			dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);

			/* flush desc with pa dma_addr */
			_mm512_store_si512((__m512i *)&rxdp->read, dma_addr0_3);
			_mm512_store_si512((__m512i *)&(rxdp + 4)->read, dma_addr4_7);
		}
	} else
#endif
	{
		struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
		__m256i dma_addr0_1, dma_addr2_3;
		__m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
		/* Initialize the mbufs in vector, process 4 mbufs in one loop */
		for (i = 0; i < IAVF_RXQ_REARM_THRESH;
				i += 4, rxp += 4, rxdp += 4) {
			__m128i vaddr0, vaddr1, vaddr2, vaddr3;
			__m256i vaddr0_1, vaddr2_3;

			mb0 = rxp[0];
			mb1 = rxp[1];
			mb2 = rxp[2];
			mb3 = rxp[3];

			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
					 offsetof(struct rte_mbuf, buf_addr) + 8);
			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
			vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
			vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);

			/**
			 * merge 0 & 1, by casting 0 to 256-bit and inserting 1
			 * into the high lanes. Similarly for 2 & 3
			 */
			vaddr0_1 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
							vaddr1, 1);
			vaddr2_3 =
				_mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
							vaddr3, 1);

			/* convert pa to dma_addr hdr/data */
			dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1);
			dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3);

			/* add headroom to pa values */
			dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room);
			dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room);

			/* flush desc with pa dma_addr */
			_mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1);
			_mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3);
		}
	}

#endif

	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;

	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
#endif

#endif