/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2024 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "ngbe_type.h"
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
#include "ngbe_rxtx_vec_common.h"

#include <rte_vect.h>

/*
 * Refill RTE_NGBE_RXQ_REARM_THRESH Rx descriptors with fresh mbufs from
 * the mempool, starting at rxq->rxrearm_start, and bump the hardware
 * tail pointer. Two descriptors are written per loop iteration using
 * 128-bit stores. On mempool exhaustion the alloc-failed counter is
 * updated and, if the ring is nearly starved, the next few descriptors
 * are pointed at the queue's fake_mbuf with a zeroed address so the NIC
 * does not DMA into stale buffers.
 */
static inline void
ngbe_rxq_rearm(struct ngbe_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile struct ngbe_rx_desc *rxdp;
	struct ngbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	/* keep only the low 64-bit lane (packet buffer address); the high
	 * lane (header buffer address) is cleared before the store
	 */
	const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 (void *)rxep,
				 RTE_NGBE_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_NGBE_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
			/* ring almost empty and no mbufs available: park the
			 * next burst's descriptors on the fake mbuf with a
			 * NULL DMA address so the Rx path stays safe
			 */
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_NGBE_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128((__m128i *)(uintptr_t)&rxdp[i],
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_NGBE_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_NGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data: replicate buf_iova into
		 * both 64-bit lanes
		 */
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* set Header Buffer Address to zero */
		dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
		dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);

		/* flush desc with pa dma_addr */
		_mm_store_si128((__m128i *)(uintptr_t)rxdp++, dma_addr0);
		_mm_store_si128((__m128i *)(uintptr_t)rxdp++, dma_addr1);
	}

	rxq->rxrearm_start += RTE_NGBE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_NGBE_RXQ_REARM_THRESH;

	/* tail register points at the last rearmed descriptor */
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));

	/* Update the tail pointer on the NIC */
	ngbe_set32(rxq->rdt_reg_addr, rx_id);
}

/*
 * Derive ol_flags (RSS/FDIR hash, VLAN, L3/L4 checksum status) for four
 * packets at once from their raw descriptors, then write each mbuf's
 * rearm_data + ol_flags with a single 16-byte store.
 *
 * descs[4]   - the four raw Rx descriptors for this iteration
 * mbuf_init  - template for the mbuf rearm_data area (low 64 bits)
 * vlan_flags - per-queue VLAN ol_flags byte (e.g. RX_VLAN[_STRIPPED]);
 *              must fit in the low 8 bits (checked by the caller)
 * rx_pkts    - the four mbufs to update
 */
static inline void
desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
	struct rte_mbuf **rx_pkts)
{
	__m128i ptype0, ptype1, vtag0, vtag1, csum, vp;
	__m128i rearm0, rearm1, rearm2, rearm3;

	/* mask everything except rss type */
	const __m128i rsstype_msk = _mm_set_epi16(0x0000, 0x0000, 0x0000, 0x0000,
			0x000F, 0x000F, 0x000F, 0x000F);

	/* mask the lower byte of ol_flags */
	const __m128i ol_flags_msk = _mm_set_epi16(0x0000, 0x0000, 0x0000, 0x0000,
			0x00FF, 0x00FF, 0x00FF, 0x00FF);

	/* map rss type to rss hash flag: the 4-bit rss type indexes this
	 * byte table via pshufb
	 */
	const __m128i rss_flags = _mm_set_epi8(RTE_MBUF_F_RX_FDIR, 0, 0, 0,
			0, 0, 0, RTE_MBUF_F_RX_RSS_HASH,
			RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH, 0,
			RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, 0);

	/* mask everything except vlan present and l4/ip csum error */
	const __m128i vlan_csum_msk =
		_mm_set_epi16((NGBE_RXD_ERR_L4CS | NGBE_RXD_ERR_IPCS) >> 16,
			      (NGBE_RXD_ERR_L4CS | NGBE_RXD_ERR_IPCS) >> 16,
			      (NGBE_RXD_ERR_L4CS | NGBE_RXD_ERR_IPCS) >> 16,
			      (NGBE_RXD_ERR_L4CS | NGBE_RXD_ERR_IPCS) >> 16,
			      NGBE_RXD_STAT_VLAN, NGBE_RXD_STAT_VLAN,
			      NGBE_RXD_STAT_VLAN, NGBE_RXD_STAT_VLAN);

	/* map vlan present and l4/ip csum error to ol_flags (low byte).
	 * Index bits: [vlan present | l4 err | ip err] after the shifts below.
	 */
	const __m128i vlan_csum_map_lo = _mm_set_epi8(0, 0, 0, 0,
		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_BAD,
		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
		vlan_flags | RTE_MBUF_F_RX_IP_CKSUM_GOOD,
		0, 0, 0, 0,
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
		RTE_MBUF_F_RX_IP_CKSUM_BAD,
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
		RTE_MBUF_F_RX_IP_CKSUM_GOOD);

	/* high byte of ol_flags: carries L4_CKSUM_GOOD, pre-shifted right so
	 * it fits a byte, restored by the slli below
	 */
	const __m128i vlan_csum_map_hi = _mm_set_epi8(0, 0, 0, 0,
		0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
		0, 0, 0, 0,
		0, RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
		RTE_MBUF_F_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));

	/* keep only the 4-bit shuffle index per packet */
	const __m128i vtag_msk = _mm_set_epi16(0x0000, 0x0000, 0x0000, 0x0000,
					       0x000F, 0x000F, 0x000F, 0x000F);

	/* gather the low (ptype/rss) and high (status/error) 16-bit words
	 * of all four descriptors
	 */
	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);

	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
	ptype0 = _mm_and_si128(ptype0, rsstype_msk);
	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);

	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
	vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);

	/* csum bits are in the most significant, to use shuffle we need to
	 * shift them. Change mask from 0xc000 to 0x0003.
	 */
	csum = _mm_srli_epi16(vtag1, 14);

	/* Change vlan-present mask from 0x20 to 0x08. */
	vp = _mm_srli_epi16(vtag1, 2);

	/* now or the most significant 64 bits containing the checksum
	 * flags with the vlan present flags.
	 */
	csum = _mm_srli_si128(csum, 8);
	vtag1 = _mm_or_si128(csum, vtag1);
	vtag1 = _mm_or_si128(vtag1, vp);
	vtag1 = _mm_and_si128(vtag1, vtag_msk);

	/* convert STAT_VLAN, ERR_IPCS, ERR_L4CS to ol_flags */
	vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
	vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));

	vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
	vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
	vtag1 = _mm_or_si128(vtag0, vtag1);

	/* merge the rss-hash flags in as well */
	vtag1 = _mm_or_si128(ptype0, vtag1);

	/*
	 * At this point, we have the 4 sets of flags in the low 64-bits
	 * of vtag1 (4x16).
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	 */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);

	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

/*
 * Extract the hardware packet-type id (PTID) from four descriptors and
 * store the decoded rte packet_type into each mbuf.
 */
static inline void
desc_to_ptype_v(__m128i descs[4], uint16_t pkt_type_mask,
		struct rte_mbuf **rx_pkts)
{
	__m128i ptype_mask = _mm_set_epi32(pkt_type_mask, pkt_type_mask,
					   pkt_type_mask, pkt_type_mask);

	__m128i ptype0 = _mm_unpacklo_epi32(descs[0], descs[2]);
	__m128i ptype1 = _mm_unpacklo_epi32(descs[1], descs[3]);

	/* interleave low 32 bits,
	 * now we have 4 ptypes in a XMM register
	 */
	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);

	/* shift right by NGBE_RXD_PTID_SHIFT, and apply ptype mask */
	ptype0 = _mm_and_si128(_mm_srli_epi32(ptype0, NGBE_RXD_PTID_SHIFT),
			       ptype_mask);

	rx_pkts[0]->packet_type = ngbe_decode_ptype(_mm_extract_epi32(ptype0, 0));
	rx_pkts[1]->packet_type = ngbe_decode_ptype(_mm_extract_epi32(ptype0, 1));
	rx_pkts[2]->packet_type = ngbe_decode_ptype(_mm_extract_epi32(ptype0, 2));
	rx_pkts[3]->packet_type = ngbe_decode_ptype(_mm_extract_epi32(ptype0, 3));
}

/*
 * vPMD raw receive routine, only accept(nb_pkts >= RTE_NGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_NGBE_DESCS_PER_LOOP, just return no packet
 * - floor align nb_pkts to a RTE_NGBE_DESC_PER_LOOP power-of-two
 *
 * Processes descriptors four at a time. If split_packet is non-NULL, the
 * per-packet end-of-packet flags are recorded there so the caller can
 * reassemble scattered packets; otherwise multi-segment handling is skipped.
 * Returns the number of packets received (stops at the first group with
 * fewer than four DD bits set).
 */
static inline uint16_t
_recv_raw_pkts_vec(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile struct ngbe_rx_desc *rxdp;
	struct ngbe_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	__m128i crc_adjust = _mm_set_epi16(0, 0, 0, /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,             /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0);         /* ignore pkt_type field */

	/*
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	 * call above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;
	__m128i mbuf_init;
	uint8_t vlan_flags;

	/*
	 * Under the circumstance that `rx_tail` wrap back to zero
	 * and the advance speed of `rx_tail` is greater than `rxrearm_start`,
	 * `rx_tail` will catch up with `rxrearm_start` and surpass it.
	 * This may cause some mbufs be reused by application.
	 *
	 * So we need to make some restrictions to ensure that
	 * `rx_tail` will not exceed `rxrearm_start`.
	 */
	nb_pkts = RTE_MIN(nb_pkts, RTE_NGBE_RXQ_REARM_THRESH);

	/* nb_pkts has to be floor-aligned to RTE_NGBE_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_NGBE_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_NGBE_RXQ_REARM_THRESH)
		ngbe_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->qw1.lo.status &
				rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
		13, 12,      /* octet 12~13, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		13, 12,      /* octet 12~13, low 16 bits pkt_len */
		0xFF, 0xFF,  /* skip 32 bit pkt_type */
		0xFF, 0xFF);
	/*
	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* ensure these 2 flags are in the lower 8 bits */
	RTE_BUILD_BUG_ON((RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED) > UINT8_MAX);
	vlan_flags = rxq->vlan_flags & UINT8_MAX;

	/* A. load 4 packet in one loop
	 * [A*. mask out 4 unused dirty field in desc]
	 * B. copy 4 mbuf point from swring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info. from desc to mbuf
	 */
	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_NGBE_DESCS_PER_LOOP,
			rxdp += RTE_NGBE_DESCS_PER_LOOP) {
		__m128i descs[RTE_NGBE_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
		__m128i mbp1;
#if defined(RTE_ARCH_X86_64)
		__m128i mbp2;
#endif

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);

		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load desc[3] */
		descs[3] = _mm_loadu_si128((__m128i *)(uintptr_t)(rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf points */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif

		/* A.1 load desc[2-0] */
		descs[2] = _mm_loadu_si128((__m128i *)(uintptr_t)(rxdp + 2));
		rte_compiler_barrier();
		descs[1] = _mm_loadu_si128((__m128i *)(uintptr_t)(rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128((__m128i *)(uintptr_t)(rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf point into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		/* set ol_flags with vlan packet type */
		desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* C.2 get 4 pkts staterr value */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
				pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
				pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask =
				_mm_set_epi8(0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0xFF, 0xFF, 0xFF, 0xFF,
					     0x04, 0x0C, 0x00, 0x08);

			/* and with mask to extract bits, flipping 1-0 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* the staterr values are not in order, as the count
			 * of dd bits doesn't care. However, for end of
			 * packet tracking, we do care, so shuffle. This also
			 * compresses the 32-bit values to 8-bit
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_NGBE_DESCS_PER_LOOP;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
				pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				pkt_mb1);

		desc_to_ptype_v(descs, NGBE_PTID_MASK, &rx_pkts[pos]);

		/* C.4 calc available number of desc: popcount of the DD bits;
		 * fewer than 4 means the ring ran dry, so stop
		 */
		var = rte_popcount64(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_NGBE_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer (nb_rx_desc is a power of two,
	 * so the AND performs the wrap)
	 */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * vPMD receive routine, only accept(nb_pkts >= RTE_NGBE_DESCS_PER_LOOP)
 *
 * Notice:
 * - nb_pkts < RTE_NGBE_DESCS_PER_LOOP, just return no packet
 * - floor align nb_pkts to a RTE_NGBE_DESC_PER_LOOP power-of-two
 */
uint16_t
ngbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles scattered packets
 *
 * Notice:
 * - nb_pkts < RTE_NGBE_DESCS_PER_LOOP, just return no packet
 * - floor align nb_pkts to a RTE_NGBE_DESC_PER_LOOP power-of-two
 */
static uint16_t
ngbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	struct ngbe_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_NGBE_MAX_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;
	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly*/
	unsigned int i = 0;
	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble then*/
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
		&split_flags[i]);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 * Splits the request into bursts of at most RTE_NGBE_MAX_RX_BURST,
 * stopping early when a burst comes back short (ring empty).
 */
uint16_t
ngbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > RTE_NGBE_MAX_RX_BURST) {
		uint16_t burst;

		burst = ngbe_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      RTE_NGBE_MAX_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_NGBE_MAX_RX_BURST)
			return retval;
	}

	return retval + ngbe_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      nb_pkts);
}

/*
 * Write one Tx descriptor: buffer DMA address in the low quadword,
 * command flags, data length and packet length in the high quadword
 * (pkt_len shifted by 45 per the ngbe descriptor layout — see
 * ngbe_tx_desc definition).
 */
static inline void
vtx1(volatile struct ngbe_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
	__m128i descriptor = _mm_set_epi64x((uint64_t)pkt->pkt_len << 45 |
			flags | pkt->data_len,
			pkt->buf_iova + pkt->data_off);
	_mm_store_si128((__m128i *)(uintptr_t)txdp, descriptor);
}

/* Write nb_pkts consecutive Tx descriptors, one per mbuf. */
static inline void
vtx(volatile struct ngbe_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

/*
 * Vector Tx burst: enqueue up to tx_free_thresh single-segment packets.
 * Frees completed descriptors when the free count drops below the
 * threshold, handles the ring wrap by splitting the burst in two, and
 * finally bumps the hardware tail register. Returns the number of
 * packets actually queued.
 */
uint16_t
ngbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
	volatile struct ngbe_tx_desc *txdp;
	struct ngbe_tx_entry_v *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = NGBE_TXD_FLAGS;
	uint64_t rs = NGBE_TXD_FLAGS;
	int i;

	/* cross rx_thresh boundary is not allowed */
	nb_pkts = RTE_MIN(nb_pkts, txq->tx_free_thresh);

	if (txq->nb_tx_free < txq->tx_free_thresh)
		ngbe_tx_free_bufs(txq);

	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring_v[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	/* n = descriptors left until the end of the ring */
	n = (uint16_t)(txq->nb_tx_desc - tx_id);
	nb_commit = nb_pkts;
	if (nb_commit >= n) {
		/* fill to the end of the ring, then wrap to index 0 */
		tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;

		/* avoid reach the end of ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring_v[tx_id];
	}

	tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);

	txq->tx_tail = tx_id;

	ngbe_set32(txq->tdt_reg_addr, txq->tx_tail);

	return nb_pkts;
}

/* Thin wrappers binding the common vector-queue helpers into the
 * ngbe_txq_ops vtable and the driver's setup entry points.
 */
static void __rte_cold
ngbe_tx_queue_release_mbufs_vec(struct ngbe_tx_queue *txq)
{
	_ngbe_tx_queue_release_mbufs_vec(txq);
}

void __rte_cold
ngbe_rx_queue_release_mbufs_vec(struct ngbe_rx_queue *rxq)
{
	_ngbe_rx_queue_release_mbufs_vec(rxq);
}

static void __rte_cold
ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
{
	_ngbe_tx_free_swring_vec(txq);
}

static void __rte_cold
ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
{
	_ngbe_reset_tx_queue_vec(txq);
}

static const struct ngbe_txq_ops vec_txq_ops = {
	.release_mbufs = ngbe_tx_queue_release_mbufs_vec,
	.free_swring = ngbe_tx_free_swring,
	.reset = ngbe_reset_tx_queue,
};

int __rte_cold
ngbe_rxq_vec_setup(struct ngbe_rx_queue *rxq)
{
	return ngbe_rxq_vec_setup_default(rxq);
}

int __rte_cold
ngbe_txq_vec_setup(struct ngbe_tx_queue *txq)
{
	return ngbe_txq_vec_setup_default(txq, &vec_txq_ops);
}

int __rte_cold
ngbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return ngbe_rx_vec_dev_conf_condition_check_default(dev);
}