/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_ring.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
	struct virtnet_rx *rxvq = rxq;
	struct virtqueue *vq = rxvq->vq;

	return virtqueue_nused(vq) >= offset;
}

void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
	vq->vq_free_cnt += num;
	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
}

void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_split.ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_split.ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
	uint32_t s = mbuf->pkt_len;
	struct rte_ether_addr *ea;

	stats->bytes += s;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count zeros, and offset into correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}

static inline void
virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
{
	VIRTIO_DUMP_PACKET(m, m->data_len);

	virtio_update_packet_stats(&rxvq->stats, m);
}

static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
				  struct rte_mbuf **rx_pkts,
				  uint32_t *len,
				  uint16_t num)
{
	struct rte_mbuf *cookie;
	uint16_t used_idx;
	uint16_t id;
	struct vring_packed_desc *desc;
	uint16_t i;

	desc = vq->vq_packed.ring.desc;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx;
		/* desc_is_used has a load-acquire or rte_io_rmb inside
		 * and waits for a used descriptor in the virtqueue.
		 */
		if (!desc_is_used(&desc[used_idx], vq))
			return i;
		len[i] = desc[used_idx].len;
		id = desc[used_idx].id;
		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;

		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
	}

	return i;
}

static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num ; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

static uint16_t
virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
			uint32_t *len,
			uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx = 0;
	uint16_t i;

	if (unlikely(num == 0))
		return 0;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		/* Desc idx same as used idx */
		uep = &vq->vq_split.ring.used->ring[used_idx];
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq->vq_descx[used_idx].cookie = NULL;
	}

	vq_ring_free_inorder(vq, used_idx, i);
	return i;
}

static inline int
virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t head_idx, idx, i = 0;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
	start_dp = vq->vq_split.ring.desc;

	while (i < num) {
		idx = head_idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;

		start_dp[idx].addr =
				VIRTIO_MBUF_ADDR(cookies[i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len =
				cookies[i]->buf_len -
				RTE_PKTMBUF_HEADROOM +
				hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;

		vq_update_avail_ring(vq, idx);
		head_idx++;
		i++;
	}

	vq->vq_desc_head_idx += num;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
				uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp = vq->vq_split.ring.desc;
	uint16_t idx, i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
		return -EFAULT;

	for (i = 0; i < num; i++) {
		idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr =
			VIRTIO_MBUF_ADDR(cookie[i], vq) +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len =
			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
			hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;
		vq->vq_desc_head_idx = start_dp[idx].next;
		vq_update_avail_ring(vq, idx);
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
			break;
		}
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

	return 0;
}

static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
				     struct rte_mbuf **cookie, uint16_t num)
{
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	uint16_t flags = vq->vq_packed.cached_flags;
	struct virtio_hw *hw = vq->hw;
	struct vq_desc_extra *dxp;
	uint16_t idx;
	int i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	for (i = 0; i < num; i++) {
		idx = vq->vq_avail_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
					+ hw->vtnet_hdr_size;

		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;

		virtqueue_store_flags_packed(&start_dp[idx], flags,
					     hw->weak_barriers);

		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
			flags = vq->vq_packed.cached_flags;
		}
	}
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
			m->l4_len)) {
		struct rte_ipv4_hdr *iph;
		struct rte_ipv6_hdr *ip6h;
		struct rte_tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
		uint32_t tmp;

		iph = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
				m->l3_len);
		} else {
			ip6h = (struct rte_ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;
		}

		/* calculate the new phdr checksum not including ip_paylen */
		prev_cksum = th->cksum;
		tmp = prev_cksum;
		tmp += ip_paylen;
		tmp = (tmp & 0xffff) + (tmp >> 16);
		new_cksum = tmp;

		/* replace it in the packet */
		th->cksum = new_cksum;
	}
}

static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	struct virtio_net_hdr *hdr;
	uint16_t idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t i = 0;

	idx = vq->vq_desc_head_idx;
	start_dp = vq->vq_split.ring.desc;

	while (i < num) {
		idx = idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;
		virtio_update_packet_stats(&txvq->stats, cookies[i]);

		hdr = rte_pktmbuf_mtod_offset(cookies[i],
				struct virtio_net_hdr *, -head_size);

		/* if offload disabled, hdr is not zeroed yet, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
		else
			virtqueue_xmit_offload(hdr, cookies[i], true);

		start_dp[idx].addr =
			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
		start_dp[idx].len = cookies[i]->data_len + head_size;
		start_dp[idx].flags = 0;

		vq_update_avail_ring(vq, idx);

		idx++;
		i++;
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}
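
/* Fast path for the packed ring: the caller has already checked that the
 * virtio-net header fits in the mbuf headroom, so the header is prepended
 * in place and the whole packet is described by a single packed descriptor.
 */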
static inline void
virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
				   struct rte_mbuf *cookie,
				   int in_order)
{
	struct virtqueue *vq = txvq->vq;
	struct vring_packed_desc *dp;
	struct vq_desc_extra *dxp;
	uint16_t idx, id, flags;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;

	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
	idx = vq->vq_avail_idx;
	dp = &vq->vq_packed.ring.desc[idx];

	dxp = &vq->vq_descx[id];
	dxp->ndescs = 1;
	dxp->cookie = cookie;

	flags = vq->vq_packed.cached_flags;

	/* prepend cannot fail, checked by caller */
	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
				      -head_size);

	/* if offload disabled, hdr is not zeroed yet, do it now */
	if (!vq->hw->has_tx_offload)
		virtqueue_clear_net_hdr(hdr);
	else
		virtqueue_xmit_offload(hdr, cookie, true);

	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
	dp->len = cookie->data_len + head_size;
	dp->id = id;

	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

	vq->vq_free_cnt--;

	if (!in_order) {
		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}

	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
}

static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			uint16_t needed, int use_indirect, int can_push,
			int in_order)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	bool prepend_header = false;
	struct virtio_net_hdr *hdr;

	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	if (in_order)
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
	else
		dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_split.ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
					      -head_size);
		prepend_header = true;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

	do {
		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len = cookie->data_len;
		if (prepend_header) {
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		}
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	if (use_indirect)
		idx = vq->vq_split.ring.desc[head_idx].next;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = idx;
	vq_update_avail_ring(vq, head_idx);

	if (!in_order) {
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = idx;
	}
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq && hw->cvq->vq) {
		rte_spinlock_init(&hw->cvq->lock);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
	}
}

int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq;
	uint16_t rx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
		return -EINVAL;
	}

	rx_free_thresh = rx_conf->rx_free_thresh;
	if (rx_free_thresh == 0)
		rx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);

	if (rx_free_thresh & 0x3) {
		RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
			" (rx_free_thresh=%u port=%u queue=%u)\n",
			rx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	if (rx_free_thresh >= vq->vq_nentries) {
		RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
			"number of RX entries (%u)."
			" (rx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries,
			rx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}
	vq->vq_free_thresh = rx_free_thresh;

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	rxvq = &vq->rxq;
	rxvq->queue_id = queue_idx;
	rxvq->mpool = mp;
	dev->data->rx_queues[queue_idx] = rxvq;

	return 0;
}

int
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_rx *rxvq = &vq->rxq;
	struct rte_mbuf *m;
	uint16_t desc_idx;
	int error, nbufs, i;
	bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);

	PMD_INIT_FUNC_TRACE();

	/* Allocate blank mbufs for each rx descriptor */
	nbufs = 0;

	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
		for (desc_idx = 0; desc_idx < vq->vq_nentries;
		     desc_idx++) {
			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
			vq->vq_split.ring.desc[desc_idx].flags =
				VRING_DESC_F_WRITE;
		}

		virtio_rxq_vec_setup(rxvq);
	}

	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
	     desc_idx++) {
		vq->sw_ring[vq->vq_nentries + desc_idx] =
			&rxvq->fake_mbuf;
	}

	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxvq);
			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		}
	} else if (!vtpci_packed_queue(vq->hw) && in_order) {
		if (!virtqueue_full(vq)) {
			uint16_t free_cnt = vq->vq_free_cnt;
			struct rte_mbuf *pkts[free_cnt];

			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
				free_cnt)) {
				error = virtqueue_enqueue_refill_inorder(vq,
						pkts,
						free_cnt);
				if (unlikely(error)) {
					for (i = 0; i < free_cnt; i++)
						rte_pktmbuf_free(pkts[i]);
				}
			}

			nbufs += free_cnt;
			vq_update_avail_idx(vq);
		}
	} else {
		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);
			if (m == NULL)
				break;

			/* Enqueue allocated buffers */
			if (vtpci_packed_queue(vq->hw))
				error = virtqueue_enqueue_recv_refill_packed(vq,
						&m, 1);
			else
				error = virtqueue_enqueue_recv_refill(vq,
						&m, 1);
			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		if (!vtpci_packed_queue(vq->hw))
			vq_update_avail_idx(vq);
	}

	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

	VIRTQUEUE_DUMP(vq);

	return 0;
}

/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	struct virtnet_tx *txvq;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
		return -EINVAL;
	}

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	txvq = &vq->txq;
	txvq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries - 3,
			tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;
	return 0;
}

int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
				uint16_t queue_idx)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];

	PMD_INIT_FUNC_TRACE();

	if (!vtpci_packed_queue(hw)) {
		if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
	}

	VIRTQUEUE_DUMP(vq);

	return 0;
}

static inline void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;
	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	if (vtpci_packed_queue(vq->hw))
		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
	else
		error = virtqueue_enqueue_recv_refill(vq, &m, 1);

	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

static inline void
virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return 0;

	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum) < 0)
				return -EINVAL;
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0)) {
			return -EINVAL;
		}

		/* Update mss length in mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
			case VIRTIO_NET_HDR_GSO_TCPV6:
				m->ol_flags |= PKT_RX_LRO | \
					PKT_RX_L4_CKSUM_NONE;
				break;
			default:
				return -EINVAL;
		}
	}

	return 0;
}

#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);

	num = likely(nb_used <= nb_pkts) ?
		nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num ; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_inorder(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued;
	uint32_t seg_num;
	uint32_t seg_res;
	uint32_t hdr_size;
	int32_t i;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);
	nb_used = RTE_MIN(nb_used, nb_pkts);
	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	nb_enqueued = 0;
	seg_num = 1;
	seg_res = 0;
	hdr_size = hw->vtnet_hdr_size;

	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			seg_num = header->num_buffers;
			if (seg_num == 0)
				seg_num = 1;
		} else {
			seg_num = 1;
		}

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (vq->hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need its remaining segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
				   "Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	int32_t i;

	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	num = likely(nb_used <= nb_pkts) ?
		nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) %
				DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);
		seg_num = header->num_buffers;
		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need its remaining segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
							 rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
				   "Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts_packed(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	int32_t i;
	int error;

	if (unlikely(hw->started == 0))
		return nb_rx;

	num = nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
			 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need its remaining segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);
		uint16_t extra_idx = 0;

		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
				len, rcv_cnt);
		if (unlikely(rcv_cnt == 0)) {
			PMD_RX_LOG(ERR, "Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}

		while (extra_idx < rcv_cnt) {
			rxm = rcv_pkts[extra_idx];

			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[extra_idx]);
			rxm->data_len = (uint16_t)(len[extra_idx]);

			prev->next = rxm;
			prev = rxm;
			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
			extra_idx += 1;
		}
		seg_res -= rcv_cnt;
		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbuf for the used descriptor */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx;
	int error;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		error = rte_validate_tx_offload(m);
		if (unlikely(error)) {
			rte_errno = -error;
			break;
		}
#endif

		/* Do VLAN tag insertion */
		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&m);
			/* rte_vlan_insert() may change pointer
			 * even in the case of failure
			 */
			tx_pkts[nb_tx] = m;

			if (unlikely(error)) {
				rte_errno = -error;
				break;
			}
		}

		error = rte_net_intel_cksum_prepare(m);
		if (unlikely(error)) {
			rte_errno = -error;
			break;
		}

		if (m->ol_flags & PKT_TX_TCP_SEG)
			virtio_tso_fix_cksum(m);
	}

	return nb_tx;
}

uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_tx = 0;
	bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	if (nb_pkts > vq->vq_free_cnt)
		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
					   in_order);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;
		/* How many main ring entries are needed for this Tx?
		 * indirect => 1
		 * any_layout => number of segments
		 * default => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* Positive value indicates it needs free vring descriptors */
		if (unlikely(need > 0)) {
			virtio_xmit_cleanup_packed(vq, need, in_order);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		if (can_push)
			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
		else
			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
						      use_indirect, 0,
						      in_order);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;

		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * indirect => 1
		 * default => number of segments + 1
		 */
		slots = use_indirect ?
			1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* Positive value indicates it needs free vring descriptors */
		if (unlikely(need > 0)) {
			nb_used = virtqueue_nused(vq);

			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup(vq, need);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
			can_push, 0);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

static __rte_always_inline int
virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
{
	uint16_t nb_used, nb_clean, nb_descs;

	nb_descs = vq->vq_free_cnt + need;
	nb_used = virtqueue_nused(vq);
	nb_clean = RTE_MIN(need, (int)nb_used);

	virtio_xmit_cleanup_inorder(vq, nb_clean);

	return nb_descs - vq->vq_free_cnt;
}

uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
	struct rte_mbuf *inorder_pkts[nb_pkts];
	int need;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	VIRTQUEUE_DUMP(vq);
	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup_inorder(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int slots;

		/* optimize ring usage */
		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		     rte_mbuf_refcnt_read(txm) == 1 &&
		     RTE_MBUF_DIRECT(txm) &&
		     txm->nb_segs == 1 &&
		     rte_pktmbuf_headroom(txm) >= hdr_size &&
		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
			inorder_pkts[nb_inorder_pkts] = txm;
			nb_inorder_pkts++;

			continue;
		}

		if (nb_inorder_pkts) {
			need = nb_inorder_pkts - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				need = virtio_xmit_try_cleanup_inorder(vq,
								       need);
				if (unlikely(need > 0)) {
					PMD_TX_LOG(ERR,
						   "No free tx descriptors to "
						   "transmit");
					break;
				}
			}
			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
						       nb_inorder_pkts);
			nb_inorder_pkts = 0;
		}

		slots = txm->nb_segs + 1;
		need = slots - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq, slots);

			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}
		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);

		virtio_update_packet_stats(&txvq->stats, txm);
	}
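
	/* Any packets still batched in inorder_pkts are flushed below; on a
	 * shortfall of free descriptors, nb_tx is trimmed so the return value
	 * reflects how many packets were actually queued.
	 */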
	/* Transmit all inorder packets */
	if (nb_inorder_pkts) {
		need = nb_inorder_pkts - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq,
							       need);
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				nb_inorder_pkts = vq->vq_free_cnt;
				nb_tx -= need;
			}
		}

		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
					       nb_inorder_pkts);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	VIRTQUEUE_DUMP(vq);

	return nb_tx;
}

#ifndef CC_AVX512_SUPPORT
uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
			    struct rte_mbuf **rx_pkts __rte_unused,
			    uint16_t nb_pkts __rte_unused)
{
	return 0;
}

uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
			    struct rte_mbuf **tx_pkts __rte_unused,
			    uint16_t nb_pkts __rte_unused)
{
	return 0;
}
#endif /* ifndef CC_AVX512_SUPPORT */