1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <stdint.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <string.h> 9 #include <errno.h> 10 11 #include <rte_cycles.h> 12 #include <rte_memory.h> 13 #include <rte_branch_prediction.h> 14 #include <rte_mempool.h> 15 #include <rte_malloc.h> 16 #include <rte_mbuf.h> 17 #include <rte_ether.h> 18 #include <ethdev_driver.h> 19 #include <rte_prefetch.h> 20 #include <rte_string_fns.h> 21 #include <rte_errno.h> 22 #include <rte_byteorder.h> 23 #include <rte_net.h> 24 #include <rte_ip.h> 25 #include <rte_udp.h> 26 #include <rte_tcp.h> 27 28 #include "virtio_logs.h" 29 #include "virtio_ethdev.h" 30 #include "virtio.h" 31 #include "virtqueue.h" 32 #include "virtio_rxtx.h" 33 #include "virtio_rxtx_simple.h" 34 #include "virtio_ring.h" 35 36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP 37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len) 38 #else 39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0) 40 #endif 41 42 int 43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset) 44 { 45 struct virtnet_rx *rxvq = rxq; 46 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); 47 48 return virtqueue_nused(vq) >= offset; 49 } 50 51 void 52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num) 53 { 54 vq->vq_free_cnt += num; 55 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1); 56 } 57 58 void 59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) 60 { 61 struct vring_desc *dp, *dp_tail; 62 struct vq_desc_extra *dxp; 63 uint16_t desc_idx_last = desc_idx; 64 65 dp = &vq->vq_split.ring.desc[desc_idx]; 66 dxp = &vq->vq_descx[desc_idx]; 67 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs); 68 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) { 69 while (dp->flags & VRING_DESC_F_NEXT) { 70 desc_idx_last = dp->next; 71 dp = &vq->vq_split.ring.desc[dp->next]; 72 } 73 } 74 dxp->ndescs = 0; 75 76 /* 77 * We must append the existing free chain, if any, to the end of 78 * newly freed chain. If the virtqueue was completely used, then 79 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above). 80 */ 81 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) { 82 vq->vq_desc_head_idx = desc_idx; 83 } else { 84 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx]; 85 dp_tail->next = desc_idx; 86 } 87 88 vq->vq_desc_tail_idx = desc_idx_last; 89 dp->next = VQ_RING_DESC_CHAIN_END; 90 } 91 92 void 93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf) 94 { 95 uint32_t s = mbuf->pkt_len; 96 struct rte_ether_addr *ea; 97 98 stats->bytes += s; 99 100 if (s == 64) { 101 stats->size_bins[1]++; 102 } else if (s > 64 && s < 1024) { 103 uint32_t bin; 104 105 /* count zeros, and offset into correct bin */ 106 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; 107 stats->size_bins[bin]++; 108 } else { 109 if (s < 64) 110 stats->size_bins[0]++; 111 else if (s < 1519) 112 stats->size_bins[6]++; 113 else 114 stats->size_bins[7]++; 115 } 116 117 ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *); 118 if (rte_is_multicast_ether_addr(ea)) { 119 if (rte_is_broadcast_ether_addr(ea)) 120 stats->broadcast++; 121 else 122 stats->multicast++; 123 } 124 } 125 126 static inline void 127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m) 128 { 129 VIRTIO_DUMP_PACKET(m, m->data_len); 130 131 virtio_update_packet_stats(&rxvq->stats, m); 132 } 133 134 static uint16_t 135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq, 136 struct rte_mbuf **rx_pkts, 137 uint32_t *len, 138 uint16_t num) 139 { 140 struct rte_mbuf *cookie; 141 uint16_t used_idx; 142 uint16_t id; 143 struct vring_packed_desc *desc; 144 uint16_t i; 145 146 desc = vq->vq_packed.ring.desc; 147 148 for (i = 0; i < num; i++) { 149 used_idx = vq->vq_used_cons_idx; 150 /* desc_is_used has a load-acquire or rte_io_rmb inside 151 * and wait for used desc in virtqueue. 152 */ 153 if (!desc_is_used(&desc[used_idx], vq)) 154 return i; 155 len[i] = desc[used_idx].len; 156 id = desc[used_idx].id; 157 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie; 158 if (unlikely(cookie == NULL)) { 159 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", 160 vq->vq_used_cons_idx); 161 break; 162 } 163 rte_prefetch0(cookie); 164 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); 165 rx_pkts[i] = cookie; 166 167 vq->vq_free_cnt++; 168 vq->vq_used_cons_idx++; 169 if (vq->vq_used_cons_idx >= vq->vq_nentries) { 170 vq->vq_used_cons_idx -= vq->vq_nentries; 171 vq->vq_packed.used_wrap_counter ^= 1; 172 } 173 } 174 175 return i; 176 } 177 178 static uint16_t 179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, 180 uint32_t *len, uint16_t num) 181 { 182 struct vring_used_elem *uep; 183 struct rte_mbuf *cookie; 184 uint16_t used_idx, desc_idx; 185 uint16_t i; 186 187 /* Caller does the check */ 188 for (i = 0; i < num ; i++) { 189 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); 190 uep = &vq->vq_split.ring.used->ring[used_idx]; 191 desc_idx = (uint16_t) uep->id; 192 len[i] = uep->len; 193 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie; 194 195 if (unlikely(cookie == NULL)) { 196 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", 197 vq->vq_used_cons_idx); 198 break; 199 } 200 201 rte_prefetch0(cookie); 202 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); 203 rx_pkts[i] = cookie; 204 vq->vq_used_cons_idx++; 205 vq_ring_free_chain(vq, desc_idx); 206 vq->vq_descx[desc_idx].cookie = NULL; 207 } 208 209 return i; 210 } 211 212 static uint16_t 213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq, 214 struct rte_mbuf **rx_pkts, 215 uint32_t *len, 216 uint16_t num) 217 { 218 struct vring_used_elem *uep; 219 struct rte_mbuf *cookie; 220 uint16_t used_idx = 0; 221 uint16_t i; 222 223 if (unlikely(num == 0)) 224 return 0; 225 226 for (i = 0; i < num; i++) { 227 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1); 228 /* Desc idx same as used idx */ 229 uep = &vq->vq_split.ring.used->ring[used_idx]; 230 len[i] = uep->len; 231 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie; 232 233 if (unlikely(cookie == NULL)) { 234 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", 235 vq->vq_used_cons_idx); 236 break; 237 } 238 239 rte_prefetch0(cookie); 240 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); 241 rx_pkts[i] = cookie; 242 vq->vq_used_cons_idx++; 243 vq->vq_descx[used_idx].cookie = NULL; 244 } 245 246 vq_ring_free_inorder(vq, used_idx, i); 247 return i; 248 } 249 250 static inline int 251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq, 252 struct rte_mbuf **cookies, 253 uint16_t num) 254 { 255 struct vq_desc_extra *dxp; 256 struct virtio_hw *hw = vq->hw; 257 struct vring_desc *start_dp; 258 uint16_t head_idx, idx, i = 0; 259 260 if (unlikely(vq->vq_free_cnt == 0)) 261 return -ENOSPC; 262 if (unlikely(vq->vq_free_cnt < num)) 263 return -EMSGSIZE; 264 265 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1); 266 start_dp = vq->vq_split.ring.desc; 267 268 while (i < num) { 269 idx = head_idx & (vq->vq_nentries - 1); 270 dxp = &vq->vq_descx[idx]; 271 dxp->cookie = (void *)cookies[i]; 272 dxp->ndescs = 1; 273 274 start_dp[idx].addr = cookies[i]->buf_iova + 275 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; 276 start_dp[idx].len = cookies[i]->buf_len - 277 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size; 278 start_dp[idx].flags = VRING_DESC_F_WRITE; 279 280 vq_update_avail_ring(vq, idx); 281 head_idx++; 282 i++; 283 } 284 285 vq->vq_desc_head_idx += num; 286 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); 287 return 0; 288 } 289 290 static inline int 291 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie, 292 uint16_t num) 293 { 294 struct vq_desc_extra *dxp; 295 struct virtio_hw *hw = vq->hw; 296 struct vring_desc *start_dp = vq->vq_split.ring.desc; 297 uint16_t idx, i; 298 299 if (unlikely(vq->vq_free_cnt == 0)) 300 return -ENOSPC; 301 if (unlikely(vq->vq_free_cnt < num)) 302 return -EMSGSIZE; 303 304 if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries)) 305 return -EFAULT; 306 307 for (i = 0; i < num; i++) { 308 idx = vq->vq_desc_head_idx; 309 dxp = &vq->vq_descx[idx]; 310 dxp->cookie = (void *)cookie[i]; 311 dxp->ndescs = 1; 312 313 start_dp[idx].addr = cookie[i]->buf_iova + 314 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; 315 start_dp[idx].len = cookie[i]->buf_len - 316 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size; 317 start_dp[idx].flags = VRING_DESC_F_WRITE; 318 vq->vq_desc_head_idx = start_dp[idx].next; 319 vq_update_avail_ring(vq, idx); 320 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) { 321 vq->vq_desc_tail_idx = vq->vq_desc_head_idx; 322 break; 323 } 324 } 325 326 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); 327 328 return 0; 329 } 330 331 static inline void 332 virtqueue_refill_single_packed(struct virtqueue *vq, 333 struct vring_packed_desc *dp, 334 struct rte_mbuf *cookie) 335 { 336 uint16_t flags = vq->vq_packed.cached_flags; 337 struct virtio_hw *hw = vq->hw; 338 339 dp->addr = cookie->buf_iova + 340 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; 341 dp->len = cookie->buf_len - 342 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size; 343 344 virtqueue_store_flags_packed(dp, flags, 345 hw->weak_barriers); 346 347 if (++vq->vq_avail_idx >= vq->vq_nentries) { 348 vq->vq_avail_idx -= vq->vq_nentries; 349 vq->vq_packed.cached_flags ^= 350 VRING_PACKED_DESC_F_AVAIL_USED; 351 flags = vq->vq_packed.cached_flags; 352 } 353 } 354 355 static inline int 356 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq, 357 struct rte_mbuf **cookie, uint16_t num) 358 { 359 struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc; 360 struct vq_desc_extra *dxp; 361 uint16_t idx; 362 int i; 363 364 if (unlikely(vq->vq_free_cnt == 0)) 365 return -ENOSPC; 366 if (unlikely(vq->vq_free_cnt < num)) 367 return -EMSGSIZE; 368 369 for (i = 0; i < num; i++) { 370 idx = vq->vq_avail_idx; 371 dxp = &vq->vq_descx[idx]; 372 dxp->cookie = (void *)cookie[i]; 373 dxp->ndescs = 1; 374 375 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]); 376 } 377 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); 378 return 0; 379 } 380 381 static inline int 382 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, 383 struct rte_mbuf **cookie, uint16_t num) 384 { 385 struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc; 386 struct vq_desc_extra *dxp; 387 uint16_t idx, did; 388 int i; 389 390 if (unlikely(vq->vq_free_cnt == 0)) 391 return -ENOSPC; 392 if (unlikely(vq->vq_free_cnt < num)) 393 return -EMSGSIZE; 394 395 for (i = 0; i < num; i++) { 396 idx = vq->vq_avail_idx; 397 did = start_dp[idx].id; 398 dxp = &vq->vq_descx[did]; 399 dxp->cookie = (void *)cookie[i]; 400 dxp->ndescs = 1; 401 402 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]); 403 } 404 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); 405 return 0; 406 } 407 408 /* When doing TSO, the IP length is not included in the pseudo header 409 * checksum of the packet given to the PMD, but for virtio it is 410 * expected. 411 */ 412 static void 413 virtio_tso_fix_cksum(struct rte_mbuf *m) 414 { 415 /* common case: header is not fragmented */ 416 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len + 417 m->l4_len)) { 418 struct rte_ipv4_hdr *iph; 419 struct rte_ipv6_hdr *ip6h; 420 struct rte_tcp_hdr *th; 421 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen; 422 uint32_t tmp; 423 424 iph = rte_pktmbuf_mtod_offset(m, 425 struct rte_ipv4_hdr *, m->l2_len); 426 th = RTE_PTR_ADD(iph, m->l3_len); 427 if ((iph->version_ihl >> 4) == 4) { 428 iph->hdr_checksum = 0; 429 iph->hdr_checksum = rte_ipv4_cksum(iph); 430 ip_len = iph->total_length; 431 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) - 432 m->l3_len); 433 } else { 434 ip6h = (struct rte_ipv6_hdr *)iph; 435 ip_paylen = ip6h->payload_len; 436 } 437 438 /* calculate the new phdr checksum not including ip_paylen */ 439 prev_cksum = th->cksum; 440 tmp = prev_cksum; 441 tmp += ip_paylen; 442 tmp = (tmp & 0xffff) + (tmp >> 16); 443 new_cksum = tmp; 444 445 /* replace it in the packet */ 446 th->cksum = new_cksum; 447 } 448 } 449 450 451 452 453 static inline void 454 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq, 455 struct rte_mbuf **cookies, 456 uint16_t num) 457 { 458 struct vq_desc_extra *dxp; 459 struct virtqueue *vq = virtnet_txq_to_vq(txvq); 460 struct vring_desc *start_dp; 461 struct virtio_net_hdr *hdr; 462 uint16_t idx; 463 int16_t head_size = vq->hw->vtnet_hdr_size; 464 uint16_t i = 0; 465 466 idx = vq->vq_desc_head_idx; 467 start_dp = vq->vq_split.ring.desc; 468 469 while (i < num) { 470 idx = idx & (vq->vq_nentries - 1); 471 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)]; 472 dxp->cookie = (void *)cookies[i]; 473 dxp->ndescs = 1; 474 virtio_update_packet_stats(&txvq->stats, cookies[i]); 475 476 hdr = rte_pktmbuf_mtod_offset(cookies[i], 477 struct virtio_net_hdr *, -head_size); 478 479 /* if offload disabled, hdr is not zeroed yet, do it now */ 480 if (!vq->hw->has_tx_offload) 481 virtqueue_clear_net_hdr(hdr); 482 else 483 virtqueue_xmit_offload(hdr, cookies[i]); 484 485 start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size; 486 start_dp[idx].len = cookies[i]->data_len + head_size; 487 start_dp[idx].flags = 0; 488 489 490 vq_update_avail_ring(vq, idx); 491 492 idx++; 493 i++; 494 }; 495 496 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); 497 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1); 498 } 499 500 static inline void 501 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq, 502 struct rte_mbuf *cookie, 503 int in_order) 504 { 505 struct virtqueue *vq = virtnet_txq_to_vq(txvq); 506 struct vring_packed_desc *dp; 507 struct vq_desc_extra *dxp; 508 uint16_t idx, id, flags; 509 int16_t head_size = vq->hw->vtnet_hdr_size; 510 struct virtio_net_hdr *hdr; 511 512 id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; 513 idx = vq->vq_avail_idx; 514 dp = &vq->vq_packed.ring.desc[idx]; 515 516 dxp = &vq->vq_descx[id]; 517 dxp->ndescs = 1; 518 dxp->cookie = cookie; 519 520 flags = vq->vq_packed.cached_flags; 521 522 /* prepend cannot fail, checked by caller */ 523 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *, 524 -head_size); 525 526 /* if offload disabled, hdr is not zeroed yet, do it now */ 527 if (!vq->hw->has_tx_offload) 528 virtqueue_clear_net_hdr(hdr); 529 else 530 virtqueue_xmit_offload(hdr, cookie); 531 532 dp->addr = rte_mbuf_data_iova(cookie) - head_size; 533 dp->len = cookie->data_len + head_size; 534 dp->id = id; 535 536 if (++vq->vq_avail_idx >= vq->vq_nentries) { 537 vq->vq_avail_idx -= vq->vq_nentries; 538 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED; 539 } 540 541 vq->vq_free_cnt--; 542 543 if (!in_order) { 544 vq->vq_desc_head_idx = dxp->next; 545 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) 546 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; 547 } 548 549 virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers); 550 } 551 552 static inline void 553 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, 554 uint16_t needed, int use_indirect, int can_push, 555 int in_order) 556 { 557 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; 558 struct vq_desc_extra *dxp; 559 struct virtqueue *vq = virtnet_txq_to_vq(txvq); 560 struct vring_desc *start_dp; 561 uint16_t seg_num = cookie->nb_segs; 562 uint16_t head_idx, idx; 563 int16_t head_size = vq->hw->vtnet_hdr_size; 564 bool prepend_header = false; 565 struct virtio_net_hdr *hdr; 566 567 head_idx = vq->vq_desc_head_idx; 568 idx = head_idx; 569 if (in_order) 570 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)]; 571 else 572 dxp = &vq->vq_descx[idx]; 573 dxp->cookie = (void *)cookie; 574 dxp->ndescs = needed; 575 576 start_dp = vq->vq_split.ring.desc; 577 578 if (can_push) { 579 /* prepend cannot fail, checked by caller */ 580 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *, 581 -head_size); 582 prepend_header = true; 583 584 /* if offload disabled, it is not zeroed below, do it now */ 585 if (!vq->hw->has_tx_offload) 586 virtqueue_clear_net_hdr(hdr); 587 } else if (use_indirect) { 588 /* setup tx ring slot to point to indirect 589 * descriptor list stored in reserved region. 590 * 591 * the first slot in indirect ring is already preset 592 * to point to the header in reserved region 593 */ 594 start_dp[idx].addr = txvq->virtio_net_hdr_mem + 595 RTE_PTR_DIFF(&txr[idx].tx_indir, txr); 596 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc); 597 start_dp[idx].flags = VRING_DESC_F_INDIRECT; 598 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; 599 600 /* loop below will fill in rest of the indirect elements */ 601 start_dp = txr[idx].tx_indir; 602 idx = 1; 603 } else { 604 /* setup first tx ring slot to point to header 605 * stored in reserved region. 606 */ 607 start_dp[idx].addr = txvq->virtio_net_hdr_mem + 608 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); 609 start_dp[idx].len = vq->hw->vtnet_hdr_size; 610 start_dp[idx].flags = VRING_DESC_F_NEXT; 611 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; 612 613 idx = start_dp[idx].next; 614 } 615 616 if (vq->hw->has_tx_offload) 617 virtqueue_xmit_offload(hdr, cookie); 618 619 do { 620 start_dp[idx].addr = rte_mbuf_data_iova(cookie); 621 start_dp[idx].len = cookie->data_len; 622 if (prepend_header) { 623 start_dp[idx].addr -= head_size; 624 start_dp[idx].len += head_size; 625 prepend_header = false; 626 } 627 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0; 628 idx = start_dp[idx].next; 629 } while ((cookie = cookie->next) != NULL); 630 631 if (use_indirect) 632 idx = vq->vq_split.ring.desc[head_idx].next; 633 634 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); 635 636 vq->vq_desc_head_idx = idx; 637 vq_update_avail_ring(vq, head_idx); 638 639 if (!in_order) { 640 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) 641 vq->vq_desc_tail_idx = idx; 642 } 643 } 644 645 void 646 virtio_dev_cq_start(struct rte_eth_dev *dev) 647 { 648 struct virtio_hw *hw = dev->data->dev_private; 649 650 if (hw->cvq) { 651 rte_spinlock_init(&hw->cvq->lock); 652 VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq)); 653 } 654 } 655 656 int 657 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, 658 uint16_t queue_idx, 659 uint16_t nb_desc, 660 unsigned int socket_id __rte_unused, 661 const struct rte_eth_rxconf *rx_conf, 662 struct rte_mempool *mp) 663 { 664 uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; 665 struct virtio_hw *hw = dev->data->dev_private; 666 struct virtqueue *vq = hw->vqs[vq_idx]; 667 struct virtnet_rx *rxvq; 668 uint16_t rx_free_thresh; 669 uint16_t buf_size; 670 const char *error; 671 672 PMD_INIT_FUNC_TRACE(); 673 674 if (rx_conf->rx_deferred_start) { 675 PMD_INIT_LOG(ERR, "Rx deferred start is not supported"); 676 return -EINVAL; 677 } 678 679 buf_size = virtio_rx_mem_pool_buf_size(mp); 680 if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size, 681 hw->rx_ol_scatter, &error)) { 682 PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s", 683 queue_idx, error); 684 return -EINVAL; 685 } 686 687 rx_free_thresh = rx_conf->rx_free_thresh; 688 if (rx_free_thresh == 0) 689 rx_free_thresh = 690 RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH); 691 692 if (rx_free_thresh & 0x3) { 693 RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four." 694 " (rx_free_thresh=%u port=%u queue=%u)\n", 695 rx_free_thresh, dev->data->port_id, queue_idx); 696 return -EINVAL; 697 } 698 699 if (rx_free_thresh >= vq->vq_nentries) { 700 RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the " 701 "number of RX entries (%u)." 702 " (rx_free_thresh=%u port=%u queue=%u)\n", 703 vq->vq_nentries, 704 rx_free_thresh, dev->data->port_id, queue_idx); 705 return -EINVAL; 706 } 707 vq->vq_free_thresh = rx_free_thresh; 708 709 /* 710 * For split ring vectorized path descriptors number must be 711 * equal to the ring size. 712 */ 713 if (nb_desc > vq->vq_nentries || 714 (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) { 715 nb_desc = vq->vq_nentries; 716 } 717 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); 718 719 rxvq = &vq->rxq; 720 rxvq->queue_id = queue_idx; 721 rxvq->mpool = mp; 722 dev->data->rx_queues[queue_idx] = rxvq; 723 724 return 0; 725 } 726 727 int 728 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) 729 { 730 uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; 731 struct virtio_hw *hw = dev->data->dev_private; 732 struct virtqueue *vq = hw->vqs[vq_idx]; 733 struct virtnet_rx *rxvq = &vq->rxq; 734 struct rte_mbuf *m; 735 uint16_t desc_idx; 736 int error, nbufs, i; 737 bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER); 738 739 PMD_INIT_FUNC_TRACE(); 740 741 /* Allocate blank mbufs for the each rx descriptor */ 742 nbufs = 0; 743 744 if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) { 745 for (desc_idx = 0; desc_idx < vq->vq_nentries; 746 desc_idx++) { 747 vq->vq_split.ring.avail->ring[desc_idx] = desc_idx; 748 vq->vq_split.ring.desc[desc_idx].flags = 749 VRING_DESC_F_WRITE; 750 } 751 752 virtio_rxq_vec_setup(rxvq); 753 } 754 755 memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf)); 756 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) 757 vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf; 758 759 if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) { 760 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { 761 virtio_rxq_rearm_vec(rxvq); 762 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH; 763 } 764 } else if (!virtio_with_packed_queue(vq->hw) && in_order) { 765 if ((!virtqueue_full(vq))) { 766 uint16_t free_cnt = vq->vq_free_cnt; 767 struct rte_mbuf *pkts[free_cnt]; 768 769 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts, 770 free_cnt)) { 771 error = virtqueue_enqueue_refill_inorder(vq, 772 pkts, 773 free_cnt); 774 if (unlikely(error)) { 775 for (i = 0; i < free_cnt; i++) 776 rte_pktmbuf_free(pkts[i]); 777 } else { 778 nbufs += free_cnt; 779 } 780 } 781 782 vq_update_avail_idx(vq); 783 } 784 } else { 785 while (!virtqueue_full(vq)) { 786 m = rte_mbuf_raw_alloc(rxvq->mpool); 787 if (m == NULL) 788 break; 789 790 /* Enqueue allocated buffers */ 791 if (virtio_with_packed_queue(vq->hw)) 792 error = virtqueue_enqueue_recv_refill_packed_init(vq, 793 &m, 1); 794 else 795 error = virtqueue_enqueue_recv_refill(vq, 796 &m, 1); 797 if (error) { 798 rte_pktmbuf_free(m); 799 break; 800 } 801 nbufs++; 802 } 803 804 if (!virtio_with_packed_queue(vq->hw)) 805 vq_update_avail_idx(vq); 806 } 807 808 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); 809 810 VIRTQUEUE_DUMP(vq); 811 812 return 0; 813 } 814 815 /* 816 * struct rte_eth_dev *dev: Used to update dev 817 * uint16_t nb_desc: Defaults to values read from config space 818 * unsigned int socket_id: Used to allocate memzone 819 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine 820 * uint16_t queue_idx: Just used as an index in dev txq list 821 */ 822 int 823 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, 824 uint16_t queue_idx, 825 uint16_t nb_desc, 826 unsigned int socket_id __rte_unused, 827 const struct rte_eth_txconf *tx_conf) 828 { 829 uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; 830 struct virtio_hw *hw = dev->data->dev_private; 831 struct virtqueue *vq = hw->vqs[vq_idx]; 832 struct virtnet_tx *txvq; 833 uint16_t tx_free_thresh; 834 835 PMD_INIT_FUNC_TRACE(); 836 837 if (tx_conf->tx_deferred_start) { 838 PMD_INIT_LOG(ERR, "Tx deferred start is not supported"); 839 return -EINVAL; 840 } 841 842 if (nb_desc == 0 || nb_desc > vq->vq_nentries) 843 nb_desc = vq->vq_nentries; 844 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); 845 846 txvq = &vq->txq; 847 txvq->queue_id = queue_idx; 848 849 tx_free_thresh = tx_conf->tx_free_thresh; 850 if (tx_free_thresh == 0) 851 tx_free_thresh = 852 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH); 853 854 if (tx_free_thresh >= (vq->vq_nentries - 3)) { 855 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the " 856 "number of TX entries minus 3 (%u)." 857 " (tx_free_thresh=%u port=%u queue=%u)\n", 858 vq->vq_nentries - 3, 859 tx_free_thresh, dev->data->port_id, queue_idx); 860 return -EINVAL; 861 } 862 863 vq->vq_free_thresh = tx_free_thresh; 864 865 dev->data->tx_queues[queue_idx] = txvq; 866 return 0; 867 } 868 869 int 870 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, 871 uint16_t queue_idx) 872 { 873 uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; 874 struct virtio_hw *hw = dev->data->dev_private; 875 struct virtqueue *vq = hw->vqs[vq_idx]; 876 877 PMD_INIT_FUNC_TRACE(); 878 879 if (!virtio_with_packed_queue(hw)) { 880 if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) 881 vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0; 882 } 883 884 VIRTQUEUE_DUMP(vq); 885 886 return 0; 887 } 888 889 static inline void 890 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) 891 { 892 int error; 893 /* 894 * Requeue the discarded mbuf. This should always be 895 * successful since it was just dequeued. 896 */ 897 if (virtio_with_packed_queue(vq->hw)) 898 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1); 899 else 900 error = virtqueue_enqueue_recv_refill(vq, &m, 1); 901 902 if (unlikely(error)) { 903 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf"); 904 rte_pktmbuf_free(m); 905 } 906 } 907 908 static inline void 909 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m) 910 { 911 int error; 912 913 error = virtqueue_enqueue_refill_inorder(vq, &m, 1); 914 if (unlikely(error)) { 915 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf"); 916 rte_pktmbuf_free(m); 917 } 918 } 919 920 /* Optionally fill offload information in structure */ 921 static inline int 922 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) 923 { 924 struct rte_net_hdr_lens hdr_lens; 925 uint32_t hdrlen, ptype; 926 int l4_supported = 0; 927 928 /* nothing to do */ 929 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE) 930 return 0; 931 932 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; 933 934 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK); 935 m->packet_type = ptype; 936 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP || 937 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP || 938 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) 939 l4_supported = 1; 940 941 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 942 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len; 943 if (hdr->csum_start <= hdrlen && l4_supported) { 944 m->ol_flags |= PKT_RX_L4_CKSUM_NONE; 945 } else { 946 /* Unknown proto or tunnel, do sw cksum. We can assume 947 * the cksum field is in the first segment since the 948 * buffers we provided to the host are large enough. 949 * In case of SCTP, this will be wrong since it's a CRC 950 * but there's nothing we can do. 951 */ 952 uint16_t csum = 0, off; 953 954 if (rte_raw_cksum_mbuf(m, hdr->csum_start, 955 rte_pktmbuf_pkt_len(m) - hdr->csum_start, 956 &csum) < 0) 957 return -EINVAL; 958 if (likely(csum != 0xffff)) 959 csum = ~csum; 960 off = hdr->csum_offset + hdr->csum_start; 961 if (rte_pktmbuf_data_len(m) >= off + 1) 962 *rte_pktmbuf_mtod_offset(m, uint16_t *, 963 off) = csum; 964 } 965 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) { 966 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD; 967 } 968 969 /* GSO request, save required information in mbuf */ 970 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 971 /* Check unsupported modes */ 972 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) || 973 (hdr->gso_size == 0)) { 974 return -EINVAL; 975 } 976 977 /* Update mss lengthes in mbuf */ 978 m->tso_segsz = hdr->gso_size; 979 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 980 case VIRTIO_NET_HDR_GSO_TCPV4: 981 case VIRTIO_NET_HDR_GSO_TCPV6: 982 m->ol_flags |= PKT_RX_LRO | \ 983 PKT_RX_L4_CKSUM_NONE; 984 break; 985 default: 986 return -EINVAL; 987 } 988 } 989 990 return 0; 991 } 992 993 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc)) 994 uint16_t 995 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) 996 { 997 struct virtnet_rx *rxvq = rx_queue; 998 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); 999 struct virtio_hw *hw = vq->hw; 1000 struct rte_mbuf *rxm; 1001 uint16_t nb_used, num, nb_rx; 1002 uint32_t len[VIRTIO_MBUF_BURST_SZ]; 1003 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; 1004 int error; 1005 uint32_t i, nb_enqueued; 1006 uint32_t hdr_size; 1007 struct virtio_net_hdr *hdr; 1008 1009 nb_rx = 0; 1010 if (unlikely(hw->started == 0)) 1011 return nb_rx; 1012 1013 nb_used = virtqueue_nused(vq); 1014 1015 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts; 1016 if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) 1017 num = VIRTIO_MBUF_BURST_SZ; 1018 if (likely(num > DESC_PER_CACHELINE)) 1019 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); 1020 1021 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num); 1022 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); 1023 1024 nb_enqueued = 0; 1025 hdr_size = hw->vtnet_hdr_size; 1026 1027 for (i = 0; i < num ; i++) { 1028 rxm = rcv_pkts[i]; 1029 1030 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); 1031 1032 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { 1033 PMD_RX_LOG(ERR, "Packet drop"); 1034 nb_enqueued++; 1035 virtio_discard_rxbuf(vq, rxm); 1036 rxvq->stats.errors++; 1037 continue; 1038 } 1039 1040 rxm->port = rxvq->port_id; 1041 rxm->data_off = RTE_PKTMBUF_HEADROOM; 1042 rxm->ol_flags = 0; 1043 rxm->vlan_tci = 0; 1044 1045 rxm->pkt_len = (uint32_t)(len[i] - hdr_size); 1046 rxm->data_len = (uint16_t)(len[i] - hdr_size); 1047 1048 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr + 1049 RTE_PKTMBUF_HEADROOM - hdr_size); 1050 1051 if (hw->vlan_strip) 1052 rte_vlan_strip(rxm); 1053 1054 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) { 1055 virtio_discard_rxbuf(vq, rxm); 1056 rxvq->stats.errors++; 1057 continue; 1058 } 1059 1060 virtio_rx_stats_updated(rxvq, rxm); 1061 1062 rx_pkts[nb_rx++] = rxm; 1063 } 1064 1065 rxvq->stats.packets += nb_rx; 1066 1067 /* Allocate new mbuf for the used descriptor */ 1068 if (likely(!virtqueue_full(vq))) { 1069 uint16_t free_cnt = vq->vq_free_cnt; 1070 struct rte_mbuf *new_pkts[free_cnt]; 1071 1072 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, 1073 free_cnt) == 0)) { 1074 error = virtqueue_enqueue_recv_refill(vq, new_pkts, 1075 free_cnt); 1076 if (unlikely(error)) { 1077 for (i = 0; i < free_cnt; i++) 1078 rte_pktmbuf_free(new_pkts[i]); 1079 } 1080 nb_enqueued += free_cnt; 1081 } else { 1082 struct rte_eth_dev *dev = 1083 &rte_eth_devices[rxvq->port_id]; 1084 dev->data->rx_mbuf_alloc_failed += free_cnt; 1085 } 1086 } 1087 1088 if (likely(nb_enqueued)) { 1089 vq_update_avail_idx(vq); 1090 1091 if (unlikely(virtqueue_kick_prepare(vq))) { 1092 virtqueue_notify(vq); 1093 PMD_RX_LOG(DEBUG, "Notified"); 1094 } 1095 } 1096 1097 return nb_rx; 1098 } 1099 1100 uint16_t 1101 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, 1102 uint16_t nb_pkts) 1103 { 1104 struct virtnet_rx *rxvq = rx_queue; 1105 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); 1106 struct virtio_hw *hw = vq->hw; 1107 struct rte_mbuf *rxm; 1108 uint16_t num, nb_rx; 1109 uint32_t len[VIRTIO_MBUF_BURST_SZ]; 1110 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; 1111 int error; 1112 uint32_t i, nb_enqueued; 1113 uint32_t hdr_size; 1114 struct virtio_net_hdr *hdr; 1115 1116 nb_rx = 0; 1117 if (unlikely(hw->started == 0)) 1118 return nb_rx; 1119 1120 num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts); 1121 if (likely(num > DESC_PER_CACHELINE)) 1122 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); 1123 1124 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); 1125 PMD_RX_LOG(DEBUG, "dequeue:%d", num); 1126 1127 nb_enqueued = 0; 1128 hdr_size = hw->vtnet_hdr_size; 1129 1130 for (i = 0; i < num; i++) { 1131 rxm = rcv_pkts[i]; 1132 1133 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); 1134 1135 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { 1136 PMD_RX_LOG(ERR, "Packet drop"); 1137 nb_enqueued++; 1138 virtio_discard_rxbuf(vq, rxm); 1139 rxvq->stats.errors++; 1140 continue; 1141 } 1142 1143 rxm->port = rxvq->port_id; 1144 rxm->data_off = RTE_PKTMBUF_HEADROOM; 1145 rxm->ol_flags = 0; 1146 rxm->vlan_tci = 0; 1147 1148 rxm->pkt_len = (uint32_t)(len[i] - hdr_size); 1149 rxm->data_len = (uint16_t)(len[i] - hdr_size); 1150 1151 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr + 1152 RTE_PKTMBUF_HEADROOM - hdr_size); 1153 1154 if (hw->vlan_strip) 1155 rte_vlan_strip(rxm); 1156 1157 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) { 1158 virtio_discard_rxbuf(vq, rxm); 1159 rxvq->stats.errors++; 1160 continue; 1161 } 1162 1163 virtio_rx_stats_updated(rxvq, rxm); 1164 1165 rx_pkts[nb_rx++] = rxm; 1166 } 1167 1168 rxvq->stats.packets += nb_rx; 1169 1170 /* Allocate new mbuf for the used descriptor */ 1171 if (likely(!virtqueue_full(vq))) { 1172 uint16_t free_cnt = vq->vq_free_cnt; 1173 struct rte_mbuf *new_pkts[free_cnt]; 1174 1175 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, 1176 free_cnt) == 0)) { 1177 error = virtqueue_enqueue_recv_refill_packed(vq, 1178 new_pkts, free_cnt); 1179 if (unlikely(error)) { 1180 for (i = 0; i < free_cnt; i++) 1181 rte_pktmbuf_free(new_pkts[i]); 1182 } 1183 nb_enqueued += free_cnt; 1184 } else { 1185 struct rte_eth_dev *dev = 1186 &rte_eth_devices[rxvq->port_id]; 1187 dev->data->rx_mbuf_alloc_failed += free_cnt; 1188 } 1189 } 1190 1191 if (likely(nb_enqueued)) { 1192 if (unlikely(virtqueue_kick_prepare_packed(vq))) { 1193 virtqueue_notify(vq); 1194 PMD_RX_LOG(DEBUG, "Notified"); 1195 } 1196 } 1197 1198 return nb_rx; 1199 } 1200 1201 1202 uint16_t 1203 virtio_recv_pkts_inorder(void *rx_queue, 1204 struct rte_mbuf **rx_pkts, 1205 uint16_t nb_pkts) 1206 { 1207 struct virtnet_rx *rxvq = rx_queue; 1208 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); 1209 struct virtio_hw *hw = vq->hw; 1210 struct rte_mbuf *rxm; 1211 struct rte_mbuf *prev = NULL; 1212 uint16_t nb_used, num, nb_rx; 1213 uint32_t len[VIRTIO_MBUF_BURST_SZ]; 1214 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; 1215 int error; 1216 uint32_t nb_enqueued; 1217 uint32_t seg_num; 1218 uint32_t seg_res; 1219 uint32_t hdr_size; 1220 int32_t i; 1221 1222 nb_rx = 0; 1223 if (unlikely(hw->started == 0)) 1224 return nb_rx; 1225 1226 nb_used = virtqueue_nused(vq); 1227 nb_used = RTE_MIN(nb_used, nb_pkts); 1228 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ); 1229 1230 PMD_RX_LOG(DEBUG, "used:%d", nb_used); 1231 1232 nb_enqueued = 0; 1233 seg_num = 1; 1234 seg_res = 0; 1235 hdr_size = hw->vtnet_hdr_size; 1236 1237 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used); 1238 1239 for (i = 0; i < num; i++) { 1240 struct virtio_net_hdr_mrg_rxbuf *header; 1241 1242 PMD_RX_LOG(DEBUG, "dequeue:%d", num); 1243 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); 1244 1245 rxm = rcv_pkts[i]; 1246 1247 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { 1248 PMD_RX_LOG(ERR, "Packet drop"); 1249 nb_enqueued++; 1250 virtio_discard_rxbuf_inorder(vq, rxm); 1251 rxvq->stats.errors++; 1252 continue; 1253 } 1254 1255 header = (struct virtio_net_hdr_mrg_rxbuf *) 1256 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM 1257 - hdr_size); 1258 1259 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { 1260 seg_num = header->num_buffers; 1261 if (seg_num == 0) 1262 seg_num = 1; 1263 } else { 1264 seg_num = 1; 1265 } 1266 1267 rxm->data_off = RTE_PKTMBUF_HEADROOM; 1268 rxm->nb_segs = seg_num; 1269 rxm->ol_flags = 0; 1270 rxm->vlan_tci = 0; 1271 rxm->pkt_len = (uint32_t)(len[i] - hdr_size); 1272 rxm->data_len = (uint16_t)(len[i] - hdr_size); 1273 1274 rxm->port = rxvq->port_id; 1275 1276 rx_pkts[nb_rx] = rxm; 1277 prev = rxm; 1278 1279 if (vq->hw->has_rx_offload && 1280 virtio_rx_offload(rxm, &header->hdr) < 0) { 1281 virtio_discard_rxbuf_inorder(vq, rxm); 1282 rxvq->stats.errors++; 1283 continue; 1284 } 1285 1286 if (hw->vlan_strip) 1287 rte_vlan_strip(rx_pkts[nb_rx]); 1288 1289 seg_res = seg_num - 1; 1290 1291 /* Merge remaining segments */ 1292 while (seg_res != 0 && i < (num - 1)) { 1293 i++; 1294 1295 rxm = rcv_pkts[i]; 1296 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; 1297 rxm->pkt_len = (uint32_t)(len[i]); 1298 rxm->data_len = (uint16_t)(len[i]); 1299 1300 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); 1301 1302 prev->next = rxm; 1303 prev = rxm; 1304 seg_res -= 1; 1305 } 1306 1307 if (!seg_res) { 1308 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); 1309 nb_rx++; 1310 } 1311 } 1312 1313 /* Last packet still need merge segments */ 1314 while (seg_res != 0) { 1315 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, 1316 VIRTIO_MBUF_BURST_SZ); 1317 1318 if (likely(virtqueue_nused(vq) >= rcv_cnt)) { 1319 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, 1320 rcv_cnt); 1321 uint16_t extra_idx = 0; 1322 1323 rcv_cnt = num; 1324 while (extra_idx < rcv_cnt) { 1325 rxm = rcv_pkts[extra_idx]; 1326 rxm->data_off = 1327 RTE_PKTMBUF_HEADROOM - hdr_size; 1328 rxm->pkt_len = (uint32_t)(len[extra_idx]); 1329 rxm->data_len = (uint16_t)(len[extra_idx]); 1330 prev->next = rxm; 1331 prev = rxm; 1332 rx_pkts[nb_rx]->pkt_len += len[extra_idx]; 1333 extra_idx += 1; 1334 }; 1335 seg_res -= rcv_cnt; 1336 1337 if (!seg_res) { 1338 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); 1339 nb_rx++; 1340 } 1341 } else { 1342 PMD_RX_LOG(ERR, 1343 "No enough segments for packet."); 1344 rte_pktmbuf_free(rx_pkts[nb_rx]); 1345 rxvq->stats.errors++; 1346 break; 1347 } 1348 } 1349 1350 rxvq->stats.packets += nb_rx; 1351 1352 /* Allocate new mbuf for the used descriptor */ 1353 1354 if (likely(!virtqueue_full(vq))) { 1355 /* free_cnt may include mrg descs */ 1356 uint16_t free_cnt = vq->vq_free_cnt; 1357 struct rte_mbuf *new_pkts[free_cnt]; 1358 1359 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { 1360 error = virtqueue_enqueue_refill_inorder(vq, new_pkts, 1361 free_cnt); 1362 if (unlikely(error)) { 1363 for (i = 0; i < free_cnt; i++) 1364 rte_pktmbuf_free(new_pkts[i]); 1365 } 1366 nb_enqueued += free_cnt; 1367 } else { 1368 struct rte_eth_dev *dev = 1369 &rte_eth_devices[rxvq->port_id]; 1370 dev->data->rx_mbuf_alloc_failed += free_cnt; 1371 } 1372 } 1373 1374 if (likely(nb_enqueued)) { 1375 vq_update_avail_idx(vq); 1376 1377 if (unlikely(virtqueue_kick_prepare(vq))) { 1378 virtqueue_notify(vq); 1379 PMD_RX_LOG(DEBUG, "Notified"); 1380 } 1381 } 1382 1383 return nb_rx; 1384 } 1385 1386 uint16_t 1387 virtio_recv_mergeable_pkts(void *rx_queue, 1388 struct rte_mbuf **rx_pkts, 1389 uint16_t nb_pkts) 1390 { 1391 struct virtnet_rx *rxvq = rx_queue; 1392 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); 1393 struct virtio_hw *hw = vq->hw; 1394 struct rte_mbuf *rxm; 1395 struct rte_mbuf *prev = NULL; 1396 uint16_t nb_used, num, nb_rx = 0; 1397 uint32_t len[VIRTIO_MBUF_BURST_SZ]; 1398 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; 1399 int error; 1400 uint32_t nb_enqueued = 0; 1401 uint32_t seg_num = 0; 1402 uint32_t seg_res = 0; 1403 uint32_t hdr_size = hw->vtnet_hdr_size; 1404 int32_t i; 1405 1406 if (unlikely(hw->started == 0)) 1407 return nb_rx; 1408 1409 nb_used = virtqueue_nused(vq); 1410 1411 PMD_RX_LOG(DEBUG, "used:%d", nb_used); 1412 1413 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts; 1414 if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) 1415 num = VIRTIO_MBUF_BURST_SZ; 1416 if (likely(num > DESC_PER_CACHELINE)) 1417 num = num - ((vq->vq_used_cons_idx + num) % 1418 DESC_PER_CACHELINE); 1419 1420 1421 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num); 1422 1423 for (i = 0; i < num; i++) { 1424 struct virtio_net_hdr_mrg_rxbuf *header; 1425 1426 PMD_RX_LOG(DEBUG, "dequeue:%d", num); 1427 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); 1428 1429 rxm = rcv_pkts[i]; 1430 1431 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { 1432 PMD_RX_LOG(ERR, "Packet drop"); 1433 nb_enqueued++; 1434 virtio_discard_rxbuf(vq, rxm); 1435 rxvq->stats.errors++; 1436 continue; 1437 } 1438 1439 header = (struct virtio_net_hdr_mrg_rxbuf *) 1440 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM 1441 - hdr_size); 1442 seg_num = header->num_buffers; 1443 if (seg_num == 0) 1444 seg_num = 1; 1445 1446 rxm->data_off = RTE_PKTMBUF_HEADROOM; 1447 rxm->nb_segs = seg_num; 1448 rxm->ol_flags = 0; 1449 rxm->vlan_tci = 0; 1450 rxm->pkt_len = (uint32_t)(len[i] - hdr_size); 1451 rxm->data_len = (uint16_t)(len[i] - hdr_size); 1452 1453 rxm->port = rxvq->port_id; 1454 1455 rx_pkts[nb_rx] = rxm; 1456 prev = rxm; 1457 1458 if (hw->has_rx_offload && 1459 virtio_rx_offload(rxm, &header->hdr) < 0) { 1460 virtio_discard_rxbuf(vq, rxm); 1461 rxvq->stats.errors++; 1462 continue; 1463 } 1464 1465 if (hw->vlan_strip) 1466 rte_vlan_strip(rx_pkts[nb_rx]); 1467 1468 seg_res = seg_num - 1; 1469 1470 /* Merge remaining segments */ 1471 while (seg_res != 0 && i < (num - 1)) { 1472 i++; 1473 1474 rxm = rcv_pkts[i]; 1475 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; 1476 rxm->pkt_len = (uint32_t)(len[i]); 1477 rxm->data_len = (uint16_t)(len[i]); 1478 1479 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); 1480 1481 prev->next = rxm; 1482 prev = rxm; 1483 seg_res -= 1; 1484 } 1485 1486 if (!seg_res) { 1487 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); 1488 nb_rx++; 1489 } 1490 } 1491 1492 /* Last packet still need merge segments */ 1493 while (seg_res != 0) { 1494 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, 1495 VIRTIO_MBUF_BURST_SZ); 1496 1497 if (likely(virtqueue_nused(vq) >= rcv_cnt)) { 1498 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1499 rcv_cnt); 1500 uint16_t extra_idx = 0; 1501 1502 rcv_cnt = num; 1503 while (extra_idx < rcv_cnt) { 1504 rxm = rcv_pkts[extra_idx]; 1505 rxm->data_off = 1506 RTE_PKTMBUF_HEADROOM - hdr_size; 1507 rxm->pkt_len = (uint32_t)(len[extra_idx]); 1508 rxm->data_len = (uint16_t)(len[extra_idx]); 1509 prev->next = rxm; 1510 prev = rxm; 1511 rx_pkts[nb_rx]->pkt_len += len[extra_idx]; 1512 extra_idx += 1; 1513 }; 1514 seg_res -= rcv_cnt; 1515 1516 if (!seg_res) { 1517 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); 1518 nb_rx++; 1519 } 1520 } else { 1521 PMD_RX_LOG(ERR, 1522 "No enough segments for packet."); 1523 rte_pktmbuf_free(rx_pkts[nb_rx]); 1524 rxvq->stats.errors++; 1525 break; 1526 } 1527 } 1528 1529 rxvq->stats.packets += nb_rx; 1530 1531 /* Allocate new mbuf for the used descriptor */ 1532 if (likely(!virtqueue_full(vq))) { 1533 /* free_cnt may include mrg descs */ 1534 uint16_t free_cnt = vq->vq_free_cnt; 1535 struct rte_mbuf *new_pkts[free_cnt]; 1536 1537 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { 1538 error = virtqueue_enqueue_recv_refill(vq, new_pkts, 1539 free_cnt); 1540 if (unlikely(error)) { 1541 for (i = 0; i < free_cnt; i++) 1542 rte_pktmbuf_free(new_pkts[i]); 1543 } 1544 nb_enqueued += free_cnt; 1545 } else { 1546 struct rte_eth_dev *dev = 1547 &rte_eth_devices[rxvq->port_id]; 1548 dev->data->rx_mbuf_alloc_failed += free_cnt; 1549 } 1550 } 1551 1552 if (likely(nb_enqueued)) { 1553 vq_update_avail_idx(vq); 1554 1555 if (unlikely(virtqueue_kick_prepare(vq))) { 1556 virtqueue_notify(vq); 1557 PMD_RX_LOG(DEBUG, "Notified"); 1558 } 1559 } 1560 1561 return nb_rx; 1562 } 1563 1564 uint16_t 1565 virtio_recv_mergeable_pkts_packed(void *rx_queue, 1566 struct rte_mbuf **rx_pkts, 1567 uint16_t nb_pkts) 1568 { 1569 struct virtnet_rx *rxvq = rx_queue; 1570 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq); 1571 struct virtio_hw *hw = vq->hw; 1572 struct rte_mbuf *rxm; 1573 struct rte_mbuf *prev = NULL; 1574 uint16_t num, nb_rx = 0; 1575 uint32_t len[VIRTIO_MBUF_BURST_SZ]; 1576 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; 1577 uint32_t nb_enqueued = 0; 1578 uint32_t seg_num = 0; 1579 uint32_t seg_res = 0; 1580 uint32_t hdr_size = hw->vtnet_hdr_size; 1581 int32_t i; 1582 int error; 1583 1584 if (unlikely(hw->started == 0)) 1585 return nb_rx; 1586 1587 1588 num = nb_pkts; 1589 if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) 1590 num = VIRTIO_MBUF_BURST_SZ; 1591 if (likely(num > DESC_PER_CACHELINE)) 1592 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); 1593 1594 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); 1595 1596 for (i = 0; i < num; i++) { 1597 struct virtio_net_hdr_mrg_rxbuf *header; 1598 1599 PMD_RX_LOG(DEBUG, "dequeue:%d", num); 1600 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); 1601 1602 rxm = rcv_pkts[i]; 1603 1604 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { 1605 PMD_RX_LOG(ERR, "Packet drop"); 1606 nb_enqueued++; 1607 virtio_discard_rxbuf(vq, rxm); 1608 rxvq->stats.errors++; 1609 continue; 1610 } 1611 1612 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *) 1613 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size); 1614 seg_num = header->num_buffers; 1615 1616 if (seg_num == 0) 1617 seg_num = 1; 1618 1619 rxm->data_off = RTE_PKTMBUF_HEADROOM; 1620 rxm->nb_segs = seg_num; 1621 rxm->ol_flags = 0; 1622 rxm->vlan_tci = 0; 1623 rxm->pkt_len = (uint32_t)(len[i] - hdr_size); 1624 rxm->data_len = (uint16_t)(len[i] - hdr_size); 1625 1626 rxm->port = rxvq->port_id; 1627 rx_pkts[nb_rx] = rxm; 1628 prev = rxm; 1629 1630 if (hw->has_rx_offload && 1631 virtio_rx_offload(rxm, &header->hdr) < 0) { 1632 virtio_discard_rxbuf(vq, rxm); 1633 rxvq->stats.errors++; 1634 continue; 1635 } 1636 1637 if (hw->vlan_strip) 1638 rte_vlan_strip(rx_pkts[nb_rx]); 1639 1640 seg_res = seg_num - 1; 1641 1642 /* Merge remaining segments */ 1643 while (seg_res != 0 && i < (num - 1)) { 1644 i++; 1645 1646 rxm = rcv_pkts[i]; 1647 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; 1648 rxm->pkt_len = (uint32_t)(len[i]); 1649 rxm->data_len = (uint16_t)(len[i]); 1650 1651 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); 1652 1653 prev->next = rxm; 1654 prev = rxm; 1655 seg_res -= 1; 1656 } 1657 1658 if (!seg_res) { 1659 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); 1660 nb_rx++; 1661 } 1662 } 1663 1664 /* Last packet still need merge segments */ 1665 while (seg_res != 0) { 1666 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, 1667 VIRTIO_MBUF_BURST_SZ); 1668 uint16_t extra_idx = 0; 1669 1670 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, 1671 len, rcv_cnt); 1672 if (unlikely(rcv_cnt == 0)) { 1673 PMD_RX_LOG(ERR, "No enough segments for packet."); 1674 rte_pktmbuf_free(rx_pkts[nb_rx]); 1675 rxvq->stats.errors++; 1676 break; 1677 } 1678 1679 while (extra_idx < rcv_cnt) { 1680 rxm = rcv_pkts[extra_idx]; 1681 1682 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; 1683 rxm->pkt_len = (uint32_t)(len[extra_idx]); 1684 rxm->data_len = (uint16_t)(len[extra_idx]); 1685 1686 prev->next = rxm; 1687 prev = rxm; 1688 rx_pkts[nb_rx]->pkt_len += len[extra_idx]; 1689 extra_idx += 1; 1690 } 1691 seg_res -= rcv_cnt; 1692 if (!seg_res) { 1693 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); 1694 nb_rx++; 1695 } 1696 } 1697 1698 rxvq->stats.packets += nb_rx; 1699 1700 /* Allocate new mbuf for the used descriptor */ 1701 if (likely(!virtqueue_full(vq))) { 1702 /* free_cnt may include mrg descs */ 1703 uint16_t free_cnt = vq->vq_free_cnt; 1704 struct rte_mbuf *new_pkts[free_cnt]; 1705 1706 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { 1707 error = virtqueue_enqueue_recv_refill_packed(vq, 1708 new_pkts, free_cnt); 1709 if (unlikely(error)) { 1710 for (i = 0; i < free_cnt; i++) 1711 rte_pktmbuf_free(new_pkts[i]); 1712 } 1713 nb_enqueued += free_cnt; 1714 } else { 1715 struct rte_eth_dev *dev = 1716 &rte_eth_devices[rxvq->port_id]; 1717 dev->data->rx_mbuf_alloc_failed += free_cnt; 1718 } 1719 } 1720 1721 if (likely(nb_enqueued)) { 1722 if (unlikely(virtqueue_kick_prepare_packed(vq))) { 1723 virtqueue_notify(vq); 1724 PMD_RX_LOG(DEBUG, "Notified"); 1725 } 1726 } 1727 1728 return nb_rx; 1729 } 1730 1731 uint16_t 1732 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, 1733 uint16_t nb_pkts) 1734 { 1735 uint16_t nb_tx; 1736 int error; 1737 1738 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { 1739 struct rte_mbuf *m = tx_pkts[nb_tx]; 1740 1741 #ifdef RTE_LIBRTE_ETHDEV_DEBUG 1742 error = rte_validate_tx_offload(m); 1743 if (unlikely(error)) { 1744 rte_errno = -error; 1745 break; 1746 } 1747 #endif 1748 1749 /* Do VLAN tag insertion */ 1750 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) { 1751 error = rte_vlan_insert(&m); 1752 /* rte_vlan_insert() may change pointer 1753 * even in the case of failure 1754 */ 1755 tx_pkts[nb_tx] = m; 1756 1757 if (unlikely(error)) { 1758 rte_errno = -error; 1759 break; 1760 } 1761 } 1762 1763 error = rte_net_intel_cksum_prepare(m); 1764 if (unlikely(error)) { 1765 rte_errno = -error; 1766 break; 1767 } 1768 1769 if (m->ol_flags & PKT_TX_TCP_SEG) 1770 virtio_tso_fix_cksum(m); 1771 } 1772 1773 return nb_tx; 1774 } 1775 1776 uint16_t 1777 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, 1778 uint16_t nb_pkts) 1779 { 1780 struct virtnet_tx *txvq = tx_queue; 1781 struct virtqueue *vq = virtnet_txq_to_vq(txvq); 1782 struct virtio_hw *hw = vq->hw; 1783 uint16_t hdr_size = hw->vtnet_hdr_size; 1784 uint16_t nb_tx = 0; 1785 bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER); 1786 1787 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) 1788 return nb_tx; 1789 1790 if (unlikely(nb_pkts < 1)) 1791 return nb_pkts; 1792 1793 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); 1794 1795 if (nb_pkts > vq->vq_free_cnt) 1796 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt, 1797 in_order); 1798 1799 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { 1800 struct rte_mbuf *txm = tx_pkts[nb_tx]; 1801 int can_push = 0, use_indirect = 0, slots, need; 1802 1803 /* optimize ring usage */ 1804 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || 1805 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) && 1806 rte_mbuf_refcnt_read(txm) == 1 && 1807 RTE_MBUF_DIRECT(txm) && 1808 txm->nb_segs == 1 && 1809 rte_pktmbuf_headroom(txm) >= hdr_size && 1810 rte_is_aligned(rte_pktmbuf_mtod(txm, char *), 1811 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) 1812 can_push = 1; 1813 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) && 1814 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT) 1815 use_indirect = 1; 1816 /* How many main ring entries are needed to this Tx? 1817 * indirect => 1 1818 * any_layout => number of segments 1819 * default => number of segments + 1 1820 */ 1821 slots = use_indirect ? 1 : (txm->nb_segs + !can_push); 1822 need = slots - vq->vq_free_cnt; 1823 1824 /* Positive value indicates it need free vring descriptors */ 1825 if (unlikely(need > 0)) { 1826 virtio_xmit_cleanup_packed(vq, need, in_order); 1827 need = slots - vq->vq_free_cnt; 1828 if (unlikely(need > 0)) { 1829 PMD_TX_LOG(ERR, 1830 "No free tx descriptors to transmit"); 1831 break; 1832 } 1833 } 1834 1835 /* Enqueue Packet buffers */ 1836 if (can_push) 1837 virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order); 1838 else 1839 virtqueue_enqueue_xmit_packed(txvq, txm, slots, 1840 use_indirect, 0, 1841 in_order); 1842 1843 virtio_update_packet_stats(&txvq->stats, txm); 1844 } 1845 1846 txvq->stats.packets += nb_tx; 1847 1848 if (likely(nb_tx)) { 1849 if (unlikely(virtqueue_kick_prepare_packed(vq))) { 1850 virtqueue_notify(vq); 1851 PMD_TX_LOG(DEBUG, "Notified backend after xmit"); 1852 } 1853 } 1854 1855 return nb_tx; 1856 } 1857 1858 uint16_t 1859 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 1860 { 1861 struct virtnet_tx *txvq = tx_queue; 1862 struct virtqueue *vq = virtnet_txq_to_vq(txvq); 1863 struct virtio_hw *hw = vq->hw; 1864 uint16_t hdr_size = hw->vtnet_hdr_size; 1865 uint16_t nb_used, nb_tx = 0; 1866 1867 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) 1868 return nb_tx; 1869 1870 if (unlikely(nb_pkts < 1)) 1871 return nb_pkts; 1872 1873 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); 1874 1875 nb_used = virtqueue_nused(vq); 1876 1877 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) 1878 virtio_xmit_cleanup(vq, nb_used); 1879 1880 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { 1881 struct rte_mbuf *txm = tx_pkts[nb_tx]; 1882 int can_push = 0, use_indirect = 0, slots, need; 1883 1884 /* optimize ring usage */ 1885 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || 1886 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) && 1887 rte_mbuf_refcnt_read(txm) == 1 && 1888 RTE_MBUF_DIRECT(txm) && 1889 txm->nb_segs == 1 && 1890 rte_pktmbuf_headroom(txm) >= hdr_size && 1891 rte_is_aligned(rte_pktmbuf_mtod(txm, char *), 1892 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) 1893 can_push = 1; 1894 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) && 1895 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT) 1896 use_indirect = 1; 1897 1898 /* How many main ring entries are needed to this Tx? 1899 * any_layout => number of segments 1900 * indirect => 1 1901 * default => number of segments + 1 1902 */ 1903 slots = use_indirect ? 1 : (txm->nb_segs + !can_push); 1904 need = slots - vq->vq_free_cnt; 1905 1906 /* Positive value indicates it need free vring descriptors */ 1907 if (unlikely(need > 0)) { 1908 nb_used = virtqueue_nused(vq); 1909 1910 need = RTE_MIN(need, (int)nb_used); 1911 1912 virtio_xmit_cleanup(vq, need); 1913 need = slots - vq->vq_free_cnt; 1914 if (unlikely(need > 0)) { 1915 PMD_TX_LOG(ERR, 1916 "No free tx descriptors to transmit"); 1917 break; 1918 } 1919 } 1920 1921 /* Enqueue Packet buffers */ 1922 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, 1923 can_push, 0); 1924 1925 virtio_update_packet_stats(&txvq->stats, txm); 1926 } 1927 1928 txvq->stats.packets += nb_tx; 1929 1930 if (likely(nb_tx)) { 1931 vq_update_avail_idx(vq); 1932 1933 if (unlikely(virtqueue_kick_prepare(vq))) { 1934 virtqueue_notify(vq); 1935 PMD_TX_LOG(DEBUG, "Notified backend after xmit"); 1936 } 1937 } 1938 1939 return nb_tx; 1940 } 1941 1942 static __rte_always_inline int 1943 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need) 1944 { 1945 uint16_t nb_used, nb_clean, nb_descs; 1946 1947 nb_descs = vq->vq_free_cnt + need; 1948 nb_used = virtqueue_nused(vq); 1949 nb_clean = RTE_MIN(need, (int)nb_used); 1950 1951 virtio_xmit_cleanup_inorder(vq, nb_clean); 1952 1953 return nb_descs - vq->vq_free_cnt; 1954 } 1955 1956 uint16_t 1957 virtio_xmit_pkts_inorder(void *tx_queue, 1958 struct rte_mbuf **tx_pkts, 1959 uint16_t nb_pkts) 1960 { 1961 struct virtnet_tx *txvq = tx_queue; 1962 struct virtqueue *vq = virtnet_txq_to_vq(txvq); 1963 struct virtio_hw *hw = vq->hw; 1964 uint16_t hdr_size = hw->vtnet_hdr_size; 1965 uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0; 1966 struct rte_mbuf *inorder_pkts[nb_pkts]; 1967 int need; 1968 1969 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) 1970 return nb_tx; 1971 1972 if (unlikely(nb_pkts < 1)) 1973 return nb_pkts; 1974 1975 VIRTQUEUE_DUMP(vq); 1976 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); 1977 nb_used = virtqueue_nused(vq); 1978 1979 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) 1980 virtio_xmit_cleanup_inorder(vq, nb_used); 1981 1982 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { 1983 struct rte_mbuf *txm = tx_pkts[nb_tx]; 1984 int slots; 1985 1986 /* optimize ring usage */ 1987 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || 1988 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) && 1989 rte_mbuf_refcnt_read(txm) == 1 && 1990 RTE_MBUF_DIRECT(txm) && 1991 txm->nb_segs == 1 && 1992 rte_pktmbuf_headroom(txm) >= hdr_size && 1993 rte_is_aligned(rte_pktmbuf_mtod(txm, char *), 1994 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) { 1995 inorder_pkts[nb_inorder_pkts] = txm; 1996 nb_inorder_pkts++; 1997 1998 continue; 1999 } 2000 2001 if (nb_inorder_pkts) { 2002 need = nb_inorder_pkts - vq->vq_free_cnt; 2003 if (unlikely(need > 0)) { 2004 need = virtio_xmit_try_cleanup_inorder(vq, 2005 need); 2006 if (unlikely(need > 0)) { 2007 PMD_TX_LOG(ERR, 2008 "No free tx descriptors to " 2009 "transmit"); 2010 break; 2011 } 2012 } 2013 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, 2014 nb_inorder_pkts); 2015 nb_inorder_pkts = 0; 2016 } 2017 2018 slots = txm->nb_segs + 1; 2019 need = slots - vq->vq_free_cnt; 2020 if (unlikely(need > 0)) { 2021 need = virtio_xmit_try_cleanup_inorder(vq, slots); 2022 2023 if (unlikely(need > 0)) { 2024 PMD_TX_LOG(ERR, 2025 "No free tx descriptors to transmit"); 2026 break; 2027 } 2028 } 2029 /* Enqueue Packet buffers */ 2030 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1); 2031 2032 virtio_update_packet_stats(&txvq->stats, txm); 2033 } 2034 2035 /* Transmit all inorder packets */ 2036 if (nb_inorder_pkts) { 2037 need = nb_inorder_pkts - vq->vq_free_cnt; 2038 if (unlikely(need > 0)) { 2039 need = virtio_xmit_try_cleanup_inorder(vq, 2040 need); 2041 if (unlikely(need > 0)) { 2042 PMD_TX_LOG(ERR, 2043 "No free tx descriptors to transmit"); 2044 nb_inorder_pkts = vq->vq_free_cnt; 2045 nb_tx -= need; 2046 } 2047 } 2048 2049 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, 2050 nb_inorder_pkts); 2051 } 2052 2053 txvq->stats.packets += nb_tx; 2054 2055 if (likely(nb_tx)) { 2056 vq_update_avail_idx(vq); 2057 2058 if (unlikely(virtqueue_kick_prepare(vq))) { 2059 virtqueue_notify(vq); 2060 PMD_TX_LOG(DEBUG, "Notified backend after xmit"); 2061 } 2062 } 2063 2064 VIRTQUEUE_DUMP(vq); 2065 2066 return nb_tx; 2067 } 2068 2069 __rte_weak uint16_t 2070 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused, 2071 struct rte_mbuf **rx_pkts __rte_unused, 2072 uint16_t nb_pkts __rte_unused) 2073 { 2074 return 0; 2075 } 2076 2077 __rte_weak uint16_t 2078 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused, 2079 struct rte_mbuf **tx_pkts __rte_unused, 2080 uint16_t nb_pkts __rte_unused) 2081 { 2082 return 0; 2083 } 2084