/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_net.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"

/* Pick the vectorized per-arch helpers: AVX512 when the build enabled it,
 * NEON on Arm; otherwise the scalar helpers from virtio_rxtx_packed.h apply.
 */
#ifdef CC_AVX512_SUPPORT
#include "virtio_rxtx_packed_avx.h"
#elif defined(RTE_ARCH_ARM)
#include "virtio_rxtx_packed_neon.h"
#endif

/*
 * Vectorized transmit burst for the packed virtqueue.
 *
 * Enqueues up to @nb_pkts mbufs from @tx_pkts onto the packed ring,
 * preferring PACKED_BATCH_SIZE-wide vector enqueues and falling back to
 * single-descriptor enqueue for the remainder.  Notifies the backend only
 * when the kick-prepare check says a notification is needed.
 *
 * @param tx_queue  Tx queue handle (struct virtnet_tx *).
 * @param tx_pkts   Array of mbufs to transmit.
 * @param nb_pkts   Number of entries in @tx_pkts.
 * @return Number of packets actually enqueued (may be < @nb_pkts).
 */
uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_tx = 0;
	uint16_t remained;

	/* Device not started: drop the burst, except for packets the driver
	 * itself injects (hw->inject_pkts) which must still go out.
	 */
	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	/* Reclaim used descriptors once the free count drops to the
	 * configured threshold, to make room before enqueueing.
	 */
	if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)
		virtio_xmit_cleanup_inorder_packed(vq, vq->vq_free_thresh);

	/* Never try to enqueue more than the ring currently has room for. */
	remained = RTE_MIN(nb_pkts, vq->vq_free_cnt);

	while (remained) {
		/* NOTE: the enqueue helpers return 0 on success, hence the
		 * inverted `!` tests below; a non-zero return (ring full /
		 * batch not possible) falls through to the next strategy.
		 */
		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtqueue_enqueue_batch_packed_vec(txvq,
						&tx_pkts[nb_tx])) {
				nb_tx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		/* Batch path unavailable: try one descriptor at a time. */
		if (!virtqueue_enqueue_single_packed_vec(txvq,
					tx_pkts[nb_tx])) {
			nb_tx++;
			remained--;
			continue;
		}
		/* Neither batch nor single enqueue succeeded: stop here. */
		break;
	};

	txvq->stats.packets += nb_tx;

	/* Kick the backend only if something was sent AND the device-side
	 * event suppression state says a notification is required.
	 */
	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

/*
 * Vectorized receive burst for the packed virtqueue.
 *
 * Dequeues up to @nb_pkts received mbufs into @rx_pkts, preferring
 * PACKED_BATCH_SIZE-wide vector dequeues with a single-descriptor
 * fallback, then refills the ring with fresh mbufs when the free count
 * reaches the refill threshold.
 *
 * @param rx_queue  Rx queue handle (struct virtnet_rx *).
 * @param rx_pkts   Output array for received mbufs.
 * @param nb_pkts   Capacity of @rx_pkts.
 * @return Number of packets actually received (may be < @nb_pkts).
 */
uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue,
			    struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t num, nb_rx = 0;
	uint32_t nb_enqueued = 0;
	uint16_t free_cnt = vq->vq_free_thresh;

	if (unlikely(hw->started == 0))
		return nb_rx;

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	/* Trim the burst so that, after it completes, vq_used_cons_idx lands
	 * back on a PACKED_BATCH_SIZE boundary — keeps later batch dequeues
	 * aligned for the vector path.
	 */
	if (likely(num > PACKED_BATCH_SIZE))
		num = num - ((vq->vq_used_cons_idx + num) % PACKED_BATCH_SIZE);

	while (num) {
		/* Dequeue helpers return 0 on success (see `!` tests);
		 * non-zero means no (more) packets available via that path.
		 */
		if (num >= PACKED_BATCH_SIZE) {
			if (!virtqueue_dequeue_batch_packed_vec(rxvq,
						&rx_pkts[nb_rx])) {
				nb_rx += PACKED_BATCH_SIZE;
				num -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		if (!virtqueue_dequeue_single_packed_vec(rxvq,
					&rx_pkts[nb_rx])) {
			nb_rx++;
			num--;
			continue;
		}
		/* Ring empty: stop dequeuing. */
		break;
	};

	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	rxvq->stats.packets += nb_rx;

	/* Refill the ring in bulk once at least free_cnt slots are empty.
	 * On mbuf-pool exhaustion, account the failure instead of refilling.
	 */
	if (likely(vq->vq_free_cnt >= free_cnt)) {
		struct rte_mbuf *new_pkts[free_cnt];
		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			virtio_recv_refill_packed_vec(rxvq, new_pkts,
					free_cnt);
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	/* Notify the backend about the refilled buffers only when the
	 * kick-prepare check says a notification is required.
	 */
	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}