/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_net.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"

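/*
 * Select the vectorized datapath for this build: the AVX512
 * implementation when the toolchain provides it (CC_AVX512_SUPPORT),
 * the NEON implementation on Arm.
 */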
#ifdef CC_AVX512_SUPPORT
#include "virtio_rxtx_packed_avx.h"
#elif defined(RTE_ARCH_ARM)
#include "virtio_rxtx_packed_neon.h"
#endif

uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_tx = 0;
	uint16_t remained;

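	/*
	 * Transmit nothing while the port is stopped, except for packets
	 * the driver injects itself (e.g. the RARP announcement sent
	 * after live migration).
	 */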
	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

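	/* Reclaim used descriptors once free slots drop below the threshold. */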
	if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)
		virtio_xmit_cleanup_inorder_packed(vq, vq->vq_free_thresh);

	remained = RTE_MIN(nb_pkts, vq->vq_free_cnt);

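	/*
	 * Prefer the SIMD batch path, enqueuing PACKED_BATCH_SIZE packets
	 * at a time; fall back to single-packet enqueue and stop once
	 * neither path can make progress (ring full).
	 */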
	while (remained) {
		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtqueue_enqueue_batch_packed_vec(txvq,
						&tx_pkts[nb_tx])) {
				nb_tx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		if (!virtqueue_enqueue_single_packed_vec(txvq,
					tx_pkts[nb_tx])) {
			nb_tx++;
			remained--;
			continue;
		}
		break;
	}

	txvq->stats.packets += nb_tx;

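	/* Kick the backend only if it has asked to be notified. */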
	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue,
			    struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t num, nb_rx = 0;
	uint32_t nb_enqueued = 0;
	uint16_t free_cnt = vq->vq_free_thresh;

	if (unlikely(hw->started == 0))
		return nb_rx;

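	/*
	 * Cap the burst at VIRTIO_MBUF_BURST_SZ and trim it so it ends on
	 * a PACKED_BATCH_SIZE-aligned used index, keeping later batch
	 * dequeues aligned.
	 */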
	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > PACKED_BATCH_SIZE))
		num = num - ((vq->vq_used_cons_idx + num) % PACKED_BATCH_SIZE);

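	/*
	 * Prefer the SIMD batch dequeue; fall back to single-packet
	 * dequeue and stop once the ring runs dry.
	 */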
	while (num) {
		if (num >= PACKED_BATCH_SIZE) {
			if (!virtqueue_dequeue_batch_packed_vec(rxvq,
						&rx_pkts[nb_rx])) {
				nb_rx += PACKED_BATCH_SIZE;
				num -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		if (!virtqueue_dequeue_single_packed_vec(rxvq,
					&rx_pkts[nb_rx])) {
			nb_rx++;
			num--;
			continue;
		}
		break;
	}

	PMD_RX_LOG(DEBUG, "dequeue:%d", nb_rx);

	rxvq->stats.packets += nb_rx;

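	/*
	 * Refill: once at least vq_free_thresh descriptors are free,
	 * allocate a bulk of fresh mbufs and expose them to the device;
	 * on allocation failure, account the miss in the port statistics.
	 */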
	if (likely(vq->vq_free_cnt >= free_cnt)) {
		struct rte_mbuf *new_pkts[free_cnt];
		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			virtio_recv_refill_packed_vec(rxvq, new_pkts,
					free_cnt);
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

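	/* Notify the backend about the refilled buffers if it requested a kick. */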
	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}