/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_net.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"

#ifdef CC_AVX512_SUPPORT
#include "virtio_rxtx_packed_avx.h"
#endif

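/*
 * Vectorized TX burst for packed virtqueues: reclaim used descriptors
 * when in-flight entries reach the free threshold, then enqueue in
 * batches of PACKED_BATCH_SIZE descriptors, falling back to
 * single-descriptor enqueue for any remainder.
 */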
uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_tx = 0;
	uint16_t remained;

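	/*
	 * A stopped port transmits nothing, except for packets injected
	 * by the driver itself (tx_pkts == hw->inject_pkts).
	 */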
	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

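	/* Reclaim used descriptors once in-flight entries reach vq_free_thresh. */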
	if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)
		virtio_xmit_cleanup_inorder_packed(vq, vq->vq_free_thresh);

	remained = RTE_MIN(nb_pkts, vq->vq_free_cnt);

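	/*
	 * Prefer enqueueing PACKED_BATCH_SIZE packets at once; fall back
	 * to single-packet enqueue, and stop once the ring cannot take
	 * any more packets.
	 */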
	while (remained) {
		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtqueue_enqueue_batch_packed_vec(txvq,
						&tx_pkts[nb_tx])) {
				nb_tx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		if (!virtqueue_enqueue_single_packed_vec(txvq,
					tx_pkts[nb_tx])) {
			nb_tx++;
			remained--;
			continue;
		}
		break;
	}

	txvq->stats.packets += nb_tx;

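	/* Kick the backend only when it requested a notification. */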
	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

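/*
 * Vectorized RX burst for packed virtqueues: dequeue used descriptors
 * in batches of PACKED_BATCH_SIZE (one by one while not batch-aligned),
 * then refill the ring with fresh mbufs once the free threshold is
 * reached.
 */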
uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue,
			    struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t num, nb_rx = 0;
	uint32_t nb_enqueued = 0;
	uint16_t free_cnt = vq->vq_free_thresh;

	if (unlikely(hw->started == 0))
		return nb_rx;

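	/*
	 * Trim the burst so that vq_used_cons_idx ends up batch-aligned,
	 * letting subsequent bursts dequeue in full PACKED_BATCH_SIZE
	 * batches from the start.
	 */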
	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > PACKED_BATCH_SIZE))
		num = num - ((vq->vq_used_cons_idx + num) % PACKED_BATCH_SIZE);

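	/*
	 * Dequeue a full batch when the next descriptors allow it;
	 * otherwise dequeue one descriptor at a time, and stop once no
	 * used descriptor is left.
	 */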
	while (num) {
		if (!virtqueue_dequeue_batch_packed_vec(rxvq,
					&rx_pkts[nb_rx])) {
			nb_rx += PACKED_BATCH_SIZE;
			num -= PACKED_BATCH_SIZE;
			continue;
		}
		if (!virtqueue_dequeue_single_packed_vec(rxvq,
					&rx_pkts[nb_rx])) {
			nb_rx++;
			num--;
			continue;
		}
		break;
	}

	PMD_RX_LOG(DEBUG, "dequeue:%d", nb_rx);

	rxvq->stats.packets += nb_rx;

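	/*
	 * Once enough descriptors are free, refill the ring with a bulk
	 * allocation of fresh mbufs; on allocation failure, account the
	 * miss in the device statistics instead.
	 */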
	if (likely(vq->vq_free_cnt >= free_cnt)) {
		struct rte_mbuf *new_pkts[free_cnt];
		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			virtio_recv_refill_packed_vec(rxvq, new_pkts,
					free_cnt);
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

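	/* Notify the backend about refilled descriptors if it asked for a kick. */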
	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}