/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>

#include "virtio_rxtx_simple.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

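/*
 * Refill one RX descriptor with a fresh mbuf for the simple (vectorized)
 * receive path. The mbuf is recorded both in vq_descx and in the shadow
 * sw_ring so the vector RX code can recover it without re-reading the
 * descriptor ring.
 */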
int __attribute__((cold))
virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
	struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	uint16_t desc_idx;

	cookie->port = vq->rxq.port_id;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	dxp = &vq->vq_descx[desc_idx];
	dxp->cookie = (void *)cookie;
	vq->sw_ring[desc_idx] = cookie;

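	/*
	 * Point the descriptor back into the mbuf headroom so the device
	 * can place the virtio-net header in front of the packet data.
	 */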
	start_dp = vq->vq_ring.desc;
	start_dp[desc_idx].addr =
		VIRTIO_MBUF_ADDR(cookie, vq) +
		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
	start_dp[desc_idx].len = cookie->buf_len -
		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;

	vq->vq_free_cnt--;
	vq->vq_avail_idx++;

	return 0;
}

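/*
 * Simple TX burst: only the data descriptors in the lower half of the ring
 * are written here; the matching virtio-net header descriptors are expected
 * to have been pre-chained at TX queue setup, which is why each packet
 * consumes two free descriptors.
 */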
uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_desc *start_dp;
	uint16_t nb_tail, nb_commit;
	int i;
	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
	uint16_t nb_tx = 0;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

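	/* Reclaim transmitted mbufs once enough descriptors have been used. */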
	nb_used = VIRTQUEUE_NUSED(vq);
	rte_compiler_barrier();

	if (nb_used >= VIRTIO_TX_FREE_THRESH)
		virtio_xmit_cleanup(vq);

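	/*
	 * Each packet takes two descriptors (data + header), so at most
	 * vq_free_cnt / 2 packets fit. nb_tail is the room left before the
	 * data descriptor area wraps back to index 0.
	 */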
	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
	start_dp = vq->vq_ring.desc;
	nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);

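	/* Split the burst if it crosses the end of the data descriptor area. */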
	if (nb_commit >= nb_tail) {
		for (i = 0; i < nb_tail; i++)
			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
		}
		nb_commit -= nb_tail;
		desc_idx = 0;
	}
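	/* Fill the remaining packets (the whole burst when no wrap occurred). */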
	for (i = 0; i < nb_commit; i++)
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
			VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
	}

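	/* Keep descriptor writes from being reordered past the avail index update. */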
	rte_compiler_barrier();

	vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
	vq->vq_avail_idx += nb_pkts;
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	txvq->stats.packets += nb_pkts;

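	/* Kick the device only if it has not suppressed notifications. */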
	if (likely(nb_pkts)) {
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	return nb_pkts;
}

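/*
 * Precompute the 64-bit rearm_data template (data_off, refcnt, nb_segs,
 * port) so the vector RX rearm code can reinitialize each mbuf with a
 * single 64-bit store.
 */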
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;

	return 0;
}

/* Stub for linkage when an arch-specific implementation is not available */
uint16_t __attribute__((weak))
virtio_recv_pkts_vec(void *rx_queue __rte_unused,
		     struct rte_mbuf **rx_pkts __rte_unused,
		     uint16_t nb_pkts __rte_unused)
{
	rte_panic("Wrong weak function linked by linker\n");
	return 0;
}