/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>

#include "virtio_rxtx_simple.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_desc *start_dp;
	uint16_t nb_tail, nb_commit;
	int i;
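	/* The simple TX path chains each data descriptor in the lower half
	 * of the ring to a header descriptor prefilled in the upper half at
	 * queue setup time, so indexing here stays within the lower half.
	 */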
	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
	uint16_t nb_tx = 0;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	nb_used = VIRTQUEUE_NUSED(vq);
	rte_compiler_barrier();

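	/* Reclaim descriptors of already-transmitted packets once enough
	 * of them have been marked used by the device.
	 */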
	if (nb_used >= VIRTIO_TX_FREE_THRESH)
		virtio_xmit_cleanup_simple(vq);

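	/* Each packet consumes two descriptors (header + data), so at most
	 * half of the free descriptor count can be committed per burst.
	 */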
	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
	start_dp = vq->vq_ring.desc;
	nb_tail = (uint16_t)(desc_idx_max + 1 - desc_idx);

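	/* If the burst wraps past the end of the ring, fill the tail
	 * entries first, then continue from descriptor 0.
	 */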
	if (nb_commit >= nb_tail) {
		for (i = 0; i < nb_tail; i++)
			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
		}
		nb_commit -= nb_tail;
		desc_idx = 0;
	}
	for (i = 0; i < nb_commit; i++)
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
			VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
	}

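	/* Ensure the descriptor stores above complete before the avail
	 * index update below makes them visible to the device.
	 */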
	rte_compiler_barrier();

	vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
	vq->vq_avail_idx += nb_pkts;
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	txvq->stats.packets += nb_pkts;

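	/* Kick the device only if it has not suppressed notifications */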
	if (likely(nb_pkts)) {
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	return nb_pkts;
}

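/* Precompute the 64-bit pattern the vector RX path writes over
 * rearm_data to reinitialize an mbuf header in a single store.
 */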
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
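	/* Snapshot the 8 bytes starting at rearm_data (the data_off,
	 * refcnt, nb_segs and port fields set above) as a single word.
	 */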
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;

	return 0;
}

/* Stub for linkage when an arch-specific implementation is not available */
uint16_t __attribute__((weak))
virtio_recv_pkts_vec(void *rx_queue __rte_unused,
		     struct rte_mbuf **rx_pkts __rte_unused,
		     uint16_t nb_pkts __rte_unused)
{
	rte_panic("Wrong weak function linked by linker\n");
	return 0;
}