/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <tmmintrin.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "virtio_rxtx_simple.h"

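/*
 * Tunables for the vectorized RX path: maximum burst size, number of
 * descriptors handled per loop iteration, and the free-descriptor threshold
 * at which the ring is refilled with new mbufs.
 */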
#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST

/* Virtio vPMD receive routine; only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX, one desc for each guest buffer.
 * This routine is based on the RX ring layout optimization: each entry in the
 * avail ring points to the desc with the same index in the desc ring, and this
 * is never changed by the driver.
 *
 * - If nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned.
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received = 0;
	__m128i shuf_msk1, shuf_msk2, len_adjust;

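	/*
	 * Each 16-byte load from the used ring covers two vring_used_elem
	 * entries (id, len).  The shuffle masks move the 16-bit length of the
	 * first (shuf_msk1) or second (shuf_msk2) element into the pkt_len and
	 * data_len slots of the mbuf's rx_descriptor_fields1 area; the
	 * remaining fields (packet type, vlan tci, hash) are zeroed by the
	 * 0xFF selectors.
	 */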
	shuf_msk1 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		5, 4,			/* dat len */
		0xFF, 0xFF, 5, 4,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);

	shuf_msk2 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		13, 12,			/* dat len */
		0xFF, 0xFF, 13, 12,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);

	/* Subtract the header length.
	 * In which case do we need the header length in used->len?
	 */
	len_adjust = _mm_set_epi16(
		0, 0,
		0,
		(uint16_t)-vq->hw->vtnet_hdr_size,
		0, (uint16_t)-vq->hw->vtnet_hdr_size,
		0, 0);
	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

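	/*
	 * Ensure the read of the used index above is not reordered by the
	 * compiler with the reads of the used ring entries below.
	 */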
	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

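	/*
	 * Locate the current position in the used ring and in the shadow
	 * mbuf ring (sw_ring); sw_ring_end marks the wrap point of the ring.
	 */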
	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_ring.used->ring[desc_idx];
	sw_ring  = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch0(rused);

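	/*
	 * Refill the ring with fresh mbufs once enough descriptors have been
	 * consumed, and notify the host if it expects a kick.
	 */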
	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

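	/*
	 * Main loop: handle RTE_VIRTIO_DESC_PER_LOOP descriptors per
	 * iteration.  Mbuf pointers and used elements are loaded two at a
	 * time, then each used element is shuffled into the corresponding
	 * mbuf's rx_descriptor_fields1 and adjusted for the header length.
	 */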
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		__m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

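		/*
		 * Copy the mbuf pointers from sw_ring straight into the
		 * caller's rx_pkts array while loading the matching pairs of
		 * used elements.
		 */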
		mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
		desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
		_mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);

		mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
		desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
		_mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);

		mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
		desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
		_mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);

		mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
		desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
		_mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);

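		/*
		 * For each pair of used elements: extract the lengths,
		 * subtract the virtio-net header, and store the result into
		 * the mbuf header fields.
		 */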
		pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
		pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
		pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
		pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		_mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
		pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
		pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
		pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		_mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
		pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
		pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
		pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		_mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
		pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
		pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
		pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		_mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);

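		/*
		 * Account for what was processed.  The burst stops early when
		 * fewer than RTE_VIRTIO_DESC_PER_LOOP entries remain or when
		 * the next step would reach or cross the end of the ring; in
		 * both cases only the entries up to the wrap point are
		 * counted.
		 */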
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused   += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}

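	/*
	 * Advance the used consumer index and the free-descriptor count by
	 * the number of descriptors actually consumed, and update RX stats.
	 */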
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}