/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Arm Corporation
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <rte_net.h>
#include <rte_vect.h>

#include "virtio_ethdev.h"
#include "virtio.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"

static inline int
virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
				   struct rte_mbuf **tx_pkts)
{
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t idx = vq->vq_avail_idx;
	struct virtio_net_hdr *hdr;
	struct vq_desc_extra *dxp;
	struct vring_packed_desc *p_desc;
	uint16_t i;

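	/*
	 * Only handle a batch that starts on an aligned avail index and does
	 * not run past the end of the descriptor ring.
	 */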
	if (idx & PACKED_BATCH_MASK)
		return -1;

	if (unlikely((idx + PACKED_BATCH_SIZE) > vq->vq_nentries))
		return -1;

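	/*
	 * rearm_data of each mbuf covers data_off, refcnt, nb_segs and port
	 * as four consecutive 16-bit fields; the table-lookup masks below
	 * pick these fields out of four mbufs at once.
	 */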
	/* Map the refcnt and nb_segs of four mbufs to one NEON register. */
	uint8x16_t ref_seg_msk = {
		2, 3, 4, 5,
		10, 11, 12, 13,
		18, 19, 20, 21,
		26, 27, 28, 29
	};

	/* Map the data_off of four mbufs to one NEON register. */
	uint8x8_t data_msk = {
		0, 1,
		8, 9,
		16, 17,
		24, 25
	};

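	/*
	 * AND mask that keeps only the first two 16-bit lanes of a 16-byte
	 * window and zeroes the rest; applied below to the header area when
	 * Tx offloads are disabled.
	 */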
	uint16x8_t net_hdr_msk = {
		0xFFFF, 0xFFFF,
		0, 0, 0, 0
	};

	uint16x4_t pkts[PACKED_BATCH_SIZE];
	uint8x16x2_t mbuf;
	/* Load the rearm data of four mbufs. */
	RTE_BUILD_BUG_ON(REFCNT_BITS_OFFSET >= 64);
	pkts[0] = vld1_u16((uint16_t *)&tx_pkts[0]->rearm_data);
	pkts[1] = vld1_u16((uint16_t *)&tx_pkts[1]->rearm_data);
	pkts[2] = vld1_u16((uint16_t *)&tx_pkts[2]->rearm_data);
	pkts[3] = vld1_u16((uint16_t *)&tx_pkts[3]->rearm_data);

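	/*
	 * Combine the four 8-byte rearm words into a two-register table for
	 * the vqtbl2 lookups below.
	 */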
	mbuf.val[0] = vreinterpretq_u8_u16(vcombine_u16(pkts[0], pkts[1]));
	mbuf.val[1] = vreinterpretq_u8_u16(vcombine_u16(pkts[2], pkts[3]));

	/* refcnt = 1 and nb_segs = 1 */
	uint32x4_t def_ref_seg = vdupq_n_u32(0x10001);
	/* Check refcnt and nb_segs. */
	uint32x4_t ref_seg = vreinterpretq_u32_u8(vqtbl2q_u8(mbuf, ref_seg_msk));
	uint64x2_t cmp1 = vreinterpretq_u64_u32(~vceqq_u32(ref_seg, def_ref_seg));
	if (unlikely(vgetq_lane_u64(cmp1, 0) || vgetq_lane_u64(cmp1, 1)))
		return -1;

	/* Check headroom is enough. */
	uint16x4_t head_rooms = vdup_n_u16(head_size);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, rearm_data));
	uint16x4_t data_offset = vreinterpret_u16_u8(vqtbl2_u8(mbuf, data_msk));
	uint64x1_t cmp2 = vreinterpret_u64_u16(vclt_u16(data_offset, head_rooms));
	if (unlikely(vget_lane_u64(cmp2, 0)))
		return -1;

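	/*
	 * Each packet takes exactly one descriptor; record the mbuf as the
	 * cookie so it can be freed once the descriptor is used.
	 */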
	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		dxp = &vq->vq_descx[idx + i];
		dxp->ndescs = 1;
		dxp->cookie = tx_pkts[i];
	}

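	/*
	 * Prepend room for the virtio net header: move data_off back by
	 * head_size and count the header in data_len.
	 */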
	virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
		tx_pkts[i]->data_off -= head_size;
		tx_pkts[i]->data_len += head_size;
	}

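	/*
	 * Build each descriptor as two 64-bit halves: val[0] carries the
	 * buffer address, val[1] packs data_len with the descriptor id and
	 * the cached descriptor flags.
	 */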
	uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
	uint64x2_t base_addr0 = {
		VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
		VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
	};
	uint64x2_t base_addr1 = {
		VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
		VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
	};

	desc[0].val[0] = base_addr0;
	desc[1].val[0] = base_addr1;

	uint64_t flags = (uint64_t)vq->vq_packed.cached_flags << FLAGS_LEN_BITS_OFFSET;
	uint64x2_t tx_desc0 = {
		flags | (uint64_t)idx << ID_BITS_OFFSET | tx_pkts[0]->data_len,
		flags | (uint64_t)(idx + 1) << ID_BITS_OFFSET | tx_pkts[1]->data_len
	};

	uint64x2_t tx_desc1 = {
		flags | (uint64_t)(idx + 2) << ID_BITS_OFFSET | tx_pkts[2]->data_len,
		flags | (uint64_t)(idx + 3) << ID_BITS_OFFSET | tx_pkts[3]->data_len
	};

	desc[0].val[1] = tx_desc0;
	desc[1].val[1] = tx_desc1;

	if (!vq->hw->has_tx_offload) {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
					struct virtio_net_hdr *, -head_size);
			/* Clear net hdr. */
			uint16x8_t v_hdr = vld1q_u16((void *)hdr);
			vst1q_u16((void *)hdr, vandq_u16(v_hdr, net_hdr_msk));
		}
	} else {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
					struct virtio_net_hdr *, -head_size);
			virtqueue_xmit_offload(hdr, tx_pkts[i]);
		}
	}

	/* Enqueue packet buffers. */
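	/*
	 * vst2q_u64 interleaves the address half and the len/id/flags half,
	 * so each store writes two complete 16-byte vring_packed_desc
	 * entries.
	 */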
	p_desc = &vq->vq_packed.ring.desc[idx];
	vst2q_u64((uint64_t *)p_desc, desc[0]);
	vst2q_u64((uint64_t *)(p_desc + 2), desc[1]);

	virtio_update_batch_stats(&txvq->stats, tx_pkts[0]->pkt_len,
			tx_pkts[1]->pkt_len, tx_pkts[2]->pkt_len,
			tx_pkts[3]->pkt_len);

	vq->vq_avail_idx += PACKED_BATCH_SIZE;
	vq->vq_free_cnt -= PACKED_BATCH_SIZE;

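	/*
	 * Wrap the avail index and flip the cached avail/used flag bits when
	 * the end of the ring is reached.
	 */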
	if (vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^=
			VRING_PACKED_DESC_F_AVAIL_USED;
	}

	return 0;
}

static inline int
virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
				   struct rte_mbuf **rx_pkts)
{
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t head_size = hw->vtnet_hdr_size;
	uint16_t id = vq->vq_used_cons_idx;
	struct vring_packed_desc *p_desc;
	uint16_t i;

	if (id & PACKED_BATCH_MASK)
		return -1;

	if (unlikely((id + PACKED_BATCH_SIZE) > vq->vq_nentries))
		return -1;

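	/*
	 * The shuffle masks below rearrange the high 64 bits of each
	 * descriptor (len, id, flags) into the mbuf rx_descriptor_fields1
	 * layout; lanes indexed with 0xFF come out as zero.
	 */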
	/* Map packed descriptor to mbuf fields. */
	uint8x16_t shuf_msk1 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
		0, 1,			/* octet 1~0, low 16 bits pkt_len */
		0xFF, 0xFF,		/* skip high 16 bits of pkt_len, zero out */
		0, 1,			/* octet 1~0, 16 bits data_len */
		0xFF, 0xFF,		/* vlan tci set as unknown */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	uint8x16_t shuf_msk2 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type set as unknown */
		8, 9,			/* octet 9~8, low 16 bits pkt_len */
		0xFF, 0xFF,		/* skip high 16 bits of pkt_len, zero out */
		8, 9,			/* octet 9~8, 16 bits data_len */
		0xFF, 0xFF,		/* vlan tci set as unknown */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	/* Subtract the header length. */
	uint16x8_t len_adjust = {
		0, 0,		/* ignore pkt_type field */
		head_size,	/* sub head_size on pkt_len */
		0,		/* ignore high 16 bits of pkt_len */
		head_size,	/* sub head_size on data_len */
		0, 0, 0		/* ignore non-length fields */
	};

	uint64x2_t desc[PACKED_BATCH_SIZE / 2];
	uint64x2x2_t mbp[PACKED_BATCH_SIZE / 2];
	uint64x2_t pkt_mb[PACKED_BATCH_SIZE];

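	/*
	 * vld2q_u64 de-interleaves two adjacent descriptors: val[0] receives
	 * the addr words and val[1] the len/id/flags words.
	 */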
	p_desc = &vq->vq_packed.ring.desc[id];
	/* Load high 64 bits of packed descriptor 0,1. */
	desc[0] = vld2q_u64((uint64_t *)(p_desc)).val[1];
	/* Load high 64 bits of packed descriptor 2,3. */
	desc[1] = vld2q_u64((uint64_t *)(p_desc + 2)).val[1];

	/* Only care about the avail/used bits. */
	uint32x4_t v_mask = vdupq_n_u32(PACKED_FLAGS_MASK);
	/* Extract high 32 bits of packed descriptor (id, flags). */
	uint32x4_t v_desc = vuzp2q_u32(vreinterpretq_u32_u64(desc[0]),
				vreinterpretq_u32_u64(desc[1]));
	uint32x4_t v_flag = vandq_u32(v_desc, v_mask);

	uint32x4_t v_used_flag = vdupq_n_u32(0);
	if (vq->vq_packed.used_wrap_counter)
		v_used_flag = vdupq_n_u32(PACKED_FLAGS_MASK);

	uint64x2_t desc_stats = vreinterpretq_u64_u32(~vceqq_u32(v_flag, v_used_flag));

	/* Check that all descriptors have been used. */
	if (unlikely(vgetq_lane_u64(desc_stats, 0) || vgetq_lane_u64(desc_stats, 1)))
		return -1;

	/* Load two mbuf pointers at a time. */
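	/*
	 * The mbuf (cookie) pointer sits in the first 8 bytes of each 16-byte
	 * vq_desc_extra entry, so the de-interleaving load gathers two mbuf
	 * pointers into val[0].
	 */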
	mbp[0] = vld2q_u64((uint64_t *)&vq->vq_descx[id]);
	vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0].val[0]);

	mbp[1] = vld2q_u64((uint64_t *)&vq->vq_descx[id + 2]);
	vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1].val[0]);

	/**
	 *  Update the packet length and data length from each descriptor.
	 *  Structure of pkt_mb:
	 *  --------------------------------------------------------------------
	 *  |32 bits pkt_type|32 bits pkt_len|16 bits data_len|16 bits vlan_tci|
	 *  --------------------------------------------------------------------
	 */
	pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[0]), shuf_msk1));
	pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[0]), shuf_msk2));
	pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[1]), shuf_msk1));
	pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
			vreinterpretq_u8_u64(desc[1]), shuf_msk2));

	pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
	pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
	pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
	pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
			vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));

	vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1, pkt_mb[0]);
	vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1, pkt_mb[1]);
	vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1, pkt_mb[2]);
	vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1, pkt_mb[3]);

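	/*
	 * When Rx offloads are negotiated, parse the virtio net header in
	 * front of each packet and fill in the corresponding mbuf offload
	 * fields.
	 */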
	if (hw->has_rx_offload) {
		virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
			char *addr = (char *)rx_pkts[i]->buf_addr +
				RTE_PKTMBUF_HEADROOM - head_size;
			virtio_vec_rx_offload(rx_pkts[i],
					(struct virtio_net_hdr *)addr);
		}
	}

	virtio_update_batch_stats(&rxvq->stats, rx_pkts[0]->pkt_len,
			rx_pkts[1]->pkt_len, rx_pkts[2]->pkt_len,
			rx_pkts[3]->pkt_len);

	vq->vq_free_cnt += PACKED_BATCH_SIZE;

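	/*
	 * Advance the used consumer index, wrapping and toggling the wrap
	 * counter when the end of the ring is crossed.
	 */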
	vq->vq_used_cons_idx += PACKED_BATCH_SIZE;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->vq_packed.used_wrap_counter ^= 1;
	}

	return 0;
}