/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

#include "base/i40e_prototype.h"
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_rxtx_vec_common.h"

#include <rte_vect.h>

static inline void
i40e_rxq_rearm(struct i40e_rx_queue *rxq)
{
	int i;
	uint16_t rx_id;
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
			RTE_PKTMBUF_HEADROOM);
	__m128i dma_addr0, dma_addr1;

	rxdp = rxq->rx_ring + rxq->rxrearm_start;

	/* Pull 'n' more MBUFs into the software ring */
	if (rte_mempool_get_bulk(rxq->mp,
				 (void *)rxep,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
		    rxq->nb_rx_desc) {
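			/* Allocation failed and the ring is nearly exhausted:
			 * park the next few sw_ring entries on the fake mbuf
			 * and zero the descriptors so the vector Rx scan,
			 * which always reads RTE_I40E_DESCS_PER_LOOP
			 * descriptors at a time, sees clean entries instead
			 * of stale DD bits.
			 */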
			dma_addr0 = _mm_setzero_si128();
			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
				rxep[i].mbuf = &rxq->fake_mbuf;
				_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i].read),
						dma_addr0);
			}
		}
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_I40E_RXQ_REARM_THRESH;
		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
		__m128i vaddr0, vaddr1;

		mb0 = rxep[0].mbuf;
		mb1 = rxep[1].mbuf;

		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
				offsetof(struct rte_mbuf, buf_addr) + 8);
		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* convert pa to dma_addr hdr/data */
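		/* unpackhi copies buf_iova into both 64-bit lanes so that the
		 * same address (plus headroom) is written to the descriptor's
		 * packet and header buffer address fields below
		 */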
		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);

		/* add headroom to pa values */
		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);

		/* flush desc with pa dma_addr */
		_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr0);
		_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp++->read), dma_addr1);
	}

	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
	rx_id = rxq->rxrearm_start - 1;

	if (unlikely(rxq->rxrearm_start >= rxq->nb_rx_desc)) {
		rxq->rxrearm_start = 0;
		rx_id = rxq->nb_rx_desc - 1;
	}

	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;

	/* Update the tail pointer on the NIC */
	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}

#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
/* SSE version of FDIR mark extraction for 4 32B descriptors at a time */
static inline __m128i
descs_to_fdir_32b(volatile union i40e_rx_desc *rxdp, struct rte_mbuf **rx_pkt)
{
	/* 32B descriptors: Load 2nd half of descriptors for FDIR ID data */
	__m128i desc0_qw23, desc1_qw23, desc2_qw23, desc3_qw23;
	desc0_qw23 = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, &(rxdp + 0)->wb.qword2));
	desc1_qw23 = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, &(rxdp + 1)->wb.qword2));
	desc2_qw23 = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, &(rxdp + 2)->wb.qword2));
	desc3_qw23 = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, &(rxdp + 3)->wb.qword2));

	/* FDIR ID data: move last u32 of each desc to 4 u32 lanes */
	__m128i v_unpack_01, v_unpack_23;
	v_unpack_01 = _mm_unpackhi_epi32(desc0_qw23, desc1_qw23);
	v_unpack_23 = _mm_unpackhi_epi32(desc2_qw23, desc3_qw23);
	__m128i v_fdir_ids = _mm_unpackhi_epi64(v_unpack_01, v_unpack_23);

	/* Extended Status: extract from each lower 32 bits, to u32 lanes */
	v_unpack_01 = _mm_unpacklo_epi32(desc0_qw23, desc1_qw23);
	v_unpack_23 = _mm_unpacklo_epi32(desc2_qw23, desc3_qw23);
	__m128i v_flt_status = _mm_unpacklo_epi64(v_unpack_01, v_unpack_23);

	/* Shift u32 left and right to "mask away" bits not required.
	 * Data required is 4:5 (zero based), so left shift by 26 (32-6)
	 * and then right shift by 30 (32 - 2 bits required).
	 */
	v_flt_status = _mm_slli_epi32(v_flt_status, 26);
	v_flt_status = _mm_srli_epi32(v_flt_status, 30);

	/* Generate constant 1 in all u32 lanes and compare */
	RTE_BUILD_BUG_ON(I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID != 1);
	__m128i v_zeros = _mm_setzero_si128();
	__m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros);
	__m128i v_u32_one = _mm_srli_epi32(v_ffff, 31);

	/* per desc mask, bits set if FDIR ID is valid */
	__m128i v_fd_id_mask = _mm_cmpeq_epi32(v_flt_status, v_u32_one);

	/* Mask ID data to zero if the FD_ID bit not set in desc */
	v_fdir_ids = _mm_and_si128(v_fdir_ids, v_fd_id_mask);

	/* Extract and store as u32. There is no advantage to combining these
	 * into SSE stores, as there are no surrounding stores around fdir.hi.
	 */
	rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0);
	rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1);
	rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2);
	rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3);

	/* convert fdir_id_mask into a single bit, then shift as required for
	 * correct location in the mbuf->olflags
	 */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << I40E_FDIR_ID_BIT_SHIFT));
	v_fd_id_mask = _mm_srli_epi32(v_fd_id_mask, 31);
	v_fd_id_mask = _mm_slli_epi32(v_fd_id_mask, I40E_FDIR_ID_BIT_SHIFT);

	/* The returned value must be combined into each mbuf. This is already
	 * being done for RSS and VLAN mbuf olflags, so return bits to OR in.
	 */
	return v_fd_id_mask;
}

#else /* 32 or 16B FDIR ID handling */

/* Handle 16B descriptor FDIR ID flag setting based on FLM. See scalar driver
 * for scalar implementation of the same functionality.
 */
static inline __m128i
descs_to_fdir_16b(__m128i fltstat, __m128i descs[4], struct rte_mbuf **rx_pkt)
{
	/* unpack filter-status data from descriptors */
	__m128i v_tmp_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
	__m128i v_tmp_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
	__m128i v_fdir_ids = _mm_unpackhi_epi64(v_tmp_01, v_tmp_23);

	/* Generate 0b111 and 0b11 masks in each u32 lane */
	__m128i v_zeros = _mm_setzero_si128();
	__m128i v_ffff = _mm_cmpeq_epi32(v_zeros, v_zeros);
	__m128i v_111_mask = _mm_srli_epi32(v_ffff, 29);
	__m128i v_11_mask = _mm_srli_epi32(v_ffff, 30);

	/* Top lane ones mask for FDIR isolation */
	__m128i v_desc_fdir_mask = _mm_insert_epi32(v_zeros, UINT32_MAX, 1);
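	/* (only dword lane 1 of each 16B descriptor - the RSS hash / FDIR ID
	 * field - is let through by this mask in the blends below)
	 */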

	/* Compare and mask away FDIR ID data if bit not set */
	__m128i v_u32_bits = _mm_and_si128(v_111_mask, fltstat);
	__m128i v_fdir_id_mask = _mm_cmpeq_epi32(v_u32_bits, v_11_mask);
	v_fdir_ids = _mm_and_si128(v_fdir_id_mask, v_fdir_ids);

	/* Store data to fdir.hi in mbuf */
	rx_pkt[0]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 0);
	rx_pkt[1]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 1);
	rx_pkt[2]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 2);
	rx_pkt[3]->hash.fdir.hi = _mm_extract_epi32(v_fdir_ids, 3);

	/* Move fdir_id_mask to correct lane, blend RSS to zero on hits */
	__m128i v_desc3_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 8);
	__m128i v_desc3_mask = _mm_and_si128(v_desc_fdir_mask, v_desc3_shift);
	descs[3] = _mm_blendv_epi8(descs[3], _mm_setzero_si128(), v_desc3_mask);

	__m128i v_desc2_shift = _mm_alignr_epi8(v_zeros, v_fdir_id_mask, 4);
	__m128i v_desc2_mask = _mm_and_si128(v_desc_fdir_mask, v_desc2_shift);
	descs[2] = _mm_blendv_epi8(descs[2], _mm_setzero_si128(), v_desc2_mask);

	__m128i v_desc1_shift = v_fdir_id_mask;
	__m128i v_desc1_mask = _mm_and_si128(v_desc_fdir_mask, v_desc1_shift);
	descs[1] = _mm_blendv_epi8(descs[1], _mm_setzero_si128(), v_desc1_mask);

	__m128i v_desc0_shift = _mm_alignr_epi8(v_fdir_id_mask, v_zeros, 12);
	__m128i v_desc0_mask = _mm_and_si128(v_desc_fdir_mask, v_desc0_shift);
	descs[0] = _mm_blendv_epi8(descs[0], _mm_setzero_si128(), v_desc0_mask);

	/* Shift to 1 or 0 bit per u32 lane, then to RTE_MBUF_F_RX_FDIR_ID offset */
	RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << I40E_FDIR_ID_BIT_SHIFT));
	__m128i v_mask_one_bit = _mm_srli_epi32(v_fdir_id_mask, 31);
	return _mm_slli_epi32(v_mask_one_bit, I40E_FDIR_ID_BIT_SHIFT);
}
#endif

static inline void
desc_to_olflags_v(struct i40e_rx_queue *rxq, volatile union i40e_rx_desc *rxdp,
		  __m128i descs[4], struct rte_mbuf **rx_pkts)
{
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	__m128i rearm0, rearm1, rearm2, rearm3;

	__m128i vlan0, vlan1, rss, l3_l4e;

	/* mask everything except RSS, flow director and VLAN flags
	 * bit2 is for VLAN tag, bit11 for flow director indication
	 * bit13:12 for RSS indication.
	 */
	const __m128i rss_vlan_msk = _mm_set_epi32(
			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);

	const __m128i cksum_mask = _mm_set_epi32(
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
			RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
			RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);

	/* map rss and vlan type to rss hash and vlan flag */
	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			0, 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
			0, 0, 0, 0);

	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
			0, 0, 0, 0,
			RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
			0, 0, RTE_MBUF_F_RX_FDIR, 0);

	const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
			/* shift right 1 bit to make sure it does not exceed 255 */
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD  |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD  |
			 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			 RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
			 RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD  | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_BAD  | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
			(RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);

	/* Unpack "status" from quadword 1, bits 0:32 */
	vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
	vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
	vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);

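	/* _mm_shuffle_epi8() is used as a 16-entry byte lookup table below:
	 * the masked (and shifted) status bits select the matching ol_flags
	 * byte from the vlan_flags, rss_flags and l3_l4e_flags tables.
	 */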
	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);

	const __m128i desc_fltstat = _mm_srli_epi32(vlan1, 11);
	rss = _mm_shuffle_epi8(rss_flags, desc_fltstat);

	l3_l4e = _mm_srli_epi32(vlan1, 22);
	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
	/* then we shift left 1 bit */
	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
	/* we need to mask out the redundant bits */
	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);

	vlan0 = _mm_or_si128(vlan0, rss);
	vlan0 = _mm_or_si128(vlan0, l3_l4e);

	/* Extract FDIR ID only if FDIR is enabled to avoid useless work */
	if (rxq->fdir_enabled) {
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
		__m128i v_fdir_ol_flags = descs_to_fdir_32b(rxdp, rx_pkts);
#else
		(void)rxdp; /* rxdp not required for 16B desc mode */
		__m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,
							    descs, rx_pkts);
#endif
		/* OR in ol_flag bits after descriptor specific extraction */
		vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);
	}

	/*
	 * At this point, we have the 4 sets of flags in the low 16-bits
	 * of each 32-bit value in vlan0.
	 * We want to extract these, and merge them with the mbuf init data
	 * so we can do a single 16-byte write to the mbuf to set the flags
	 * and all the other initialization fields. Extracting the
	 * appropriate flags means that we have to do a shift and blend for
	 * each mbuf before we do the write.
	 */
	rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
	rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
	rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);

	/* write the rearm data and the olflags in one write */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			offsetof(struct rte_mbuf, rearm_data) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}

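/* The packet length field lives at bits 38:51 of descriptor qword1; shifting
 * each 32-bit lane left by PKTLEN_SHIFT moves it into the upper 16 bits of the
 * register so it can be blended and then shuffled straight into the mbuf
 * pkt_len/data_len fields.
 */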
#define PKTLEN_SHIFT     10

static inline void
desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
		uint32_t *ptype_tbl)
{
	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);

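	/* The 8-bit packet type sits at bits 30:37 of qword1; a 64-bit right
	 * shift by 30 drops it into the low byte of each lane, which then
	 * indexes the adapter's ptype lookup table.
	 */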
	ptype0 = _mm_srli_epi64(ptype0, 30);
	ptype1 = _mm_srli_epi64(ptype1, 30);

	rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
	rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
	rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}

/**
 * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP.
 *
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
 * - nb_pkts is floor-aligned to a multiple of RTE_I40E_DESCS_PER_LOOP
 */
static inline uint16_t
_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, uint8_t *split_packet)
{
	volatile union i40e_rx_desc *rxdp;
	struct i40e_rx_entry *sw_ring;
	uint16_t nb_pkts_recd;
	int pos;
	uint64_t var;
	__m128i shuf_msk;
	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;

	__m128i crc_adjust = _mm_set_epi16(
				0, 0, 0,    /* ignore non-length fields */
				-rxq->crc_len, /* sub crc on data_len */
				0,          /* ignore high-16bits of pkt_len */
				-rxq->crc_len, /* sub crc on pkt_len */
				0, 0            /* ignore pkt_type field */
			);
	/*
	 * compile-time check the above crc_adjust layout is correct.
	 * NOTE: the first field (lowest address) is given last in set_epi16
	 * call above.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	__m128i dd_check, eop_check;

	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);

	/* Just the act of getting into the function from the application is
	 * going to cost about 7 cycles
	 */
	rxdp = rxq->rx_ring + rxq->rx_tail;

	rte_prefetch0(rxdp);

	/* See if we need to rearm the RX queue - gives the prefetch a bit
	 * of time to act
	 */
	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
		i40e_rxq_rearm(rxq);

	/* Before we start moving massive data around, check to see if
	 * there is actually a packet available
	 */
	if (!(rxdp->wb.qword1.status_error_len &
			rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
		return 0;

	/* 4 packets DD mask */
	dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);

	/* 4 packets EOP mask */
	eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);

	/* mask to shuffle from desc. to mbuf */
	shuf_msk = _mm_set_epi8(
		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
		3, 2,        /* octet 2~3, low 16 bits vlan_macip */
		15, 14,      /* octet 15~14, 16 bits data_len */
		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
		15, 14,      /* octet 15~14, low 16 bits pkt_len */
		0xFF, 0xFF,  /* pkt_type set as unknown */
		0xFF, 0xFF   /* pkt_type set as unknown */
		);
	/*
	 * Compile-time verify the shuffle mask
	 * NOTE: some field positions already verified above, but duplicated
	 * here for completeness in case of future modifications.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

	/* Cache is empty -> need to scan the buffer rings, but first move
	 * the next 'n' mbufs into the cache
	 */
	sw_ring = &rxq->sw_ring[rxq->rx_tail];

	/* A. load 4 packets' descriptors in one loop
	 * [A*. mask out 4 unused dirty fields in the descriptors]
	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
	 * C. calc the number of DD bits among the 4 packets
	 * [C*. extract the end-of-packet bit, if requested]
	 * D. fill info from the descriptors into the mbufs
	 */

	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
			pos += RTE_I40E_DESCS_PER_LOOP,
			rxdp += RTE_I40E_DESCS_PER_LOOP) {
		__m128i descs[RTE_I40E_DESCS_PER_LOOP];
		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
		__m128i mbp1;
#if defined(RTE_ARCH_X86_64)
		__m128i mbp2;
#endif

		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
		/* Read desc statuses backwards to avoid race condition */
		/* A.1 load desc[3] */
		descs[3] = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, rxdp + 3));
		rte_compiler_barrier();

		/* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);

#if defined(RTE_ARCH_X86_64)
		/* B.1 load 2 64 bit mbuf pointers */
		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
#endif

		/* A.1 load desc[2-0] */
		descs[2] = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, rxdp + 2));
		rte_compiler_barrier();
		descs[1] = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, rxdp + 1));
		rte_compiler_barrier();
		descs[0] = _mm_loadu_si128(RTE_CAST_PTR(const __m128i *, rxdp));

#if defined(RTE_ARCH_X86_64)
		/* B.2 copy 2 mbuf pointers into rx_pkts */
		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
#endif

		if (split_packet) {
			rte_mbuf_prefetch_part2(rx_pkts[pos]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
		}

		/* avoid compiler reorder optimization */
		rte_compiler_barrier();

		/* pkts 3,4: shift the pktlen field to be 16-bit aligned */
		const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
		const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
		descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);

		/* C.1 4=>2 filter staterr info only */
		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
		/* C.1 4=>2 filter staterr info only */
		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);

		desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]);

		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);

		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);

		/* pkts 1,2: shift the pktlen field to be 16-bit aligned */
		const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
		const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);

		/* merge the now-aligned packet length fields back in */
		descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
		descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);

		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);

		/* C.2 get 4 pkts staterr value  */
		zero = _mm_xor_si128(dd_check, dd_check);
		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);

		/* D.3 copy final 3,4 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
				 pkt_mb4);
		_mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
				 pkt_mb3);

		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);

		/* C* extract and record EOP bit */
		if (split_packet) {
			__m128i eop_shuf_mask = _mm_set_epi8(
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0xFF, 0xFF, 0xFF, 0xFF,
					0x04, 0x0C, 0x00, 0x08
					);
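			/* eop_shuf_mask reorders the out-of-order staterr
			 * lanes (descriptors 1, 3, 0, 2) back into packet
			 * order and packs one EOP byte per packet into the
			 * low 32 bits.
			 */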

			/* andnot with the EOP mask to extract the bits, inverting 0 and 1 */
			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
			/* The staterr values are not in packet order; that
			 * does not matter when counting DD bits, but it does
			 * for end-of-packet tracking, so shuffle. This also
			 * compresses the 32-bit values to 8 bits.
			 */
			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
			/* store the resulting 32-bit value */
			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
			split_packet += RTE_I40E_DESCS_PER_LOOP;
		}

		/* C.3 calc available number of desc */
		staterr = _mm_and_si128(staterr, dd_check);
		staterr = _mm_packs_epi32(staterr, zero);
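		/* After the and + pack above, each completed descriptor
		 * contributes exactly one set bit to the low 64 bits, so the
		 * popcount further down yields the number of packets received
		 * in this group of four descriptors.
		 */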

		/* D.3 copy final 1,2 data to rx_pkts */
		_mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
				 pkt_mb2);
		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
				 pkt_mb1);
		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
		/* C.4 calc available number of desc */
		var = rte_popcount64(_mm_cvtsi128_si64(staterr));
		nb_pkts_recd += var;
		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
			break;
	}

	/* Update our internal tail pointer */
	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);

	return nb_pkts_recd;
}

/*
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
 */
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}

/**
 * vPMD receive routine that reassembles a single burst of 32 scattered packets
 *
 * Notice:
 * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are received
 */
static uint16_t
i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts)
{
	struct i40e_rx_queue *rxq = rx_queue;
	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};

	/* get some new buffers */
	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
			split_flags);
	if (nb_bufs == 0)
		return 0;

	/* happy day case, full burst + no packets to be joined */
	const uint64_t *split_fl64 = (uint64_t *)split_flags;

	if (rxq->pkt_first_seg == NULL &&
			split_fl64[0] == 0 && split_fl64[1] == 0 &&
			split_fl64[2] == 0 && split_fl64[3] == 0)
		return nb_bufs;

	/* reassemble any packets that need reassembly */
	unsigned int i = 0;

	if (rxq->pkt_first_seg == NULL) {
		/* find the first split flag, and only reassemble from there */
		while (i < nb_bufs && !split_flags[i])
			i++;
		if (i == nb_bufs)
			return nb_bufs;
		rxq->pkt_first_seg = rx_pkts[i];
	}
	return i + ci_rx_reassemble_packets(&rx_pkts[i], nb_bufs - i, &split_flags[i],
			&rxq->pkt_first_seg, &rxq->pkt_last_seg, rxq->crc_len);
}

/**
 * vPMD receive routine that reassembles scattered packets.
 */
uint16_t
i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
			     uint16_t nb_pkts)
{
	uint16_t retval = 0;

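	/* The burst helper handles at most RTE_I40E_VPMD_RX_BURST packets at
	 * a time (the size of its split_flags array), so larger requests are
	 * chopped into full-size bursts first.
	 */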
	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
		uint16_t burst;

		burst = i40e_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      RTE_I40E_VPMD_RX_BURST);
		retval += burst;
		nb_pkts -= burst;
		if (burst < RTE_I40E_VPMD_RX_BURST)
			return retval;
	}

	return retval + i40e_recv_scattered_burst_vec(rx_queue,
						      rx_pkts + retval,
						      nb_pkts);
}

static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf *pkt, uint64_t flags)
{
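	/* Build one Tx data descriptor: the buffer DMA address goes into the
	 * low quadword; DTYPE, command flags and buffer size are packed into
	 * the high quadword.
	 */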
	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));

	__m128i descriptor = _mm_set_epi64x(high_qw,
				pkt->buf_iova + pkt->data_off);
	_mm_store_si128(RTE_CAST_PTR(__m128i *, txdp), descriptor);
}

static inline void
vtx(volatile struct i40e_tx_desc *txdp,
		struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
{
	int i;

	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
		vtx1(txdp, *pkt, flags);
}

uint16_t
i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
	volatile struct i40e_tx_desc *txdp;
	struct ci_tx_entry *txep;
	uint16_t n, nb_commit, tx_id;
	uint64_t flags = I40E_TD_CMD;
	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
	int i;

	if (txq->nb_tx_free < txq->tx_free_thresh)
		i40e_tx_free_bufs(txq);

	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
	if (unlikely(nb_pkts == 0))
		return 0;

	tx_id = txq->tx_tail;
	txdp = &txq->tx_ring[tx_id];
	txep = &txq->sw_ring[tx_id];

	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);

	n = (uint16_t)(txq->nb_tx_desc - tx_id);
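	/* If the burst crosses the end of the ring, first fill the
	 * descriptors up to the ring end (setting RS on the last one), then
	 * wrap tx_id back to zero for the remainder.
	 */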
	if (nb_commit >= n) {
		ci_tx_backlog_entry(txep, tx_pkts, n);

		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
			vtx1(txdp, *tx_pkts, flags);

		vtx1(txdp, *tx_pkts++, rs);

		nb_commit = (uint16_t)(nb_commit - n);

		tx_id = 0;
		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

		/* wrap back to the start of the ring */
		txdp = &txq->tx_ring[tx_id];
		txep = &txq->sw_ring[tx_id];
	}

	ci_tx_backlog_entry(txep, tx_pkts, nb_commit);

	vtx(txdp, tx_pkts, nb_commit, flags);

	tx_id = (uint16_t)(tx_id + nb_commit);
	if (tx_id > txq->tx_next_rs) {
		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
						I40E_TXD_QW1_CMD_SHIFT);
		txq->tx_next_rs =
			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
	}

	txq->tx_tail = tx_id;

	I40E_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);

	return nb_pkts;
}

void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
	_i40e_rx_queue_release_mbufs_vec(rxq);
}

int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
	return i40e_rxq_vec_setup_default(rxq);
}

int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
	return 0;
}

int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
	return i40e_rx_vec_dev_conf_condition_check_default(dev);
}