xref: /dpdk/drivers/net/mlx5/mlx5_rx.c (revision a41f593f1bce27cd94eae0e85a8085c592b14b30)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_flow.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"


static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe);

static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);

static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);

static __rte_always_inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe);

static inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type);

static inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len);

/**
 * Internal function to compute the number of used descriptors in an RX queue.
 *
 * @param rxq
 *   The Rx queue.
 *
 * @return
 *   The number of used Rx descriptors.
 */
static uint32_t
rx_queue_count(struct mlx5_rxq_data *rxq)
{
	struct rxq_zip *zip = &rxq->zip;
	volatile struct mlx5_cqe *cqe;
	const unsigned int cqe_n = (1 << rxq->cqe_n);
	const unsigned int sges_n = (1 << rxq->sges_n);
	const unsigned int elts_n = (1 << rxq->elts_n);
	const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
	const unsigned int cqe_cnt = cqe_n - 1;
	unsigned int cq_ci, used;

	/* If we are processing a compressed CQE. */
	if (zip->ai) {
		used = zip->cqe_cnt - zip->ai;
		cq_ci = zip->cq_ci;
	} else {
		used = 0;
		cq_ci = rxq->cq_ci;
	}
	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
		int8_t op_own;
		unsigned int n;

		op_own = cqe->op_own;
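		/*
		 * For a compressed CQE, byte_cnt holds the number of
		 * mini-CQEs (i.e. packets) in the session, not a byte count.
		 */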
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
			n = rte_be_to_cpu_32(cqe->byte_cnt);
		else
			n = 1;
		cq_ci += n;
		used += n;
		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	}
	used = RTE_MIN(used * sges_n, elts_n * strd_n);
	return used;
}

/**
 * DPDK callback to check the status of an Rx descriptor.
 *
 * @param rx_queue
 *   The Rx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Rx descriptor.
 */
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct mlx5_rxq_data *rxq = rx_queue;

	if (offset >= (1 << rxq->cqe_n)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (offset < rx_queue_count(rxq))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
}

/**
 * DPDK callback to get the RX queue information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param rx_queue_id
 *   Rx queue identifier.
 *
 * @param qinfo
 *   Pointer to the RX queue information structure.
 *
 * @return
 *   None.
 */
void
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		  struct rte_eth_rxq_info *qinfo)
{
	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
	struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);

	if (!rxq)
		return;
	qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
					rxq->mprq_mp : rxq->mp;
	qinfo->conf.rx_thresh.pthresh = 0;
	qinfo->conf.rx_thresh.hthresh = 0;
	qinfo->conf.rx_thresh.wthresh = 0;
	qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
	qinfo->conf.rx_drop_en = 1;
	if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
		qinfo->conf.rx_deferred_start = 0;
	else
		qinfo->conf.rx_deferred_start = 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
		RTE_BIT32(rxq->elts_n);
}

/**
 * DPDK callback to get the RX packet burst mode information.
 *
 * @param dev
 *   Pointer to the device structure.
 *
 * @param rx_queue_id
 *   Rx queue identifier.
 *
 * @param mode
 *   Pointer to the burst mode information.
 *
 * @return
 *   0 on success, -EINVAL on failure.
 */
int
mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
		       uint16_t rx_queue_id __rte_unused,
		       struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);

	if (!rxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (pkt_burst == mlx5_rx_burst) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
	} else if (pkt_burst == mlx5_rx_burst_mprq) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
	} else if (pkt_burst == mlx5_rx_burst_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
#else
		return -EINVAL;
#endif
	} else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
#else
		return -EINVAL;
#endif
	} else {
		return -EINVAL;
	}
	return 0;
}

/**
 * DPDK callback to get the number of used descriptors in an RX queue.
 *
 * @param rx_queue
 *   The Rx queue pointer.
 *
 * @return
 *   The number of used Rx descriptors.
 *   -EINVAL if the queue is invalid.
 */
uint32_t
mlx5_rx_queue_count(void *rx_queue)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	struct rte_eth_dev *dev;

	if (!rxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	dev = &rte_eth_devices[rxq->port_id];

	if (dev->rx_pkt_burst == NULL ||
	    dev->rx_pkt_burst == rte_eth_pkt_burst_dummy) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	return rx_queue_count(rxq);
}

#define CLB_VAL_IDX 0
#define CLB_MSK_IDX 1
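/*
 * Comparison callback for rte_power_monitor(): returning -1 aborts the
 * power-optimized wait. The wait is aborted once the CQE ownership bits
 * match the expected owner value, i.e. the CQE at the current consumer
 * index has been handed over to software and new work is available.
 */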
static int
mlx5_monitor_callback(const uint64_t value,
		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
{
	const uint64_t m = opaque[CLB_MSK_IDX];
	const uint64_t v = opaque[CLB_VAL_IDX];

	return (value & m) == v ? -1 : 0;
}

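/**
 * DPDK callback to provide the power-monitor condition for an Rx queue
 * (the get_monitor_addr ethdev operation): software waits on the op_own
 * byte of the next expected CQE until hardware flips its ownership bits.
 *
 * @param rx_queue
 *   The Rx queue.
 * @param[out] pmc
 *   Pointer to the monitor condition to fill.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */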
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	const unsigned int cqe_num = 1 << rxq->cqe_n;
	const unsigned int cqe_mask = cqe_num - 1;
	const uint16_t idx = rxq->cq_ci & cqe_num;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];

	if (unlikely(rxq->cqes == NULL)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	pmc->addr = &cqe->op_own;
	pmc->opaque[CLB_VAL_IDX] = !!idx;
	pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK;
	pmc->fn = mlx5_monitor_callback;
	pmc->size = sizeof(uint8_t);
	return 0;
}

/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
				   volatile struct mlx5_mini_cqe8 *mcqe)
{
	uint8_t idx;
	uint8_t ptype;
	uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;

	/* Get l3/l4 header from mini-CQE in case of L3/L4 format. */
	if (mcqe == NULL ||
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
	else
		ptype = mcqe->hdr_type >> 2;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
	idx = pinfo | ptype;
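	/* Branchless merge: OR in the queue's tunnel ptype flags only when
	 * the tunneled bit (6) is set in the index.
	 */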
	return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}

/**
 * Initialize Rx WQ and indexes.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 */
void
mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
{
	const unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;

	for (i = 0; (i != wqe_n); ++i) {
		volatile struct mlx5_wqe_data_seg *scat;
		uintptr_t addr;
		uint32_t byte_count;
		uint32_t lkey;

		if (mlx5_rxq_mprq_enabled(rxq)) {
			struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];

			scat = &((volatile struct mlx5_wqe_mprq *)
				rxq->wqes)[i].dseg;
			addr = (uintptr_t)mlx5_mprq_buf_addr
					(buf, RTE_BIT32(rxq->log_strd_num));
			byte_count = RTE_BIT32(rxq->log_strd_sz) *
				     RTE_BIT32(rxq->log_strd_num);
			lkey = mlx5_rx_addr2mr(rxq, addr);
		} else {
			struct rte_mbuf *buf = (*rxq->elts)[i];

			scat = &((volatile struct mlx5_wqe_data_seg *)
					rxq->wqes)[i];
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			byte_count = DATA_LEN(buf);
			lkey = mlx5_rx_mb2mr(rxq, buf);
		}
		/* scat->addr must be able to store a pointer. */
		MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(addr),
			.byte_count = rte_cpu_to_be_32(byte_count),
			.lkey = lkey,
		};
	}
	rxq->consumed_strd = 0;
	rxq->decompressed = 0;
	rxq->rq_pi = 0;
	rxq->zip = (struct rxq_zip){
		.ai = 0,
	};
	rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
		(wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
	/* Update doorbell counter. */
	rxq->rq_ci = wqe_n >> rxq->sges_n;
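	/* Make all WQE and index writes globally visible before ringing the
	 * doorbell record.
	 */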
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

/**
 * Handle an Rx error.
 * The function sets the RQ state to reset when the first error CQE is
 * seen, then the caller's receive loop drains the CQ. Once the CQ is
 * empty, it moves the RQ state back to ready and reinitializes the RQ.
 * Identifying the next CQE and counting errors remain the caller's
 * responsibility.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] vec
 *   1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
 *   0 when called from non-vectorized Rx burst.
 *
 * @return
 *   -1 in case of recovery error, otherwise the CQE status.
 */
int
mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	union {
		volatile struct mlx5_cqe *cqe;
		volatile struct mlx5_err_cqe *err_cqe;
	} u = {
		.cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
	};
	struct mlx5_mp_arg_queue_state_modify sm;
	int ret;

	switch (rxq->err_state) {
	case MLX5_RXQ_ERR_STATE_NO_ERROR:
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_RESET:
		sm.is_wq = 1;
		sm.queue_id = rxq->idx;
		sm.state = IBV_WQS_RESET;
		if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
			return -1;
		if (rxq_ctrl->dump_file_n <
		    RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u RQN = %u wqe_counter = %u"
			      " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
			      rxq->cqn, rxq_ctrl->wqn,
			      rte_be_to_cpu_16(u.err_cqe->wqe_counter),
			      rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
			      rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
								    rxq->cqes),
						    sizeof(*u.cqe) * cqe_n);
			mlx5_dump_debug_information(name, "MLX5 Error RQ:",
						    (const void *)((uintptr_t)
								    rxq->wqes),
						    16 * wqe_n);
			rxq_ctrl->dump_file_n++;
		}
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_READY:
		ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN) {
			rte_io_wmb();
			*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
			rte_io_wmb();
			/*
			 * The RQ consumer index must be zeroed while moving
			 * from RESET state to RDY state.
			 */
			*rxq->rq_db = rte_cpu_to_be_32(0);
			rte_io_wmb();
			sm.is_wq = 1;
			sm.queue_id = rxq->idx;
			sm.state = IBV_WQS_RDY;
			if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
				return -1;
			if (vec) {
				const uint32_t elts_n =
					mlx5_rxq_mprq_enabled(rxq) ?
					wqe_n * strd_n : wqe_n;
				const uint32_t e_mask = elts_n - 1;
				uint32_t elts_ci =
					mlx5_rxq_mprq_enabled(rxq) ?
					rxq->elts_ci : rxq->rq_ci;
				uint32_t elt_idx;
				struct rte_mbuf **elt;
				int i;
				unsigned int n = elts_n - (elts_ci -
							  rxq->rq_pi);

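				/* Refill every element slot consumed by HW;
				 * on allocation failure, roll back and free
				 * the mbufs allocated so far.
				 */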
				for (i = 0; i < (int)n; ++i) {
					elt_idx = (elts_ci + i) & e_mask;
					elt = &(*rxq->elts)[elt_idx];
					*elt = rte_mbuf_raw_alloc(rxq->mp);
					if (!*elt) {
						for (i--; i >= 0; --i) {
							elt_idx = (elts_ci +
								   i) & e_mask;
							elt = &(*rxq->elts)
								[elt_idx];
							rte_pktmbuf_free_seg
								(*elt);
						}
						return -1;
					}
				}
				for (i = 0; i < (int)elts_n; ++i) {
					elt = &(*rxq->elts)[i];
					DATA_LEN(*elt) =
						(uint16_t)((*elt)->buf_len -
						rte_pktmbuf_headroom(*elt));
				}
				/* Padding with a fake mbuf for vec Rx. */
				for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
					(*rxq->elts)[elts_n + i] =
								&rxq->fake_mbuf;
			}
			mlx5_rxq_initialize(rxq);
			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
		}
		return ret;
	default:
		return -1;
	}
}

/**
 * Get the size of the next packet for a given CQE. For compressed CQEs, the
 * consumer index is updated only once all the packets of the current
 * compressed session have been processed.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param cqe
 *   CQE to process.
 * @param cqe_cnt
 *   Size of the CQ ring minus one (used as an index mask).
 * @param[out] mcqe
 *   Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
 *   written.
 *
 * @return
 *   0 in case of empty CQE, otherwise the packet size in bytes.
 */
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
	struct rxq_zip *zip = &rxq->zip;
	uint16_t cqe_n = cqe_cnt + 1;
	int len;
	uint16_t idx, end;

	do {
		len = 0;
		/* Process compressed data in the CQE and mini arrays. */
		if (zip->ai) {
			volatile struct mlx5_mini_cqe8 (*mc)[8] =
				(volatile struct mlx5_mini_cqe8 (*)[8])
				(uintptr_t)(&(*rxq->cqes)[zip->ca &
							  cqe_cnt].pkt_info);
			len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
					       rxq->byte_mask);
			*mcqe = &(*mc)[zip->ai & 7];
			if ((++zip->ai & 7) == 0) {
				/* Invalidate consumed CQEs */
				idx = zip->ca;
				end = zip->na;
				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				/*
				 * Increment consumer index to skip the number
				 * of CQEs consumed. Hardware leaves holes in
				 * the CQ ring for software use.
				 */
				zip->ca = zip->na;
				zip->na += 8;
			}
			if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
				/* Invalidate the rest */
				idx = zip->ca;
				end = zip->cq_ci;

				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				rxq->cq_ci = zip->cq_ci;
				zip->ai = 0;
			}
		/*
		 * No compressed data, get next CQE and verify if it is
		 * compressed.
		 */
		} else {
			int ret;
			int8_t op_own;
			uint32_t cq_ci;

			ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
			if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
				if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
					     rxq->err_state)) {
					ret = mlx5_rx_err_handle(rxq, 0);
					if (ret == MLX5_CQE_STATUS_HW_OWN ||
					    ret == -1)
						return 0;
				} else {
					return 0;
				}
			}
			/*
			 * Introduce the local variable to have queue cq_ci
			 * index in queue structure always consistent with
			 * actual CQE boundary (not pointing to the middle
			 * of compressed CQE session).
			 */
			cq_ci = rxq->cq_ci + 1;
			op_own = cqe->op_own;
			if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
				volatile struct mlx5_mini_cqe8 (*mc)[8] =
					(volatile struct mlx5_mini_cqe8 (*)[8])
					(uintptr_t)(&(*rxq->cqes)
						[cq_ci & cqe_cnt].pkt_info);

				/* Fix endianness. */
				zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
				/*
				 * Current mini array position is the one
				 * returned by check_cqe64().
				 *
				 * If completion comprises several mini arrays,
				 * as a special case the second one is located
				 * 7 CQEs after the initial CQE instead of 8
				 * for subsequent ones.
				 */
				zip->ca = cq_ci;
				zip->na = zip->ca + 7;
				/* Compute the next non compressed CQE. */
				zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
				/* Get packet size to return. */
				len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
						       rxq->byte_mask);
				*mcqe = &(*mc)[0];
				zip->ai = 1;
				/* Prefetch all to be invalidated */
				idx = zip->ca;
				end = zip->cq_ci;
				while (idx != end) {
					rte_prefetch0(&(*rxq->cqes)[(idx) &
								    cqe_cnt]);
					++idx;
				}
			} else {
				rxq->cq_ci = cq_ci;
				len = rte_be_to_cpu_32(cqe->byte_cnt);
			}
		}
		if (unlikely(rxq->err_state)) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			++rxq->stats.idropped;
		} else {
			return len;
		}
	} while (1);
}

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
{
	uint32_t ol_flags = 0;
	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

	ol_flags =
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L3_HDR_VALID,
			  RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L4_HDR_VALID,
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
	return ol_flags;
}

/**
 * Fill in mbuf fields from RX completion flags.
 * Note that pkt->ol_flags should be initialized outside of this function.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param pkt
 *   mbuf to fill.
 * @param cqe
 *   CQE to process.
 * @param mcqe
 *   Mini-CQE to process, NULL if the CQE is not compressed.
 */
static inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe)
{
	/* Update packet information. */
	pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
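	/* For shared Rx queues, the CQE user index carries the index of the
	 * receiving port; take the mbuf port from there instead of the queue.
	 */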
	pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id;

	if (rxq->rss_hash) {
		uint32_t rss_hash_res = 0;

		/* If compressed, take hash result from mini-CQE. */
		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
			rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
		else
			rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
		if (rss_hash_res) {
			pkt->hash.rss = rss_hash_res;
			pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		}
	}
	if (rxq->mark) {
		uint32_t mark = 0;

		/* If compressed, take flow tag from mini-CQE. */
		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			mark = cqe->sop_drop_qpn;
		else
			mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
				(mcqe->flow_tag_high << 16);
		if (MLX5_FLOW_MARK_IS_VALID(mark)) {
			pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
			if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
				pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
				pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
			}
		}
	}
	if (rxq->dynf_meta) {
		uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
			rxq->flow_meta_port_mask;

		if (meta) {
			pkt->ol_flags |= rxq->flow_meta_mask;
			*RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
						uint32_t *) = meta;
		}
	}
	if (rxq->csum)
		pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
	if (rxq->vlan_strip) {
		bool vlan_strip;

		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
			vlan_strip = cqe->hdr_type_etc &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
		else
			vlan_strip = mcqe->hdr_type &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
		if (vlan_strip) {
			pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
			pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
		}
	}
	if (rxq->hw_timestamp) {
		uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);

		if (rxq->rt_timestamp)
			ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
		mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
		pkt->ol_flags |= rxq->timestamp_rx_flag;
	}
}

/**
 * DPDK callback for RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
	const unsigned int sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	volatile struct mlx5_cqe *cqe =
		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
	unsigned int i = 0;
	unsigned int rq_ci = rxq->rq_ci << sges_n;
	int len = 0; /* keep its value across iterations. */

	while (pkts_n) {
		unsigned int idx = rq_ci & wqe_cnt;
		volatile struct mlx5_wqe_data_seg *wqe =
			&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;

		if (pkt)
			NEXT(seg) = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(cqe);
		rte_prefetch0(wqe);
		/* Allocate the buf from the same pool. */
		rep = rte_mbuf_raw_alloc(seg->pool);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * No buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			while (pkt != seg) {
				MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
				rep = NEXT(pkt);
				NEXT(pkt) = NULL;
				NB_SEGS(pkt) = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
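			/* Align consumer index to the next stride. */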
			rq_ci >>= sges_n;
			++rq_ci;
			rq_ci <<= sges_n;
			break;
		}
		if (!pkt) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			pkt = seg;
			MLX5_ASSERT(len >= (rxq->crc_present << 2));
			pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
			rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
			if (rxq->crc_present)
				len -= RTE_ETHER_CRC_LEN;
			PKT_LEN(pkt) = len;
			if (cqe->lro_num_seg > 1) {
				mlx5_lro_update_hdr
					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
					 mcqe, rxq, len);
				pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
				pkt->tso_segsz = len / cqe->lro_num_seg;
			}
		}
		DATA_LEN(rep) = DATA_LEN(seg);
		PKT_LEN(rep) = PKT_LEN(seg);
		SET_DATA_OFF(rep, DATA_OFF(seg));
		PORT(rep) = PORT(seg);
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
		if (len > DATA_LEN(seg)) {
			len -= DATA_LEN(seg);
			++NB_SEGS(pkt);
			++rq_ci;
			continue;
		}
		DATA_LEN(seg) = len;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci)))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}

/**
 * Update LRO packet TCP header.
 * The HW LRO feature doesn't update the TCP header after coalescing the
 * TCP segments, but supplies information in the CQE so SW can fill it in.
 *
 * @param tcp
 *   Pointer to the TCP header.
 * @param cqe
 *   Pointer to the completion entry.
 * @param phcsum
 *   The L3 pseudo-header checksum.
 * @param l4_type
 *   The L4 header type from the CQE.
 */
static inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type)
{
	/*
	 * The HW calculates only the TCP payload checksum, need to complete
	 * the TCP header checksum and the L3 pseudo-header checksum.
	 */
	uint32_t csum = phcsum + cqe->csum;

	if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
	    l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
		tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
		tcp->recv_ack = cqe->lro_ack_seq_num;
		tcp->rx_win = cqe->lro_tcp_win;
	}
	if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
		tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
	tcp->cksum = 0;
	csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
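	/* Fold the 32-bit sum into 16 bits, take the one's complement and
	 * map a zero result to 0xffff as required for TCP checksums.
	 */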
	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
	csum = (~csum) & 0xffff;
	if (csum == 0)
		csum = 0xffff;
	tcp->cksum = csum;
}

/**
 * Update LRO packet headers.
 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
 * TCP segments, but supplies information in the CQE so SW can fill them in.
 *
 * @param padd
 *   The packet address.
 * @param cqe
 *   Pointer to the completion entry.
 * @param mcqe
 *   Mini-CQE to process, NULL if the CQE is not compressed.
 * @param rxq
 *   Pointer to RX queue.
 * @param len
 *   The packet length.
 */
static inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len)
{
	union {
		struct rte_ether_hdr *eth;
		struct rte_vlan_hdr *vlan;
		struct rte_ipv4_hdr *ipv4;
		struct rte_ipv6_hdr *ipv6;
		struct rte_tcp_hdr *tcp;
		uint8_t *hdr;
	} h = {
		.hdr = padd,
	};
	uint16_t proto = h.eth->ether_type;
	uint32_t phcsum;
	uint8_t l4_type;

	h.eth++;
	while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
	       proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
		proto = h.vlan->eth_proto;
		h.vlan++;
	}
	if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
		h.ipv4->time_to_live = cqe->lro_min_ttl;
		h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
		h.ipv4->hdr_checksum = 0;
		h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
		phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
		h.ipv4++;
	} else {
		h.ipv6->hop_limits = cqe->lro_min_ttl;
		h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
						       sizeof(*h.ipv6));
		phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
		h.ipv6++;
	}
	if (mcqe == NULL ||
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	else
		l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
}

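/**
 * Free a Multi-Packet RQ buffer.
 *
 * @param buf
 *   Pointer to the buffer to free.
 */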
void
mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
{
	mlx5_mprq_buf_free_cb(NULL, buf);
}

/**
 * DPDK callback for RX with Multi-Packet RQ support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
	const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
	const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
	unsigned int i = 0;
	uint32_t rq_ci = rxq->rq_ci;
	uint16_t consumed_strd = rxq->consumed_strd;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];

	while (i < pkts_n) {
		struct rte_mbuf *pkt;
		int ret;
		uint32_t len;
		uint16_t strd_cnt;
		uint16_t strd_idx;
		uint32_t byte_cnt;
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
		enum mlx5_rqx_code rxq_code;

		if (consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wq_mask);
			/* Advance to the next WQE. */
			consumed_strd = 0;
			++rq_ci;
			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
		}
		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
		if (!ret)
			break;
		byte_cnt = ret;
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		if (mcqe &&
		    rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			strd_cnt = (len / strd_sz) + !!(len % strd_sz);
		else
			strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
				   MLX5_MPRQ_STRIDE_NUM_SHIFT;
		MLX5_ASSERT(strd_cnt);
		consumed_strd += strd_cnt;
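		/* Filler CQEs only consume strides, they carry no packet. */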
		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
			continue;
		strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
					cqe->wqe_counter :
					mcqe->stride_idx);
		MLX5_ASSERT(strd_idx < strd_n);
		MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
			    wq_mask));
		pkt = rte_pktmbuf_alloc(rxq->mp);
		if (unlikely(pkt == NULL)) {
			++rxq->stats.rx_nombuf;
			break;
		}
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
					   strd_idx, strd_cnt);
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(pkt);
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
				continue;
			}
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
				break;
			}
		}
		rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
		if (cqe->lro_num_seg > 1) {
			mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
					    cqe, mcqe, rxq, len);
			pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
			pkt->tso_segsz = len / cqe->lro_num_seg;
		}
		PKT_LEN(pkt) = len;
		PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		++i;
	}
	/* Update the consumer indexes. */
	rxq->consumed_strd = consumed_strd;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	if (rq_ci != rxq->rq_ci) {
		rxq->rq_ci = rq_ci;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}

/*
 * Vectorized Rx routines are not compiled in when the required vector
 * instructions are not supported on a target architecture.
 * The following null stubs are needed for linkage when the vectorized
 * routines are not provided by another file (e.g. mlx5_rxtx_vec_sse.c
 * for x86).
 */

__rte_weak uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
		  struct rte_mbuf **pkts __rte_unused,
		  uint16_t pkts_n __rte_unused)
{
	return 0;
}

__rte_weak uint16_t
mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
		       struct rte_mbuf **pkts __rte_unused,
		       uint16_t pkts_n __rte_unused)
{
	return 0;
}

__rte_weak int
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
	return -ENOTSUP;
}

__rte_weak int
mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}