xref: /dpdk/drivers/net/mlx5/mlx5_rxq.c (revision f8f294c66b5ff6ee89590cce56a3d733513ff9a0)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/queue.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_io.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_malloc.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_devx.h"
#include "rte_pmd_mlx5.h"


/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
static_assert(MLX5_RSS_HASH_KEY_LEN ==
	      (unsigned int)sizeof(rss_hash_default_key),
	      "wrong RSS default key size.");

/**
 * Calculate the number of CQEs in CQ for the Rx queue.
 *
 * @param rxq_data
 *   Pointer to receive queue structure.
 *
 * @return
 *   Number of CQEs in CQ.
 */
unsigned int
mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
{
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;

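	/*
	 * With MPRQ each stride may complete as a separate packet with its
	 * own CQE, so the CQ must cover every stride of every WQE.
	 */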
	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
	else
		cqe_n = wqe_n - 1;
	return cqe_n;
}

/**
 * Allocate RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;
	int err;

	/* One MPRQ buffer per WQE, plus one spare replacement buffer. */
	for (i = 0; i <= wqe_n; ++i) {
		struct mlx5_mprq_buf *buf;

		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		if (i < wqe_n)
			(*rxq->mprq_bufs)[i] = buf;
		else
			rxq->mprq_repl = buf;
	}
	DRV_LOG(DEBUG,
		"port %u MPRQ queue %u allocated and configured %u segments",
		rxq->port_id, rxq->idx, wqe_n);
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	wqe_n = i;
	for (i = 0; (i != wqe_n); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			rte_mempool_put(rxq->mprq_mp,
					(*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
		rxq->port_id, rxq->idx);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Allocate RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
	unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
			      RTE_BIT32(rxq_ctrl->rxq.elts_n) *
			      RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
			      RTE_BIT32(rxq_ctrl->rxq.elts_n);
	bool has_vec_support = mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0;
	unsigned int i;
	int err;

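	/*
	 * With MPRQ enabled, elts_n above counts strides rather than WQEs:
	 * one simple mbuf is kept per stride for the vector processing path.
	 */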
	/* Iterate on segments. */
	for (i = 0; (i != elts_n); ++i) {
		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
		struct rte_mbuf *buf;

		buf = rte_pktmbuf_alloc(seg->mp);
		if (buf == NULL) {
			if (rxq_ctrl->share_group == 0)
				DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
					RXQ_PORT_ID(rxq_ctrl),
					rxq_ctrl->rxq.idx);
			else
				DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
					rxq_ctrl->share_group,
					rxq_ctrl->share_qid);
			rte_errno = ENOMEM;
			goto error;
		}
		/* Only vectored Rx routines rely on headroom size. */
		MLX5_ASSERT(!has_vec_support ||
			    DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
		/* Buffer is supposed to be empty. */
		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
		MLX5_ASSERT(!buf->next);
		SET_DATA_OFF(buf, seg->offset);
		PORT(buf) = rxq_ctrl->rxq.port_id;
		DATA_LEN(buf) = seg->length;
		PKT_LEN(buf) = seg->length;
		NB_SEGS(buf) = 1;
		(*rxq_ctrl->rxq.elts)[i] = buf;
	}
	/* If Rx vector is activated. */
	if (has_vec_support) {
		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
		struct rte_pktmbuf_pool_private *priv =
			(struct rte_pktmbuf_pool_private *)
				rte_mempool_get_priv(rxq_ctrl->rxq.mp);
		int j;

		/* Initialize default rearm_data for vPMD. */
		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
		rte_mbuf_refcnt_set(mbuf_init, 1);
		mbuf_init->nb_segs = 1;
		/* For shared queues port is provided in CQE */
		mbuf_init->port = rxq->shared ? 0 : rxq->port_id;
		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
			mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL;
		/*
		 * prevent compiler reordering:
		 * rearm_data covers previous fields.
		 */
		rte_compiler_barrier();
		rxq->mbuf_initializer =
			*(rte_xmm_t *)&mbuf_init->rearm_data;
		/* Padding with a fake mbuf for vectorized Rx. */
		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
	}
	if (rxq_ctrl->share_group == 0)
		DRV_LOG(DEBUG,
			"port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
			RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
			elts_n / (1 << rxq_ctrl->rxq.sges_n));
	else
		DRV_LOG(DEBUG,
			"share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
			rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
			elts_n / (1 << rxq_ctrl->rxq.sges_n));
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	elts_n = i;
	for (i = 0; (i != elts_n); ++i) {
		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
		(*rxq_ctrl->rxq.elts)[i] = NULL;
	}
	if (rxq_ctrl->share_group == 0)
		DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
			RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
	else
		DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
			rxq_ctrl->share_group, rxq_ctrl->share_qid);
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Allocate RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	int ret = 0;

	/**
	 * For MPRQ we need to allocate both MPRQ buffers
	 * for WQEs and simple mbufs for vector processing.
	 */
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		ret = rxq_alloc_elts_mprq(rxq_ctrl);
	if (ret == 0)
		ret = rxq_alloc_elts_sprq(rxq_ctrl);
	return ret;
}

/**
 * Free RX queue elements for Multi-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	uint16_t i;

	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
		rxq->port_id, rxq->idx, (1u << rxq->elts_n));
	if (rxq->mprq_bufs == NULL)
		return;
	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
		if ((*rxq->mprq_bufs)[i] != NULL)
			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
		(*rxq->mprq_bufs)[i] = NULL;
	}
	if (rxq->mprq_repl != NULL) {
		mlx5_mprq_buf_free(rxq->mprq_repl);
		rxq->mprq_repl = NULL;
	}
}

/**
 * Free RX queue elements for Single-Packet RQ.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
	const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
		RTE_BIT32(rxq->elts_n);
	const uint16_t q_mask = q_n - 1;
	uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
		rxq->elts_ci : rxq->rq_ci;
	uint16_t used = q_n - (elts_ci - rxq->rq_pi);
	uint16_t i;

	if (rxq_ctrl->share_group == 0)
		DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
			RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
	else
		DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
			rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
	if (rxq->elts == NULL)
		return;
	/*
	 * Some mbufs in the ring still belong to the application;
	 * they cannot be freed.
	 */
	if (mlx5_rxq_check_vec_support(rxq) > 0) {
		for (i = 0; i < used; ++i)
			(*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
		rxq->rq_pi = elts_ci;
	}
	for (i = 0; i != q_n; ++i) {
		if ((*rxq->elts)[i] != NULL)
			rte_pktmbuf_free_seg((*rxq->elts)[i]);
		(*rxq->elts)[i] = NULL;
	}
}

/**
 * Free RX queue elements.
 *
 * @param rxq_ctrl
 *   Pointer to RX queue structure.
 */
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	/*
	 * For MPRQ we need to free both the MPRQ buffers
	 * for WQEs and the simple mbufs used for vector processing.
	 */
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		rxq_free_elts_mprq(rxq_ctrl);
	rxq_free_elts_sprq(rxq_ctrl);
}

/**
 * Returns the per-queue supported offloads.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
			     RTE_ETH_RX_OFFLOAD_TIMESTAMP |
			     RTE_ETH_RX_OFFLOAD_RSS_HASH);

	if (!priv->config.mprq.enabled)
		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	if (priv->sh->config.hw_fcs_strip)
		offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
	if (priv->sh->dev_cap.hw_csum)
		offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
			     RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
			     RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
	if (priv->sh->dev_cap.hw_vlan_strip)
		offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	if (priv->sh->config.lro_allowed)
		offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
	return offloads;
}


/**
 * Returns the per-port supported offloads.
 *
 * @return
 *   Supported Rx offloads.
 */
uint64_t
mlx5_get_rx_port_offloads(void)
{
	uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER;

	return offloads;
}

/**
 * Verify if the queue can be released.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   1 if the queue can be released,
 *   0 if the queue cannot be released (there are still references to it),
 *   a negative errno value and rte_errno is set if the queue doesn't exist.
 */
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	if (rxq == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
}

/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
static void
rxq_sync_cq(struct mlx5_rxq_data *rxq)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	volatile struct mlx5_cqe *cqe;
	int ret, i;

	i = cqe_n;
	do {
		cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN)
			break;
		if (ret == MLX5_CQE_STATUS_ERR) {
			rxq->cq_ci++;
			continue;
		}
		MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
		if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
			rxq->cq_ci++;
			continue;
		}
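		/*
		 * The number of entries to skip over the compressed session
		 * comes from byte_cnt (legacy layout) or from the mini-CQE
		 * count in op_own (enhanced CQE compression layout).
		 */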
		/* Compute the next non compressed CQE. */
		rxq->cq_ci += rxq->cqe_comp_layout ?
			(MLX5_CQE_NUM_MINIS(cqe->op_own) + 1U) :
			rte_be_to_cpu_32(cqe->byte_cnt);

	} while (--i);
	/* Move all CQEs to HW ownership, including possible MiniCQEs. */
	for (i = 0; i < cqe_n; i++) {
		cqe = &(*rxq->cqes)[i];
		cqe->validity_iteration_count = MLX5_CQE_VIC_INIT;
		cqe->op_own = MLX5_CQE_INVALIDATE;
	}
	/* Resync CQE and WQE (WQ in RESET state). */
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(0);
	rte_io_wmb();
}

/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	int ret;

	MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Remove all processed CQEs. */
	rxq_sync_cq(&rxq_ctrl->rxq);
	/* Free all involved mbufs. */
	rxq_free_elts(rxq_ctrl);
	/* Set the actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	int ret;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	/*
	 * Vectorized Rx burst requires the CQ and RQ indices to stay
	 * synchronized; an RQ restart may break that and cause Rx
	 * malfunction, so stopping the queue is not supported while a
	 * vectorized Rx burst is engaged. The burst routine pointer
	 * depends on the process type, so the check must be done here.
	 * MPRQ is not supported either.
	 */
	if (pkt_burst != mlx5_rx_burst) {
		DRV_LOG(ERR, "Rx queue stop is only supported "
			"for non-vectorized single-packet Rx");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_STOP);
	} else {
		ret = mlx5_rx_queue_stop_primary(dev, idx);
	}
	return ret;
}

/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
	int ret;

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Allocate needed buffers. */
	ret = rxq_alloc_elts(rxq->ctrl);
	if (ret) {
		DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
		rte_errno = errno;
		return ret;
	}
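	/* Sync the CQ doorbell record with the current SW consumer index. */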
	rte_io_wmb();
	*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
	rte_io_wmb();
	/* Reset RQ consumer before moving queue to READY state. */
	*rxq_data->rq_db = rte_cpu_to_be_32(0);
	rte_io_wmb();
	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Reinitialize RQ - set WQEs. */
	mlx5_rxq_initialize(rxq_data);
	rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
	/* Set actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
{
	int ret;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be started");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_START);
	} else {
		ret = mlx5_rx_queue_start_primary(dev, idx);
	}
	return ret;
}

/**
 * Rx queue presetup checks.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param[out] rxq_ctrl
 *   Address of pointer to shared Rx queue control.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc,
			struct mlx5_rxq_ctrl **rxq_ctrl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq;
	bool empty;

	if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) {
		DRV_LOG(ERR,
			"port %u number of descriptors requested for Rx queue"
			" %u is more than supported",
			dev->data->port_id, idx);
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (!rte_is_power_of_2(*desc)) {
		*desc = 1 << log2above(*desc);
		DRV_LOG(WARNING,
			"port %u increased number of descriptors in Rx queue %u"
			" to the next power of two (%d)",
			dev->data->port_id, idx, *desc);
	}
	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
		dev->data->port_id, idx, *desc);
	if (idx >= priv->rxqs_n) {
		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
			dev->data->port_id, idx, priv->rxqs_n);
		rte_errno = EOVERFLOW;
		return -rte_errno;
	}
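	/* Nothing to release if the queue has no existing control object. */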
	if (rxq_ctrl == NULL || *rxq_ctrl == NULL)
		return 0;
	if (!(*rxq_ctrl)->rxq.shared) {
		if (!mlx5_rxq_releasable(dev, idx)) {
			DRV_LOG(ERR, "port %u unable to release queue index %u",
				dev->data->port_id, idx);
			rte_errno = EBUSY;
			return -rte_errno;
		}
		mlx5_rxq_release(dev, idx);
	} else {
		if ((*rxq_ctrl)->obj != NULL)
			/* Some port using shared Rx queue has been started. */
			return 0;
		/* Release all owner RxQ to reconfigure Shared RxQ. */
		do {
			rxq = LIST_FIRST(&(*rxq_ctrl)->owners);
			LIST_REMOVE(rxq, owner_entry);
			empty = LIST_EMPTY(&(*rxq_ctrl)->owners);
			mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx);
		} while (!empty);
		*rxq_ctrl = NULL;
	}
	return 0;
}

/**
 * Get the shared Rx queue object that matches group and queue index.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param group
 *   Shared RXQ group.
 * @param share_qid
 *   Shared RX queue index.
 *
 * @return
 *   Shared RXQ object that matches, or NULL if not found.
 */
static struct mlx5_rxq_ctrl *
mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid)
{
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_priv *priv = dev->data->dev_private;

	LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
		if (rxq_ctrl->share_group == group &&
		    rxq_ctrl->share_qid == share_qid)
			return rxq_ctrl;
	}
	return NULL;
}

/**
 * Check whether requested Rx queue configuration matches shared RXQ.
 *
 * @param rxq_ctrl
 *   Pointer to shared RXQ.
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   true if the requested configuration matches the shared RXQ,
 *   false otherwise.
 */
static bool
mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
		      uint16_t idx, uint16_t desc, unsigned int socket,
		      const struct rte_eth_rxconf *conf,
		      struct rte_mempool *mp)
{
	struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	RTE_SET_USED(conf);
	if (rxq_ctrl->socket != socket) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (priv->mtu != spriv->mtu) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (priv->dev_data->dev_conf.intr_conf.rxq !=
	    spriv->dev_data->dev_conf.intr_conf.rxq) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (mp != NULL && rxq_ctrl->rxq.mp != mp) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
			dev->data->port_id, idx);
		return false;
	} else if (mp == NULL) {
		if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
			DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
				dev->data->port_id, idx);
			return false;
		}
		for (i = 0; i < conf->rx_nseg; i++) {
			if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
				   sizeof(struct rte_eth_rxseg_split))) {
				DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
					dev->data->port_id, idx, i);
				return false;
			}
		}
	}
	if (priv->config.hw_padding != spriv->config.hw_padding) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
			dev->data->port_id, idx);
		return false;
	}
	if (priv->config.cqe_comp != spriv->config.cqe_comp ||
	    (priv->config.cqe_comp &&
	     priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
		DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
			dev->data->port_id, idx);
		return false;
	}
	return true;
}

/**
 * DPDK callback to configure a Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_rxconf *conf,
		    struct rte_mempool *mp)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct rte_eth_rxseg_split *rx_seg =
				(struct rte_eth_rxseg_split *)conf->rx_seg;
	struct rte_eth_rxseg_split rx_single = {.mp = mp};
	uint16_t n_seg = conf->rx_nseg;
	int res;
	uint64_t offloads = conf->offloads |
			    dev->data->dev_conf.rxmode.offloads;
	bool is_extmem = false;

	if ((offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
	    !priv->sh->config.lro_allowed) {
		DRV_LOG(ERR,
			"Port %u queue %u LRO is configured but not allowed.",
			dev->data->port_id, idx);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (mp) {
		/*
		 * The parameters should be checked on rte_eth_dev layer.
		 * If mp is specified it means the compatible configuration
		 * without buffer split feature tuning.
		 */
		rx_seg = &rx_single;
		n_seg = 1;
		is_extmem = rte_pktmbuf_priv_flags(mp) &
			    RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
	}
	if (n_seg > 1) {
		/* The offloads should be checked on rte_eth_dev layer. */
		MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
			DRV_LOG(ERR, "port %u queue index %u split "
				     "offload not configured",
				     dev->data->port_id, idx);
			rte_errno = ENOSPC;
			return -rte_errno;
		}
		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
	}
	if (conf->share_group > 0) {
		if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
			DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
				     dev->data->port_id, idx);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
			DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
				     dev->data->port_id, idx);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (conf->share_qid >= priv->rxqs_n) {
			DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u",
				dev->data->port_id, conf->share_qid,
				priv->rxqs_n);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (priv->config.mprq.enabled) {
			DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled",
				dev->data->port_id, conf->share_qid);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		/* Try to reuse shared RXQ. */
		rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group,
					       conf->share_qid);
		res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
		if (res)
			return res;
		if (rxq_ctrl != NULL &&
		    !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
					   conf, mp)) {
			rte_errno = EINVAL;
			return -rte_errno;
		}
	} else {
		res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl);
		if (res)
			return res;
	}
	/* Allocate RXQ. */
	rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
			  SOCKET_ID_ANY);
	if (!rxq) {
		DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (rxq_ctrl == NULL) {
		rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
					n_seg, is_extmem);
		if (rxq_ctrl == NULL) {
			DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
				dev->data->port_id, idx);
			mlx5_free(rxq);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
	}
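	/* Bind this port's queue to the (possibly shared) control object. */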
	rxq->priv = priv;
	rxq->idx = idx;
	(*priv->rxq_privs)[idx] = rxq;
	/* Join owner list. */
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	rxq->ctrl = rxq_ctrl;
	rte_atomic_fetch_add_explicit(&rxq_ctrl->ctrl_ref, 1, rte_memory_order_relaxed);
	mlx5_rxq_ref(dev, idx);
	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
		dev->data->port_id, idx);
	dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
	return 0;
}

9642e22920bSAdrien Mazarguil /**
965e79c9be9SOri Kam  *
966e79c9be9SOri Kam  * @param dev
967e79c9be9SOri Kam  *   Pointer to Ethernet device structure.
968e79c9be9SOri Kam  * @param idx
969e79c9be9SOri Kam  *   RX queue index.
970e79c9be9SOri Kam  * @param desc
971e79c9be9SOri Kam  *   Number of descriptors to configure in queue.
972e79c9be9SOri Kam  * @param hairpin_conf
973e79c9be9SOri Kam  *   Hairpin configuration parameters.
974e79c9be9SOri Kam  *
975e79c9be9SOri Kam  * @return
976e79c9be9SOri Kam  *   0 on success, a negative errno value otherwise and rte_errno is set.
977e79c9be9SOri Kam  */
978e79c9be9SOri Kam int
979e79c9be9SOri Kam mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
980e79c9be9SOri Kam 			    uint16_t desc,
981e79c9be9SOri Kam 			    const struct rte_eth_hairpin_conf *hairpin_conf)
982e79c9be9SOri Kam {
983e79c9be9SOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
9844cda06c3SXueming Li 	struct mlx5_rxq_priv *rxq;
98553232e3bSXueming Li 	struct mlx5_rxq_ctrl *rxq_ctrl;
986e79c9be9SOri Kam 	int res;
987e79c9be9SOri Kam 
98809c25553SXueming Li 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL);
989e79c9be9SOri Kam 	if (res)
990e79c9be9SOri Kam 		return res;
9911a01264fSBing Zhao 	if (hairpin_conf->peer_count != 1) {
992e79c9be9SOri Kam 		rte_errno = EINVAL;
9931a01264fSBing Zhao 		DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
9941a01264fSBing Zhao 			" peer count is %u", dev->data->port_id,
9951a01264fSBing Zhao 			idx, hairpin_conf->peer_count);
996e79c9be9SOri Kam 		return -rte_errno;
997e79c9be9SOri Kam 	}
9981a01264fSBing Zhao 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
9991a01264fSBing Zhao 		if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
10001a01264fSBing Zhao 			rte_errno = EINVAL;
10011a01264fSBing Zhao 			DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
10021a01264fSBing Zhao 				" index %u, Tx %u is larger than %u",
10031a01264fSBing Zhao 				dev->data->port_id, idx,
10041a01264fSBing Zhao 				hairpin_conf->peers[0].queue, priv->txqs_n);
10051a01264fSBing Zhao 			return -rte_errno;
10061a01264fSBing Zhao 		}
10071a01264fSBing Zhao 	} else {
10081a01264fSBing Zhao 		if (hairpin_conf->manual_bind == 0 ||
10091a01264fSBing Zhao 		    hairpin_conf->tx_explicit == 0) {
10101a01264fSBing Zhao 			rte_errno = EINVAL;
10111a01264fSBing Zhao 			DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
10121a01264fSBing Zhao 				" index %u peer port %u with attributes %u %u",
10131a01264fSBing Zhao 				dev->data->port_id, idx,
10141a01264fSBing Zhao 				hairpin_conf->peers[0].port,
10151a01264fSBing Zhao 				hairpin_conf->manual_bind,
10161a01264fSBing Zhao 				hairpin_conf->tx_explicit);
10171a01264fSBing Zhao 			return -rte_errno;
10181a01264fSBing Zhao 		}
10191a01264fSBing Zhao 	}
10204cda06c3SXueming Li 	rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
10214cda06c3SXueming Li 			  SOCKET_ID_ANY);
10224cda06c3SXueming Li 	if (!rxq) {
10234cda06c3SXueming Li 		DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
1024e79c9be9SOri Kam 			dev->data->port_id, idx);
1025e79c9be9SOri Kam 		rte_errno = ENOMEM;
1026e79c9be9SOri Kam 		return -rte_errno;
1027e79c9be9SOri Kam 	}
10284cda06c3SXueming Li 	rxq->priv = priv;
10294cda06c3SXueming Li 	rxq->idx = idx;
10304cda06c3SXueming Li 	(*priv->rxq_privs)[idx] = rxq;
10314cda06c3SXueming Li 	rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
10324cda06c3SXueming Li 	if (!rxq_ctrl) {
10334cda06c3SXueming Li 		DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
10344cda06c3SXueming Li 			dev->data->port_id, idx);
10354cda06c3SXueming Li 		mlx5_free(rxq);
10364cda06c3SXueming Li 		(*priv->rxq_privs)[idx] = NULL;
10374cda06c3SXueming Li 		rte_errno = ENOMEM;
10384cda06c3SXueming Li 		return -rte_errno;
10394cda06c3SXueming Li 	}
10404cda06c3SXueming Li 	DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
1041e79c9be9SOri Kam 		dev->data->port_id, idx);
10425cf0707fSXueming Li 	dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
1043e79c9be9SOri Kam 	return 0;
1044e79c9be9SOri Kam }
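/*
 * Illustrative application-side sketch (not part of the driver, variable
 * names are hypothetical): the single-port hairpin case accepted above,
 * where the peer of the Rx queue is a Tx queue of the same port.
 *
 *	struct rte_eth_hairpin_conf conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = port_id, .queue = peer_txq },
 *	};
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, nb_desc, &conf);
 *
 * Cross-port hairpin additionally requires the manual_bind and tx_explicit
 * attributes, as validated by the function above.
 */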
1045e79c9be9SOri Kam 
1046e79c9be9SOri Kam /**
10472e22920bSAdrien Mazarguil  * DPDK callback to release a RX queue.
10482e22920bSAdrien Mazarguil  *
10497483341aSXueming Li  * @param dev
10507483341aSXueming Li  *   Pointer to Ethernet device structure.
10517483341aSXueming Li  * @param qid
10527483341aSXueming Li  *   Receive queue index.
10532e22920bSAdrien Mazarguil  */
10542e22920bSAdrien Mazarguil void
10557483341aSXueming Li mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
10562e22920bSAdrien Mazarguil {
10577483341aSXueming Li 	struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
10582e22920bSAdrien Mazarguil 
10592e22920bSAdrien Mazarguil 	if (rxq == NULL)
10602e22920bSAdrien Mazarguil 		return;
10617483341aSXueming Li 	if (!mlx5_rxq_releasable(dev, qid))
10620f99970bSNélio Laranjeiro 		rte_panic("port %u Rx queue %u is still used by a flow and"
10637483341aSXueming Li 			  " cannot be removed\n", dev->data->port_id, qid);
10647483341aSXueming Li 	mlx5_rxq_release(dev, qid);
10652e22920bSAdrien Mazarguil }
1066a48deadaSOr Ami 
1067a48deadaSOr Ami /**
1068e1016cb7SAdrien Mazarguil  * Allocate queue vector and fill epoll fd list for Rx interrupts.
10693c7d44afSShahaf Shuler  *
1070af4f09f2SNélio Laranjeiro  * @param dev
1071af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
10723c7d44afSShahaf Shuler  *
10733c7d44afSShahaf Shuler  * @return
1074a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
10753c7d44afSShahaf Shuler  */
10763c7d44afSShahaf Shuler int
1077af4f09f2SNélio Laranjeiro mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
10783c7d44afSShahaf Shuler {
1079dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
10803c7d44afSShahaf Shuler 	unsigned int i;
10813c7d44afSShahaf Shuler 	unsigned int rxqs_n = priv->rxqs_n;
10823c7d44afSShahaf Shuler 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1083e1016cb7SAdrien Mazarguil 	unsigned int count = 0;
1084df428ceeSYongseok Koh 	struct rte_intr_handle *intr_handle = dev->intr_handle;
10853c7d44afSShahaf Shuler 
1086df428ceeSYongseok Koh 	if (!dev->data->dev_conf.intr_conf.rxq)
10873c7d44afSShahaf Shuler 		return 0;
1088af4f09f2SNélio Laranjeiro 	mlx5_rx_intr_vec_disable(dev);
1089d61138d4SHarman Kalra 	if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
1090a170a30dSNélio Laranjeiro 		DRV_LOG(ERR,
1091a170a30dSNélio Laranjeiro 			"port %u failed to allocate memory for interrupt"
1092a170a30dSNélio Laranjeiro 			" vector, Rx interrupts will not be supported",
10930f99970bSNélio Laranjeiro 			dev->data->port_id);
1094a6d83b6aSNélio Laranjeiro 		rte_errno = ENOMEM;
1095a6d83b6aSNélio Laranjeiro 		return -rte_errno;
10963c7d44afSShahaf Shuler 	}
1097d61138d4SHarman Kalra 
1098d61138d4SHarman Kalra 	if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
1099d61138d4SHarman Kalra 		return -rte_errno;
1100d61138d4SHarman Kalra 
11013c7d44afSShahaf Shuler 	for (i = 0; i != n; ++i) {
110293403560SDekel Peled 		/* This rxq obj must not be released in this function. */
11030cedf34dSXueming Li 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
11040cedf34dSXueming Li 		struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
11053c7d44afSShahaf Shuler 		int rc;
11063c7d44afSShahaf Shuler 
1107e1016cb7SAdrien Mazarguil 		/* Skip queues that cannot request interrupts. */
110808d1838fSDekel Peled 		if (!rxq_obj || (!rxq_obj->ibv_channel &&
110908d1838fSDekel Peled 				 !rxq_obj->devx_channel)) {
1110e1016cb7SAdrien Mazarguil 			/* Use invalid intr_vec[] index to disable entry. */
1111d61138d4SHarman Kalra 			if (rte_intr_vec_list_index_set(intr_handle, i,
1112d61138d4SHarman Kalra 			   RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
1113d61138d4SHarman Kalra 				return -rte_errno;
1114e1016cb7SAdrien Mazarguil 			continue;
1115e1016cb7SAdrien Mazarguil 		}
11160cedf34dSXueming Li 		mlx5_rxq_ref(dev, i);
1117e1016cb7SAdrien Mazarguil 		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
1118a170a30dSNélio Laranjeiro 			DRV_LOG(ERR,
1119a170a30dSNélio Laranjeiro 				"port %u too many Rx queues for interrupt"
1120a170a30dSNélio Laranjeiro 				" vector size (%d), Rx interrupts cannot be"
1121a170a30dSNélio Laranjeiro 				" enabled",
11220f99970bSNélio Laranjeiro 				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
1123af4f09f2SNélio Laranjeiro 			mlx5_rx_intr_vec_disable(dev);
1124a6d83b6aSNélio Laranjeiro 			rte_errno = ENOMEM;
1125a6d83b6aSNélio Laranjeiro 			return -rte_errno;
1126e1016cb7SAdrien Mazarguil 		}
112708d1838fSDekel Peled 		rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
11283c7d44afSShahaf Shuler 		if (rc < 0) {
1129a6d83b6aSNélio Laranjeiro 			rte_errno = errno;
1130a170a30dSNélio Laranjeiro 			DRV_LOG(ERR,
1131a170a30dSNélio Laranjeiro 				"port %u failed to make Rx interrupt file"
1132a170a30dSNélio Laranjeiro 				" descriptor %d non-blocking for queue index"
1133a170a30dSNélio Laranjeiro 				" %d",
113408d1838fSDekel Peled 				dev->data->port_id, rxq_obj->fd, i);
1135af4f09f2SNélio Laranjeiro 			mlx5_rx_intr_vec_disable(dev);
1136a6d83b6aSNélio Laranjeiro 			return -rte_errno;
11373c7d44afSShahaf Shuler 		}
1138d61138d4SHarman Kalra 
1139d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_set(intr_handle, i,
1140d61138d4SHarman Kalra 					RTE_INTR_VEC_RXTX_OFFSET + count))
1141d61138d4SHarman Kalra 			return -rte_errno;
1142d61138d4SHarman Kalra 		if (rte_intr_efds_index_set(intr_handle, count,
1143d61138d4SHarman Kalra 						   rxq_obj->fd))
1144d61138d4SHarman Kalra 			return -rte_errno;
1145e1016cb7SAdrien Mazarguil 		count++;
11463c7d44afSShahaf Shuler 	}
1147e1016cb7SAdrien Mazarguil 	if (!count)
1148af4f09f2SNélio Laranjeiro 		mlx5_rx_intr_vec_disable(dev);
1149d61138d4SHarman Kalra 	else if (rte_intr_nb_efd_set(intr_handle, count))
1150d61138d4SHarman Kalra 		return -rte_errno;
11513c7d44afSShahaf Shuler 	return 0;
11523c7d44afSShahaf Shuler }
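/*
 * Illustrative mapping produced by the function above for four Rx queues
 * where queue 2 has no notification channel, assuming
 * RTE_INTR_VEC_RXTX_OFFSET is 1 and RTE_MAX_RXTX_INTR_VEC_ID is 512:
 *
 *	queue 0 -> intr_vec[0] = 1,       efds[0] = fd of queue 0
 *	queue 1 -> intr_vec[1] = 2,       efds[1] = fd of queue 1
 *	queue 2 -> intr_vec[2] = 1 + 512  (interrupts disabled for this queue)
 *	queue 3 -> intr_vec[3] = 3,       efds[2] = fd of queue 3
 */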
11533c7d44afSShahaf Shuler 
11543c7d44afSShahaf Shuler /**
1155e1016cb7SAdrien Mazarguil  * Clean up the Rx interrupt handler.
11563c7d44afSShahaf Shuler  *
1157af4f09f2SNélio Laranjeiro  * @param dev
1158af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
11593c7d44afSShahaf Shuler  */
11603c7d44afSShahaf Shuler void
1161af4f09f2SNélio Laranjeiro mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
11623c7d44afSShahaf Shuler {
1163dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1164df428ceeSYongseok Koh 	struct rte_intr_handle *intr_handle = dev->intr_handle;
116509cb5b58SNélio Laranjeiro 	unsigned int i;
116609cb5b58SNélio Laranjeiro 	unsigned int rxqs_n = priv->rxqs_n;
116709cb5b58SNélio Laranjeiro 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
11683c7d44afSShahaf Shuler 
1169df428ceeSYongseok Koh 	if (!dev->data->dev_conf.intr_conf.rxq)
117009cb5b58SNélio Laranjeiro 		return;
1171d61138d4SHarman Kalra 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
11728d929641SShahaf Shuler 		goto free;
117309cb5b58SNélio Laranjeiro 	for (i = 0; i != n; ++i) {
1174d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_get(intr_handle, i) ==
1175d61138d4SHarman Kalra 		    RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
117609cb5b58SNélio Laranjeiro 			continue;
117709cb5b58SNélio Laranjeiro 		/**
117809cb5b58SNélio Laranjeiro 		 * Need to access the queue directly to release the reference
11793a3d3982SDekel Peled 		 * kept in mlx5_rx_intr_vec_enable().
118009cb5b58SNélio Laranjeiro 		 */
11810cedf34dSXueming Li 		mlx5_rxq_deref(dev, i);
118209cb5b58SNélio Laranjeiro 	}
11838d929641SShahaf Shuler free:
11843c7d44afSShahaf Shuler 	rte_intr_free_epoll_fd(intr_handle);
1185d61138d4SHarman Kalra 
1186d61138d4SHarman Kalra 	rte_intr_vec_list_free(intr_handle);
1187d61138d4SHarman Kalra 
1188d61138d4SHarman Kalra 	rte_intr_nb_efd_set(intr_handle, 0);
11893c7d44afSShahaf Shuler }
1190b18042fbSAdrien Mazarguil 
119143e9d979SShachar Beiser /**
119243e9d979SShachar Beiser  *  MLX5 CQ notification.
119343e9d979SShachar Beiser  *
119443e9d979SShachar Beiser  *  @param rxq
119543e9d979SShachar Beiser  *     Pointer to receive queue structure.
119643e9d979SShachar Beiser  *  @param sq_n_rxq
119743e9d979SShachar Beiser  *     Sequence number per receive queue.
119843e9d979SShachar Beiser  */
119943e9d979SShachar Beiser static inline void
120078142aacSNélio Laranjeiro mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
120143e9d979SShachar Beiser {
120243e9d979SShachar Beiser 	int sq_n = 0;
120343e9d979SShachar Beiser 	uint32_t doorbell_hi;
120443e9d979SShachar Beiser 	uint64_t doorbell;
120543e9d979SShachar Beiser 
120643e9d979SShachar Beiser 	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
120743e9d979SShachar Beiser 	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
120843e9d979SShachar Beiser 	doorbell = (uint64_t)doorbell_hi << 32;
120943e9d979SShachar Beiser 	doorbell |= rxq->cqn;
12105dfa003dSMichael Baum 	mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell),
12115dfa003dSMichael Baum 			   doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0);
121243e9d979SShachar Beiser }
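/*
 * Worked example (illustrative): assuming MLX5_CQ_SQN_OFFSET is 28,
 * MLX5_CQ_SQN_MASK is 0x3 and MLX5_CI_MASK is 0xffffff, arming with
 * sq_n_rxq = 5, cq_ci = 0x1234 and cqn = 0x80 gives
 *
 *	sq_n        = 5 & 0x3                    = 1
 *	doorbell_hi = (1 << 28) | 0x1234         = 0x10001234
 *	doorbell    = (doorbell_hi << 32) | 0x80 = 0x1000123400000080
 *
 * i.e. the arming sequence number and CQ consumer index land in the high
 * 32 bits and the CQ number in the low 32 bits of the doorbell record.
 */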
12139f91fb54SAdrien Mazarguil 
1214b18042fbSAdrien Mazarguil /**
1215e1016cb7SAdrien Mazarguil  * DPDK callback for Rx queue interrupt enable.
1216b18042fbSAdrien Mazarguil  *
1217b18042fbSAdrien Mazarguil  * @param dev
1218b18042fbSAdrien Mazarguil  *   Pointer to Ethernet device structure.
1219b18042fbSAdrien Mazarguil  * @param rx_queue_id
1220e1016cb7SAdrien Mazarguil  *   Rx queue number.
1221b18042fbSAdrien Mazarguil  *
1222b18042fbSAdrien Mazarguil  * @return
1223a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1224b18042fbSAdrien Mazarguil  */
1225b18042fbSAdrien Mazarguil int
1226b18042fbSAdrien Mazarguil mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1227b18042fbSAdrien Mazarguil {
12280cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
12290cedf34dSXueming Li 	if (!rxq)
123024e4b650SMichael Baum 		goto error;
12310cedf34dSXueming Li 	if (rxq->ctrl->irq) {
12320cedf34dSXueming Li 		if (!rxq->ctrl->obj)
123324e4b650SMichael Baum 			goto error;
12340cedf34dSXueming Li 		mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
123524e4b650SMichael Baum 	}
123624e4b650SMichael Baum 	return 0;
123724e4b650SMichael Baum error:
1238a6d83b6aSNélio Laranjeiro 	rte_errno = EINVAL;
1239a6d83b6aSNélio Laranjeiro 	return -rte_errno;
124009cb5b58SNélio Laranjeiro }
1241b18042fbSAdrien Mazarguil 
1242b18042fbSAdrien Mazarguil /**
1243e1016cb7SAdrien Mazarguil  * DPDK callback for Rx queue interrupt disable.
1244b18042fbSAdrien Mazarguil  *
1245b18042fbSAdrien Mazarguil  * @param dev
1246b18042fbSAdrien Mazarguil  *   Pointer to Ethernet device structure.
1247b18042fbSAdrien Mazarguil  * @param rx_queue_id
1248e1016cb7SAdrien Mazarguil  *   Rx queue number.
1249b18042fbSAdrien Mazarguil  *
1250b18042fbSAdrien Mazarguil  * @return
1251a6d83b6aSNélio Laranjeiro  *   0 on success, a negative errno value otherwise and rte_errno is set.
1252b18042fbSAdrien Mazarguil  */
1253b18042fbSAdrien Mazarguil int
1254b18042fbSAdrien Mazarguil mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1255b18042fbSAdrien Mazarguil {
125632287079SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
12570cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
125824e4b650SMichael Baum 	int ret = 0;
1259b18042fbSAdrien Mazarguil 
12600cedf34dSXueming Li 	if (!rxq) {
1261a6d83b6aSNélio Laranjeiro 		rte_errno = EINVAL;
1262a6d83b6aSNélio Laranjeiro 		return -rte_errno;
1263e1016cb7SAdrien Mazarguil 	}
12640cedf34dSXueming Li 	if (!rxq->ctrl->obj)
126524e4b650SMichael Baum 		goto error;
12660cedf34dSXueming Li 	if (rxq->ctrl->irq) {
12670cedf34dSXueming Li 		ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
126832287079SMichael Baum 		if (ret < 0)
126924e4b650SMichael Baum 			goto error;
12700cedf34dSXueming Li 		rxq->ctrl->rxq.cq_arm_sn++;
127132287079SMichael Baum 	}
1272a6d83b6aSNélio Laranjeiro 	return 0;
127324e4b650SMichael Baum error:
1274385c1939SOphir Munk 	/**
127532287079SMichael Baum 	 * The ret variable may be EAGAIN, which means the get_event function
127632287079SMichael Baum 	 * was called before any event arrived.
1277385c1939SOphir Munk 	 */
1278385c1939SOphir Munk 	if (ret < 0)
1279385c1939SOphir Munk 		rte_errno = errno;
1280385c1939SOphir Munk 	else
1281385c1939SOphir Munk 		rte_errno = EINVAL;
12820cedf34dSXueming Li 	if (rte_errno != EAGAIN)
1283a170a30dSNélio Laranjeiro 		DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
12840f99970bSNélio Laranjeiro 			dev->data->port_id, rx_queue_id);
1285a6d83b6aSNélio Laranjeiro 	return -rte_errno;
1286b18042fbSAdrien Mazarguil }
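/*
 * Application-side sketch (illustrative, hypothetical variable names):
 * the usual power-saving loop that arms the CQ through the callbacks
 * above and sleeps until the next completion.
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	while (keep_running) {
 *		if (rte_eth_rx_burst(port_id, queue_id, mbufs, BURST) == 0) {
 *			rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
 *			rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *		}
 *	}
 */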
128709cb5b58SNélio Laranjeiro 
128809cb5b58SNélio Laranjeiro /**
128993403560SDekel Peled  * Verify the Rx queue objects list is empty.
129009cb5b58SNélio Laranjeiro  *
1291af4f09f2SNélio Laranjeiro  * @param dev
1292af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
129309cb5b58SNélio Laranjeiro  *
1294fb732b0aSNélio Laranjeiro  * @return
129593403560SDekel Peled  *   The number of objects not released.
129609cb5b58SNélio Laranjeiro  */
129709cb5b58SNélio Laranjeiro int
129893403560SDekel Peled mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
129909cb5b58SNélio Laranjeiro {
1300dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
130109cb5b58SNélio Laranjeiro 	int ret = 0;
130293403560SDekel Peled 	struct mlx5_rxq_obj *rxq_obj;
130309cb5b58SNélio Laranjeiro 
130493403560SDekel Peled 	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
13050ece5de3SDmitry Kozlyuk 		if (rxq_obj->rxq_ctrl == NULL)
13060ece5de3SDmitry Kozlyuk 			continue;
130709c25553SXueming Li 		if (rxq_obj->rxq_ctrl->rxq.shared &&
130809c25553SXueming Li 		    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
130909c25553SXueming Li 			continue;
131093403560SDekel Peled 		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
131193403560SDekel Peled 			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
131209cb5b58SNélio Laranjeiro 		++ret;
131309cb5b58SNélio Laranjeiro 	}
131409cb5b58SNélio Laranjeiro 	return ret;
131509cb5b58SNélio Laranjeiro }
131609cb5b58SNélio Laranjeiro 
131709cb5b58SNélio Laranjeiro /**
13187d6bf6b8SYongseok Koh  * Callback function to initialize mbufs for Multi-Packet RQ.
13197d6bf6b8SYongseok Koh  */
13207d6bf6b8SYongseok Koh static inline void
13213a22f387SMatan Azrad mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
13227d6bf6b8SYongseok Koh 		    void *_m, unsigned int i __rte_unused)
13237d6bf6b8SYongseok Koh {
13247d6bf6b8SYongseok Koh 	struct mlx5_mprq_buf *buf = _m;
13253a22f387SMatan Azrad 	struct rte_mbuf_ext_shared_info *shinfo;
13263a22f387SMatan Azrad 	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
13273a22f387SMatan Azrad 	unsigned int j;
13287d6bf6b8SYongseok Koh 
13297d6bf6b8SYongseok Koh 	memset(_m, 0, sizeof(*buf));
13307d6bf6b8SYongseok Koh 	buf->mp = mp;
1331e12a0166STyler Retzlaff 	rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
13323a22f387SMatan Azrad 	for (j = 0; j != strd_n; ++j) {
13333a22f387SMatan Azrad 		shinfo = &buf->shinfos[j];
13343a22f387SMatan Azrad 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
13353a22f387SMatan Azrad 		shinfo->fcb_opaque = buf;
13363a22f387SMatan Azrad 	}
13377d6bf6b8SYongseok Koh }
13387d6bf6b8SYongseok Koh 
13397d6bf6b8SYongseok Koh /**
13407d6bf6b8SYongseok Koh  * Free mempool of Multi-Packet RQ.
13417d6bf6b8SYongseok Koh  *
13427d6bf6b8SYongseok Koh  * @param dev
13437d6bf6b8SYongseok Koh  *   Pointer to Ethernet device.
13447d6bf6b8SYongseok Koh  *
13457d6bf6b8SYongseok Koh  * @return
13467d6bf6b8SYongseok Koh  *   0 on success, negative errno value on failure.
13477d6bf6b8SYongseok Koh  */
13487d6bf6b8SYongseok Koh int
13497d6bf6b8SYongseok Koh mlx5_mprq_free_mp(struct rte_eth_dev *dev)
13507d6bf6b8SYongseok Koh {
1351dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
13527d6bf6b8SYongseok Koh 	struct rte_mempool *mp = priv->mprq_mp;
13537d6bf6b8SYongseok Koh 	unsigned int i;
13547d6bf6b8SYongseok Koh 
13557d6bf6b8SYongseok Koh 	if (mp == NULL)
13567d6bf6b8SYongseok Koh 		return 0;
13577d6bf6b8SYongseok Koh 	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
13587d6bf6b8SYongseok Koh 		dev->data->port_id, mp->name);
13597d6bf6b8SYongseok Koh 	/*
13607d6bf6b8SYongseok Koh 	 * If a buffer in the pool has been externally attached to a mbuf and it
136142280dd9SDekel Peled 	 * is still in use by the application, destroying the Rx queue can spoil
13627d6bf6b8SYongseok Koh 	 * the packet. It is unlikely to happen but if the application dynamically
13637d6bf6b8SYongseok Koh 	 * creates and destroys queues while holding Rx packets, this can happen.
13647d6bf6b8SYongseok Koh 	 *
13657d6bf6b8SYongseok Koh 	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
13667d6bf6b8SYongseok Koh 	 * RQ isn't provided by the application but managed by the PMD.
13677d6bf6b8SYongseok Koh 	 */
13687d6bf6b8SYongseok Koh 	if (!rte_mempool_full(mp)) {
13697d6bf6b8SYongseok Koh 		DRV_LOG(ERR,
13707d6bf6b8SYongseok Koh 			"port %u mempool for Multi-Packet RQ is still in use",
13717d6bf6b8SYongseok Koh 			dev->data->port_id);
13727d6bf6b8SYongseok Koh 		rte_errno = EBUSY;
13737d6bf6b8SYongseok Koh 		return -rte_errno;
13747d6bf6b8SYongseok Koh 	}
13757d6bf6b8SYongseok Koh 	rte_mempool_free(mp);
13767d6bf6b8SYongseok Koh 	/* Unset mempool for each Rx queue. */
13777d6bf6b8SYongseok Koh 	for (i = 0; i != priv->rxqs_n; ++i) {
13785cf0707fSXueming Li 		struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);
13797d6bf6b8SYongseok Koh 
13807d6bf6b8SYongseok Koh 		if (rxq == NULL)
13817d6bf6b8SYongseok Koh 			continue;
13827d6bf6b8SYongseok Koh 		rxq->mprq_mp = NULL;
13837d6bf6b8SYongseok Koh 	}
138439e98c21SYongseok Koh 	priv->mprq_mp = NULL;
13857d6bf6b8SYongseok Koh 	return 0;
13867d6bf6b8SYongseok Koh }
13877d6bf6b8SYongseok Koh 
13887d6bf6b8SYongseok Koh /**
13897d6bf6b8SYongseok Koh  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
13907d6bf6b8SYongseok Koh  * mempool. If already allocated, reuse it if there are enough elements.
13917d6bf6b8SYongseok Koh  * Otherwise, resize it.
13927d6bf6b8SYongseok Koh  *
13937d6bf6b8SYongseok Koh  * @param dev
13947d6bf6b8SYongseok Koh  *   Pointer to Ethernet device.
13957d6bf6b8SYongseok Koh  *
13967d6bf6b8SYongseok Koh  * @return
13977d6bf6b8SYongseok Koh  *   0 on success, negative errno value on failure.
13987d6bf6b8SYongseok Koh  */
13997d6bf6b8SYongseok Koh int
14007d6bf6b8SYongseok Koh mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
14017d6bf6b8SYongseok Koh {
1402dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
14037d6bf6b8SYongseok Koh 	struct rte_mempool *mp = priv->mprq_mp;
14047d6bf6b8SYongseok Koh 	char name[RTE_MEMPOOL_NAMESIZE];
14057d6bf6b8SYongseok Koh 	unsigned int desc = 0;
14067d6bf6b8SYongseok Koh 	unsigned int buf_len;
14077d6bf6b8SYongseok Koh 	unsigned int obj_num;
14087d6bf6b8SYongseok Koh 	unsigned int obj_size;
14090947ed38SMichael Baum 	unsigned int log_strd_num = 0;
14100947ed38SMichael Baum 	unsigned int log_strd_sz = 0;
14117d6bf6b8SYongseok Koh 	unsigned int i;
1412e79c9be9SOri Kam 	unsigned int n_ibv = 0;
1413fec28ca0SDmitry Kozlyuk 	int ret;
14147d6bf6b8SYongseok Koh 
14157d6bf6b8SYongseok Koh 	if (!mlx5_mprq_enabled(dev))
14167d6bf6b8SYongseok Koh 		return 0;
14177d6bf6b8SYongseok Koh 	/* Count the total number of descriptors configured. */
14187d6bf6b8SYongseok Koh 	for (i = 0; i != priv->rxqs_n; ++i) {
14195cf0707fSXueming Li 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
14205cf0707fSXueming Li 		struct mlx5_rxq_data *rxq;
14217d6bf6b8SYongseok Koh 
1422c06f77aeSMichael Baum 		if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
14237d6bf6b8SYongseok Koh 			continue;
14245cf0707fSXueming Li 		rxq = &rxq_ctrl->rxq;
1425e79c9be9SOri Kam 		n_ibv++;
14267d6bf6b8SYongseok Koh 		desc += 1 << rxq->elts_n;
14277d6bf6b8SYongseok Koh 		/* Get the max number of strides. */
14280947ed38SMichael Baum 		if (log_strd_num < rxq->log_strd_num)
14290947ed38SMichael Baum 			log_strd_num = rxq->log_strd_num;
14307d6bf6b8SYongseok Koh 		/* Get the max size of a stride. */
14310947ed38SMichael Baum 		if (log_strd_sz < rxq->log_strd_sz)
14320947ed38SMichael Baum 			log_strd_sz = rxq->log_strd_sz;
14337d6bf6b8SYongseok Koh 	}
14340947ed38SMichael Baum 	MLX5_ASSERT(log_strd_num && log_strd_sz);
14350947ed38SMichael Baum 	buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
14360947ed38SMichael Baum 	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
14370947ed38SMichael Baum 		   RTE_BIT32(log_strd_num) *
14380947ed38SMichael Baum 		   sizeof(struct rte_mbuf_ext_shared_info) +
14390947ed38SMichael Baum 		   RTE_PKTMBUF_HEADROOM;
14407d6bf6b8SYongseok Koh 	/*
14417d6bf6b8SYongseok Koh 	 * Received packets can be either memcpy'd or externally referenced. When
14427d6bf6b8SYongseok Koh 	 * a packet is attached to an mbuf as an external buffer, it isn't
14437d6bf6b8SYongseok Koh 	 * possible to predict how the buffers will be queued by the
14447d6bf6b8SYongseok Koh 	 * application, so there's no option to exactly pre-allocate the needed
14457d6bf6b8SYongseok Koh 	 * buffers in advance but to speculatively prepare enough of them.
14467d6bf6b8SYongseok Koh 	 *
14477d6bf6b8SYongseok Koh 	 * In the data path, if this mempool is depleted, the PMD will try to memcpy
14487d6bf6b8SYongseok Koh 	 * received packets to buffers provided by the application (rxq->mp) until
14497d6bf6b8SYongseok Koh 	 * this mempool becomes available again.
14507d6bf6b8SYongseok Koh 	 */
14517d6bf6b8SYongseok Koh 	desc *= 4;
1452e79c9be9SOri Kam 	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1453b85b719aSYongseok Koh 	/*
1454b85b719aSYongseok Koh 	 * rte_mempool_create_empty() has a sanity check to refuse a large cache
1455b85b719aSYongseok Koh 	 * size compared to the number of elements.
14564b546488SStephen Hemminger 	 * CALC_CACHE_FLUSHTHRESH() is defined in a C file, so using a
1457b85b719aSYongseok Koh 	 * constant number 2 instead.
1458b85b719aSYongseok Koh 	 */
1459b85b719aSYongseok Koh 	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
14607d6bf6b8SYongseok Koh 	/* Check whether a mempool is already allocated and if it can be reused. */
14617d6bf6b8SYongseok Koh 	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
14627d6bf6b8SYongseok Koh 		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
14637d6bf6b8SYongseok Koh 			dev->data->port_id, mp->name);
14647d6bf6b8SYongseok Koh 		/* Reuse. */
14657d6bf6b8SYongseok Koh 		goto exit;
14667d6bf6b8SYongseok Koh 	} else if (mp != NULL) {
14677d6bf6b8SYongseok Koh 		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
14687d6bf6b8SYongseok Koh 			dev->data->port_id, mp->name);
14697d6bf6b8SYongseok Koh 		/*
14707d6bf6b8SYongseok Koh 		 * If failed to free, which means it may be still in use, no way
14717d6bf6b8SYongseok Koh 		 * but to keep using the existing one. On buffer underrun,
14727d6bf6b8SYongseok Koh 		 * packets will be memcpy'd instead of external buffer
14737d6bf6b8SYongseok Koh 		 * attachment.
14747d6bf6b8SYongseok Koh 		 */
14757d6bf6b8SYongseok Koh 		if (mlx5_mprq_free_mp(dev)) {
14767d6bf6b8SYongseok Koh 			if (mp->elt_size >= obj_size)
14777d6bf6b8SYongseok Koh 				goto exit;
14787d6bf6b8SYongseok Koh 			else
14797d6bf6b8SYongseok Koh 				return -rte_errno;
14807d6bf6b8SYongseok Koh 		}
14817d6bf6b8SYongseok Koh 	}
14824594487bSYongseok Koh 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
14837d6bf6b8SYongseok Koh 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
14843a22f387SMatan Azrad 				0, NULL, NULL, mlx5_mprq_buf_init,
14850947ed38SMichael Baum 				(void *)((uintptr_t)1 << log_strd_num),
14867d6bf6b8SYongseok Koh 				dev->device->numa_node, 0);
14877d6bf6b8SYongseok Koh 	if (mp == NULL) {
14887d6bf6b8SYongseok Koh 		DRV_LOG(ERR,
14897d6bf6b8SYongseok Koh 			"port %u failed to allocate a mempool for"
14907d6bf6b8SYongseok Koh 			" Multi-Packet RQ, count=%u, size=%u",
14917d6bf6b8SYongseok Koh 			dev->data->port_id, obj_num, obj_size);
14927d6bf6b8SYongseok Koh 		rte_errno = ENOMEM;
14937d6bf6b8SYongseok Koh 		return -rte_errno;
14947d6bf6b8SYongseok Koh 	}
149508ac0358SDmitry Kozlyuk 	ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
1496fec28ca0SDmitry Kozlyuk 	if (ret < 0 && rte_errno != EEXIST) {
1497fec28ca0SDmitry Kozlyuk 		ret = rte_errno;
1498fec28ca0SDmitry Kozlyuk 		DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
1499fec28ca0SDmitry Kozlyuk 			dev->data->port_id);
1500fec28ca0SDmitry Kozlyuk 		rte_mempool_free(mp);
1501fec28ca0SDmitry Kozlyuk 		rte_errno = ret;
1502fec28ca0SDmitry Kozlyuk 		return -rte_errno;
1503fec28ca0SDmitry Kozlyuk 	}
15047d6bf6b8SYongseok Koh 	priv->mprq_mp = mp;
15057d6bf6b8SYongseok Koh exit:
15067d6bf6b8SYongseok Koh 	/* Set mempool for each Rx queue. */
15077d6bf6b8SYongseok Koh 	for (i = 0; i != priv->rxqs_n; ++i) {
15085cf0707fSXueming Li 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
15097d6bf6b8SYongseok Koh 
1510c06f77aeSMichael Baum 		if (rxq_ctrl == NULL || rxq_ctrl->is_hairpin)
15117d6bf6b8SYongseok Koh 			continue;
15125cf0707fSXueming Li 		rxq_ctrl->rxq.mprq_mp = mp;
15137d6bf6b8SYongseok Koh 	}
15147d6bf6b8SYongseok Koh 	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
15157d6bf6b8SYongseok Koh 		dev->data->port_id);
15167d6bf6b8SYongseok Koh 	return 0;
15177d6bf6b8SYongseok Koh }
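/*
 * Worked sizing example (illustrative): with a single MPRQ Rx queue of
 * 512 WQEs (1 << elts_n), log_strd_num = 6 (64 strides per WQE) and
 * log_strd_sz = 11 (2048-byte strides), the function above computes
 *
 *	buf_len  = 64 * 2048 = 131072
 *	obj_size = sizeof(struct mlx5_mprq_buf) + 131072
 *		   + 64 * sizeof(struct rte_mbuf_ext_shared_info)
 *		   + RTE_PKTMBUF_HEADROOM
 *	desc     = 512 * 4 = 2048
 *	obj_num  = 2048 + MLX5_MPRQ_MP_CACHE_SZ * 1
 *
 * Four times the configured descriptors are provisioned because buffers
 * attached externally to mbufs may still be held by the application.
 */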
15187d6bf6b8SYongseok Koh 
151950c00bafSMatan Azrad #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
152050c00bafSMatan Azrad 					sizeof(struct rte_vlan_hdr) * 2 + \
152150c00bafSMatan Azrad 					sizeof(struct rte_ipv6_hdr)))
15222579543fSMatan Azrad #define MAX_TCP_OPTION_SIZE 40u
15232579543fSMatan Azrad #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
15242579543fSMatan Azrad 				 sizeof(struct rte_tcp_hdr) + \
15252579543fSMatan Azrad 				 MAX_TCP_OPTION_SIZE))
15262579543fSMatan Azrad 
15277d6bf6b8SYongseok Koh /**
1528ee39fe82SMatan Azrad  * Adjust the maximum LRO message size.
1529ee39fe82SMatan Azrad  *
1530ee39fe82SMatan Azrad  * @param dev
1531ee39fe82SMatan Azrad  *   Pointer to Ethernet device.
15323d491dd6SDekel Peled  * @param idx
15333d491dd6SDekel Peled  *   RX queue index.
153450c00bafSMatan Azrad  * @param max_lro_size
153550c00bafSMatan Azrad  *   The maximum size for LRO packet.
1536ee39fe82SMatan Azrad  */
1537ee39fe82SMatan Azrad static void
15383d491dd6SDekel Peled mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
15393d491dd6SDekel Peled 			     uint32_t max_lro_size)
1540ee39fe82SMatan Azrad {
1541ee39fe82SMatan Azrad 	struct mlx5_priv *priv = dev->data->dev_private;
1542ee39fe82SMatan Azrad 
154353820561SMichael Baum 	if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
154450c00bafSMatan Azrad 	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
154550c00bafSMatan Azrad 	    MLX5_MAX_TCP_HDR_OFFSET)
154650c00bafSMatan Azrad 		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
154750c00bafSMatan Azrad 	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1548ee39fe82SMatan Azrad 	if (priv->max_lro_msg_size)
1549ee39fe82SMatan Azrad 		priv->max_lro_msg_size =
155050c00bafSMatan Azrad 			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1551ee39fe82SMatan Azrad 	else
155250c00bafSMatan Azrad 		priv->max_lro_msg_size = max_lro_size;
15533d491dd6SDekel Peled 	DRV_LOG(DEBUG,
15543d491dd6SDekel Peled 		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1555a2364004SGregory Etelson 		dev->data->port_id, idx, priv->max_lro_msg_size);
1556ee39fe82SMatan Azrad }
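/*
 * Worked example (illustrative): with max_lro_size = 65535 and the
 * firmware reporting MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4, the worst-case
 * L2/L3 headers (MLX5_MAX_TCP_HDR_OFFSET = 14 + 2 * 4 + 40 = 62 bytes)
 * are subtracted, giving 65473. The result is then capped by
 * MLX5_MAX_LRO_SIZE and merged with any previously configured per-queue
 * value by taking the minimum.
 */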
1557ee39fe82SMatan Azrad 
1558ee39fe82SMatan Azrad /**
155934776af6SMichael Baum  * Prepare both the size and the number of strides for Multi-Packet RQ.
156034776af6SMichael Baum  *
156134776af6SMichael Baum  * @param dev
156234776af6SMichael Baum  *   Pointer to Ethernet device.
156334776af6SMichael Baum  * @param idx
156434776af6SMichael Baum  *   RX queue index.
156534776af6SMichael Baum  * @param desc
156634776af6SMichael Baum  *   Number of descriptors to configure in queue.
156734776af6SMichael Baum  * @param rx_seg_en
156834776af6SMichael Baum  *   Indicator whether Rx buffer split is enabled; if so, Multi-Packet RQ is not enabled.
156934776af6SMichael Baum  * @param min_mbuf_size
157034776af6SMichael Baum  *   Non scatter min mbuf size, max_rx_pktlen plus overhead.
157134776af6SMichael Baum  * @param actual_log_stride_num
157234776af6SMichael Baum  *   Log number of strides to configure for this queue.
157334776af6SMichael Baum  * @param actual_log_stride_size
157434776af6SMichael Baum  *   Log stride size to configure for this queue.
15753a29cb3aSAlexander Kozyrev  * @param is_extmem
15763a29cb3aSAlexander Kozyrev  *   Whether an external pinned memory pool is used.
157734776af6SMichael Baum  * @return
157834776af6SMichael Baum  *   0 if Multi-Packet RQ is supported, otherwise -1.
157934776af6SMichael Baum  */
158034776af6SMichael Baum static int
158134776af6SMichael Baum mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
158234776af6SMichael Baum 		  bool rx_seg_en, uint32_t min_mbuf_size,
158334776af6SMichael Baum 		  uint32_t *actual_log_stride_num,
15843a29cb3aSAlexander Kozyrev 		  uint32_t *actual_log_stride_size,
15853a29cb3aSAlexander Kozyrev 		  bool is_extmem)
158634776af6SMichael Baum {
158734776af6SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
158845a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
158987af0d1eSMichael Baum 	struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
159087af0d1eSMichael Baum 	uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
159187af0d1eSMichael Baum 	uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
159234776af6SMichael Baum 	uint32_t log_def_stride_num =
159334776af6SMichael Baum 			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
159434776af6SMichael Baum 					log_min_stride_num),
159534776af6SMichael Baum 				log_max_stride_num);
159687af0d1eSMichael Baum 	uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size;
159787af0d1eSMichael Baum 	uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size;
159834776af6SMichael Baum 	uint32_t log_def_stride_size =
159934776af6SMichael Baum 			RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
160034776af6SMichael Baum 					log_min_stride_size),
160134776af6SMichael Baum 				log_max_stride_size);
160234776af6SMichael Baum 	uint32_t log_stride_wqe_size;
160334776af6SMichael Baum 
16043a29cb3aSAlexander Kozyrev 	if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
160534776af6SMichael Baum 		goto unsupport;
160634776af6SMichael Baum 	/* Checks if chosen number of strides is in supported range. */
160734776af6SMichael Baum 	if (config->mprq.log_stride_num > log_max_stride_num ||
160834776af6SMichael Baum 	    config->mprq.log_stride_num < log_min_stride_num) {
160934776af6SMichael Baum 		*actual_log_stride_num = log_def_stride_num;
161034776af6SMichael Baum 		DRV_LOG(WARNING,
161134776af6SMichael Baum 			"Port %u Rx queue %u number of strides for Multi-Packet RQ is out of range, setting default value (%u)",
161234776af6SMichael Baum 			dev->data->port_id, idx, RTE_BIT32(log_def_stride_num));
161334776af6SMichael Baum 	} else {
161434776af6SMichael Baum 		*actual_log_stride_num = config->mprq.log_stride_num;
161534776af6SMichael Baum 	}
161634776af6SMichael Baum 	/* Checks if chosen size of stride is in supported range. */
1617fdee0f1bSAlexander Kozyrev 	if (config->mprq.log_stride_size != (uint32_t)MLX5_ARG_UNSET) {
161834776af6SMichael Baum 		if (config->mprq.log_stride_size > log_max_stride_size ||
161934776af6SMichael Baum 			config->mprq.log_stride_size < log_min_stride_size) {
162034776af6SMichael Baum 			*actual_log_stride_size = log_def_stride_size;
162134776af6SMichael Baum 			DRV_LOG(WARNING,
162234776af6SMichael Baum 				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
162334776af6SMichael Baum 				dev->data->port_id, idx,
162434776af6SMichael Baum 				RTE_BIT32(log_def_stride_size));
162534776af6SMichael Baum 		} else {
162634776af6SMichael Baum 			*actual_log_stride_size = config->mprq.log_stride_size;
162734776af6SMichael Baum 		}
1628fdee0f1bSAlexander Kozyrev 	} else {
1629e6479f00SAlexander Kozyrev 		/* Make the stride fit the mbuf size by default. */
1630e6479f00SAlexander Kozyrev 		if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) {
1631e6479f00SAlexander Kozyrev 			DRV_LOG(WARNING,
1632e6479f00SAlexander Kozyrev 				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to match the mbuf size (%u)",
1633e6479f00SAlexander Kozyrev 				dev->data->port_id, idx, min_mbuf_size);
163434776af6SMichael Baum 			*actual_log_stride_size = log2above(min_mbuf_size);
1635e6479f00SAlexander Kozyrev 		} else {
163634776af6SMichael Baum 			goto unsupport;
163734776af6SMichael Baum 		}
1638e6479f00SAlexander Kozyrev 	}
1639e6479f00SAlexander Kozyrev 	/* Make sure the stride size is greater than the headroom. */
1640e6479f00SAlexander Kozyrev 	if (RTE_BIT32(*actual_log_stride_size) < RTE_PKTMBUF_HEADROOM) {
1641e6479f00SAlexander Kozyrev 		if (RTE_BIT32(log_max_stride_size) > RTE_PKTMBUF_HEADROOM) {
1642e6479f00SAlexander Kozyrev 			DRV_LOG(WARNING,
1643e6479f00SAlexander Kozyrev 				"Port %u Rx queue %u size of a stride for Multi-Packet RQ is adjusted to accommodate the headroom (%u)",
1644e6479f00SAlexander Kozyrev 				dev->data->port_id, idx, RTE_PKTMBUF_HEADROOM);
1645e6479f00SAlexander Kozyrev 			*actual_log_stride_size = log2above(RTE_PKTMBUF_HEADROOM);
1646e6479f00SAlexander Kozyrev 		} else {
1647e6479f00SAlexander Kozyrev 			goto unsupport;
1648e6479f00SAlexander Kozyrev 		}
1649e6479f00SAlexander Kozyrev 	}
165034776af6SMichael Baum 	log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
165134776af6SMichael Baum 	/* Check if WQE buffer size is supported by hardware. */
165287af0d1eSMichael Baum 	if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) {
165334776af6SMichael Baum 		*actual_log_stride_num = log_def_stride_num;
165434776af6SMichael Baum 		*actual_log_stride_size = log_def_stride_size;
165534776af6SMichael Baum 		DRV_LOG(WARNING,
165634776af6SMichael Baum 			"Port %u Rx queue %u size of WQE buffer for Multi-Packet RQ is too small, setting default values (stride_num_n=%u, stride_size_n=%u)",
165734776af6SMichael Baum 			dev->data->port_id, idx, RTE_BIT32(log_def_stride_num),
165834776af6SMichael Baum 			RTE_BIT32(log_def_stride_size));
165934776af6SMichael Baum 		log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
166034776af6SMichael Baum 	}
166187af0d1eSMichael Baum 	MLX5_ASSERT(log_stride_wqe_size >=
166287af0d1eSMichael Baum 		    dev_cap->mprq.log_min_stride_wqe_size);
166334776af6SMichael Baum 	if (desc <= RTE_BIT32(*actual_log_stride_num))
166434776af6SMichael Baum 		goto unsupport;
166534776af6SMichael Baum 	if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
166634776af6SMichael Baum 		DRV_LOG(WARNING, "Port %u Rx queue %u "
166734776af6SMichael Baum 			"Multi-Packet RQ is unsupported, WQE buffer size (%u) "
166834776af6SMichael Baum 			"is smaller than min mbuf size (%u)",
166934776af6SMichael Baum 			dev->data->port_id, idx, RTE_BIT32(log_stride_wqe_size),
167034776af6SMichael Baum 			min_mbuf_size);
167134776af6SMichael Baum 		goto unsupport;
167234776af6SMichael Baum 	}
167334776af6SMichael Baum 	DRV_LOG(DEBUG, "Port %u Rx queue %u "
167434776af6SMichael Baum 		"Multi-Packet RQ is enabled strd_num_n = %u, strd_sz_n = %u",
167534776af6SMichael Baum 		dev->data->port_id, idx, RTE_BIT32(*actual_log_stride_num),
167634776af6SMichael Baum 		RTE_BIT32(*actual_log_stride_size));
167734776af6SMichael Baum 	return 0;
167834776af6SMichael Baum unsupport:
167934776af6SMichael Baum 	if (config->mprq.enabled)
168034776af6SMichael Baum 		DRV_LOG(WARNING,
168134776af6SMichael Baum 			"Port %u MPRQ is requested but cannot be enabled\n"
168234776af6SMichael Baum 			" (requested: pkt_sz = %u, desc_num = %u,"
168334776af6SMichael Baum 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
168434776af6SMichael Baum 			"  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
168534776af6SMichael Baum 			" min_stride_sz = %u, max_stride_sz = %u).\n"
16863a29cb3aSAlexander Kozyrev 			"Rx segment is %senabled. External mempool is %sused.",
168734776af6SMichael Baum 			dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
1688fdee0f1bSAlexander Kozyrev 			config->mprq.log_stride_size == (uint32_t)MLX5_ARG_UNSET ?
1689fdee0f1bSAlexander Kozyrev 			RTE_BIT32(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) :
169034776af6SMichael Baum 			RTE_BIT32(config->mprq.log_stride_size),
169134776af6SMichael Baum 			RTE_BIT32(config->mprq.log_stride_num),
169234776af6SMichael Baum 			config->mprq.min_rxqs_num,
169387af0d1eSMichael Baum 			RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
169487af0d1eSMichael Baum 			RTE_BIT32(dev_cap->mprq.log_min_stride_size),
169587af0d1eSMichael Baum 			RTE_BIT32(dev_cap->mprq.log_max_stride_size),
16963a29cb3aSAlexander Kozyrev 			rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
169734776af6SMichael Baum 	return -1;
169834776af6SMichael Baum }
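/*
 * Illustrative outcome (hypothetical numbers): suppose the resolved
 * log_stride_num is 6 and no stride size was given via devargs, while
 * desc = 512 and min_mbuf_size = 2176. The stride size is then fitted to
 * the mbuf, log2above(2176) = 12, so one WQE buffer spans
 * 2^(6 + 12) = 256 KB. Since desc (512) exceeds the 2^6 strides of a
 * single WQE and min_mbuf_size fits in one WQE buffer, MPRQ is accepted
 * with *actual_log_stride_num = 6 and *actual_log_stride_size = 12.
 */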
169934776af6SMichael Baum 
170034776af6SMichael Baum /**
1701a1366b1aSNélio Laranjeiro  * Create a DPDK Rx queue.
1702a1366b1aSNélio Laranjeiro  *
1703af4f09f2SNélio Laranjeiro  * @param dev
1704af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
1705f685878aSMichael Baum  * @param idx
1706f685878aSMichael Baum  *   RX queue index.
1707a1366b1aSNélio Laranjeiro  * @param desc
1708a1366b1aSNélio Laranjeiro  *   Number of descriptors to configure in queue.
1709a1366b1aSNélio Laranjeiro  * @param socket
1710a1366b1aSNélio Laranjeiro  *   NUMA socket on which memory must be allocated.
1711a1366b1aSNélio Laranjeiro  *
1712a1366b1aSNélio Laranjeiro  * @return
1713a6d83b6aSNélio Laranjeiro  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1714a1366b1aSNélio Laranjeiro  */
1715a1366b1aSNélio Laranjeiro struct mlx5_rxq_ctrl *
1716f685878aSMichael Baum mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
171717b843ebSShahaf Shuler 	     unsigned int socket, const struct rte_eth_rxconf *conf,
17183a29cb3aSAlexander Kozyrev 	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
17193a29cb3aSAlexander Kozyrev 	     bool is_extmem)
1720a1366b1aSNélio Laranjeiro {
1721dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
1722a1366b1aSNélio Laranjeiro 	struct mlx5_rxq_ctrl *tmpl;
17239f209b59SViacheslav Ovsiienko 	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
172445a6df80SMichael Baum 	struct mlx5_port_config *config = &priv->config;
1725a4996bd8SWei Dai 	uint64_t offloads = conf->offloads |
1726a4996bd8SWei Dai 			   dev->data->dev_conf.rxmode.offloads;
1727295968d1SFerruh Yigit 	unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
17281bb4a528SFerruh Yigit 	unsigned int max_rx_pktlen = lro_on_queue ?
17291c7e57f9SDekel Peled 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
17301bb4a528SFerruh Yigit 			dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
17311bb4a528SFerruh Yigit 				RTE_ETHER_CRC_LEN;
17321bb4a528SFerruh Yigit 	unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
1733721c9530SMatan Azrad 							RTE_PKTMBUF_HEADROOM;
1734bd41389eSMatan Azrad 	unsigned int max_lro_size = 0;
17352579543fSMatan Azrad 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
173634776af6SMichael Baum 	uint32_t mprq_log_actual_stride_num = 0;
173734776af6SMichael Baum 	uint32_t mprq_log_actual_stride_size = 0;
173834776af6SMichael Baum 	bool rx_seg_en = n_seg != 1 || rx_seg[0].offset || rx_seg[0].length;
173934776af6SMichael Baum 	const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
174034776af6SMichael Baum 					       non_scatter_min_mbuf_size,
174134776af6SMichael Baum 					       &mprq_log_actual_stride_num,
17423a29cb3aSAlexander Kozyrev 					       &mprq_log_actual_stride_size,
17433a29cb3aSAlexander Kozyrev 					       is_extmem);
17440f20acbfSAlexander Kozyrev 	/*
17450f20acbfSAlexander Kozyrev 	 * Always allocate extra slots, even if eventually
17460f20acbfSAlexander Kozyrev 	 * the vector Rx will not be used.
17470f20acbfSAlexander Kozyrev 	 */
17480f20acbfSAlexander Kozyrev 	uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
174934776af6SMichael Baum 	size_t alloc_size = sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *);
1750a0a45e8aSViacheslav Ovsiienko 	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1751a0a45e8aSViacheslav Ovsiienko 	unsigned int tail_len;
1752a1366b1aSNélio Laranjeiro 
175334776af6SMichael Baum 	if (mprq_en) {
175434776af6SMichael Baum 		/* Trim the number of descs needed. */
175534776af6SMichael Baum 		desc >>= mprq_log_actual_stride_num;
175634776af6SMichael Baum 		alloc_size += desc * sizeof(struct mlx5_mprq_buf *);
175734776af6SMichael Baum 	}
175834776af6SMichael Baum 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, alloc_size, 0, socket);
1759a0a45e8aSViacheslav Ovsiienko 	if (!tmpl) {
1760a0a45e8aSViacheslav Ovsiienko 		rte_errno = ENOMEM;
1761a0a45e8aSViacheslav Ovsiienko 		return NULL;
1762a0a45e8aSViacheslav Ovsiienko 	}
17634cda06c3SXueming Li 	LIST_INIT(&tmpl->owners);
1764a0a45e8aSViacheslav Ovsiienko 	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1765a0a45e8aSViacheslav Ovsiienko 	/*
1766572c9d4bSViacheslav Ovsiienko 	 * Save the original segment configuration in the shared queue
1767572c9d4bSViacheslav Ovsiienko 	 * descriptor for the later check on the sibling queue creation.
1768572c9d4bSViacheslav Ovsiienko 	 */
1769572c9d4bSViacheslav Ovsiienko 	tmpl->rxseg_n = n_seg;
1770572c9d4bSViacheslav Ovsiienko 	rte_memcpy(tmpl->rxseg, qs_seg,
1771572c9d4bSViacheslav Ovsiienko 		   sizeof(struct rte_eth_rxseg_split) * n_seg);
1772572c9d4bSViacheslav Ovsiienko 	/*
1773a0a45e8aSViacheslav Ovsiienko 	 * Build the array of actual buffer offsets and lengths.
1774a0a45e8aSViacheslav Ovsiienko 	 * Pad with the buffers from the last memory pool if
1775a0a45e8aSViacheslav Ovsiienko 	 * needed to handle max size packets, replace zero length
1776a0a45e8aSViacheslav Ovsiienko 	 * with the buffer length from the pool.
1777a0a45e8aSViacheslav Ovsiienko 	 */
17781bb4a528SFerruh Yigit 	tail_len = max_rx_pktlen;
1779a0a45e8aSViacheslav Ovsiienko 	do {
1780a0a45e8aSViacheslav Ovsiienko 		struct mlx5_eth_rxseg *hw_seg =
1781a0a45e8aSViacheslav Ovsiienko 					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1782a0a45e8aSViacheslav Ovsiienko 		uint32_t buf_len, offset, seg_len;
1783a0a45e8aSViacheslav Ovsiienko 
1784a0a45e8aSViacheslav Ovsiienko 		/*
1785a0a45e8aSViacheslav Ovsiienko 		 * For the buffers beyond the described segments the offset is zero,
1786a0a45e8aSViacheslav Ovsiienko 		 * the first buffer contains the headroom.
1787a0a45e8aSViacheslav Ovsiienko 		 */
1788a0a45e8aSViacheslav Ovsiienko 		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1789a0a45e8aSViacheslav Ovsiienko 		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1790a0a45e8aSViacheslav Ovsiienko 			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1791a0a45e8aSViacheslav Ovsiienko 		/*
1792a0a45e8aSViacheslav Ovsiienko 		 * For the buffers beyond the described segments the length is the
1793a0a45e8aSViacheslav Ovsiienko 		 * pool buffer length; zero lengths are replaced with the
1794a0a45e8aSViacheslav Ovsiienko 		 * pool buffer length as well.
1795a0a45e8aSViacheslav Ovsiienko 		 */
1796a0a45e8aSViacheslav Ovsiienko 		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1797a0a45e8aSViacheslav Ovsiienko 						       qs_seg->length ?
1798a0a45e8aSViacheslav Ovsiienko 						       qs_seg->length :
1799a0a45e8aSViacheslav Ovsiienko 						       (buf_len - offset);
1800a0a45e8aSViacheslav Ovsiienko 		/* Check is done in long int, no overflows. */
1801a0a45e8aSViacheslav Ovsiienko 		if (buf_len < seg_len + offset) {
1802a0a45e8aSViacheslav Ovsiienko 			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1803a0a45e8aSViacheslav Ovsiienko 				     "%u/%u can't be satisfied",
1804a0a45e8aSViacheslav Ovsiienko 				     dev->data->port_id, idx,
1805a0a45e8aSViacheslav Ovsiienko 				     qs_seg->length, qs_seg->offset);
1806a0a45e8aSViacheslav Ovsiienko 			rte_errno = EINVAL;
1807a0a45e8aSViacheslav Ovsiienko 			goto error;
1808a0a45e8aSViacheslav Ovsiienko 		}
1809a0a45e8aSViacheslav Ovsiienko 		if (seg_len > tail_len)
1810a0a45e8aSViacheslav Ovsiienko 			seg_len = buf_len - offset;
1811a0a45e8aSViacheslav Ovsiienko 		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1812a0a45e8aSViacheslav Ovsiienko 			DRV_LOG(ERR,
1813a0a45e8aSViacheslav Ovsiienko 				"port %u too many SGEs (%u) needed to handle"
1814a0a45e8aSViacheslav Ovsiienko 				" requested maximum packet size %u, the maximum"
1815a0a45e8aSViacheslav Ovsiienko 				" supported are %u", dev->data->port_id,
18161bb4a528SFerruh Yigit 				tmpl->rxq.rxseg_n, max_rx_pktlen,
1817a0a45e8aSViacheslav Ovsiienko 				MLX5_MAX_RXQ_NSEG);
1818a0a45e8aSViacheslav Ovsiienko 			rte_errno = ENOTSUP;
1819a0a45e8aSViacheslav Ovsiienko 			goto error;
1820a0a45e8aSViacheslav Ovsiienko 		}
1821a0a45e8aSViacheslav Ovsiienko 		/* Build the actual scattering element in the queue object. */
1822a0a45e8aSViacheslav Ovsiienko 		hw_seg->mp = qs_seg->mp;
1823a0a45e8aSViacheslav Ovsiienko 		MLX5_ASSERT(offset <= UINT16_MAX);
1824a0a45e8aSViacheslav Ovsiienko 		MLX5_ASSERT(seg_len <= UINT16_MAX);
1825a0a45e8aSViacheslav Ovsiienko 		hw_seg->offset = (uint16_t)offset;
1826a0a45e8aSViacheslav Ovsiienko 		hw_seg->length = (uint16_t)seg_len;
1827a0a45e8aSViacheslav Ovsiienko 		/*
1828a0a45e8aSViacheslav Ovsiienko 		 * Advance the segment descriptor, the padding is based
1829a0a45e8aSViacheslav Ovsiienko 		 * on the attributes of the last descriptor.
1830a0a45e8aSViacheslav Ovsiienko 		 */
1831a0a45e8aSViacheslav Ovsiienko 		if (tmpl->rxq.rxseg_n < n_seg)
1832a0a45e8aSViacheslav Ovsiienko 			qs_seg++;
1833a0a45e8aSViacheslav Ovsiienko 		tail_len -= RTE_MIN(tail_len, seg_len);
1834a0a45e8aSViacheslav Ovsiienko 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1835a0a45e8aSViacheslav Ovsiienko 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1836a0a45e8aSViacheslav Ovsiienko 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1837295968d1SFerruh Yigit 	if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
1838721c9530SMatan Azrad 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1839721c9530SMatan Azrad 			" configured and not enough mbuf space(%u) to contain "
1840721c9530SMatan Azrad 			"the maximum RX packet length(%u) with head-room(%u)",
18411bb4a528SFerruh Yigit 			dev->data->port_id, idx, mb_len, max_rx_pktlen,
1842721c9530SMatan Azrad 			RTE_PKTMBUF_HEADROOM);
1843721c9530SMatan Azrad 		rte_errno = ENOSPC;
184441217cecSMichael Baum 		goto error;
1845721c9530SMatan Azrad 	}
1846c06f77aeSMichael Baum 	tmpl->is_hairpin = false;
184771304b5cSMichael Baum 	if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
184871304b5cSMichael Baum 			      &priv->sh->cdev->mr_scache.dev_gen, socket)) {
1849974f1e7eSYongseok Koh 		/* rte_errno is already set. */
1850974f1e7eSYongseok Koh 		goto error;
1851974f1e7eSYongseok Koh 	}
1852a49b617bSOlivier Gournet 	tmpl->socket = socket;
1853df428ceeSYongseok Koh 	if (dev->data->dev_conf.intr_conf.rxq)
1854a1366b1aSNélio Laranjeiro 		tmpl->irq = 1;
185534776af6SMichael Baum 	if (mprq_en) {
18567d6bf6b8SYongseok Koh 		/* TODO: Rx scatter isn't supported yet. */
18577d6bf6b8SYongseok Koh 		tmpl->rxq.sges_n = 0;
185834776af6SMichael Baum 		tmpl->rxq.log_strd_num = mprq_log_actual_stride_num;
185934776af6SMichael Baum 		tmpl->rxq.log_strd_sz = mprq_log_actual_stride_size;
18607d6bf6b8SYongseok Koh 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1861bd0d5930SAlexander Kozyrev 		tmpl->rxq.strd_scatter_en =
1862295968d1SFerruh Yigit 				!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
18632579543fSMatan Azrad 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
18642579543fSMatan Azrad 				config->mprq.max_memcpy_len);
18651bb4a528SFerruh Yigit 		max_lro_size = RTE_MIN(max_rx_pktlen,
18660947ed38SMichael Baum 				       RTE_BIT32(tmpl->rxq.log_strd_num) *
18670947ed38SMichael Baum 				       RTE_BIT32(tmpl->rxq.log_strd_sz));
1868a0a45e8aSViacheslav Ovsiienko 	} else if (tmpl->rxq.rxseg_n == 1) {
18691bb4a528SFerruh Yigit 		MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
1870a1366b1aSNélio Laranjeiro 		tmpl->rxq.sges_n = 0;
18711bb4a528SFerruh Yigit 		max_lro_size = max_rx_pktlen;
1872295968d1SFerruh Yigit 	} else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
1873a1366b1aSNélio Laranjeiro 		unsigned int sges_n;
1874a1366b1aSNélio Laranjeiro 
187517ed314cSMatan Azrad 		if (lro_on_queue && first_mb_free_size <
18762579543fSMatan Azrad 		    MLX5_MAX_LRO_HEADER_FIX) {
18772579543fSMatan Azrad 			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
18782579543fSMatan Azrad 				" to include the max header size(%u) for LRO",
18792579543fSMatan Azrad 				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
18802579543fSMatan Azrad 			rte_errno = ENOTSUP;
18812579543fSMatan Azrad 			goto error;
18822579543fSMatan Azrad 		}
1883a1366b1aSNélio Laranjeiro 		/*
1884a1366b1aSNélio Laranjeiro 		 * Determine the number of SGEs needed for a full packet
1885a1366b1aSNélio Laranjeiro 		 * and round it to the next power of two.
1886a1366b1aSNélio Laranjeiro 		 */
1887a0a45e8aSViacheslav Ovsiienko 		sges_n = log2above(tmpl->rxq.rxseg_n);
1888bd41389eSMatan Azrad 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1889a170a30dSNélio Laranjeiro 			DRV_LOG(ERR,
1890a170a30dSNélio Laranjeiro 				"port %u too many SGEs (%u) needed to handle"
1891bd41389eSMatan Azrad 				" requested maximum packet size %u, the maximum"
1892bd41389eSMatan Azrad 				" supported are %u", dev->data->port_id,
18931bb4a528SFerruh Yigit 				1 << sges_n, max_rx_pktlen,
1894bd41389eSMatan Azrad 				1u << MLX5_MAX_LOG_RQ_SEGS);
1895bd41389eSMatan Azrad 			rte_errno = ENOTSUP;
1896a1366b1aSNélio Laranjeiro 			goto error;
1897a1366b1aSNélio Laranjeiro 		}
1898bd41389eSMatan Azrad 		tmpl->rxq.sges_n = sges_n;
18991bb4a528SFerruh Yigit 		max_lro_size = max_rx_pktlen;
1900a1366b1aSNélio Laranjeiro 	}
1901a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
19020f99970bSNélio Laranjeiro 		dev->data->port_id, 1 << tmpl->rxq.sges_n);
1903a1366b1aSNélio Laranjeiro 	if (desc % (1 << tmpl->rxq.sges_n)) {
1904a170a30dSNélio Laranjeiro 		DRV_LOG(ERR,
1905a170a30dSNélio Laranjeiro 			"port %u number of Rx queue descriptors (%u) is not a"
1906a1366b1aSNélio Laranjeiro 			" multiple of SGEs per packet (%u)",
19070f99970bSNélio Laranjeiro 			dev->data->port_id,
1908a1366b1aSNélio Laranjeiro 			desc,
1909a1366b1aSNélio Laranjeiro 			1 << tmpl->rxq.sges_n);
1910a6d83b6aSNélio Laranjeiro 		rte_errno = EINVAL;
1911a1366b1aSNélio Laranjeiro 		goto error;
1912a1366b1aSNélio Laranjeiro 	}
19133d491dd6SDekel Peled 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1914a1366b1aSNélio Laranjeiro 	/* Toggle RX checksum offload if hardware supports it. */
1915295968d1SFerruh Yigit 	tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM);
191604840ecbSThomas Monjalon 	/* Configure Rx timestamp. */
1917295968d1SFerruh Yigit 	tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP);
191804840ecbSThomas Monjalon 	tmpl->rxq.timestamp_rx_flag = 0;
191904840ecbSThomas Monjalon 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
192004840ecbSThomas Monjalon 			&tmpl->rxq.timestamp_offset,
192104840ecbSThomas Monjalon 			&tmpl->rxq.timestamp_rx_flag) != 0) {
192204840ecbSThomas Monjalon 		DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
192304840ecbSThomas Monjalon 		goto error;
192404840ecbSThomas Monjalon 	}
1925a1366b1aSNélio Laranjeiro 	/* Configure VLAN stripping. */
1926295968d1SFerruh Yigit 	tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
1927a1366b1aSNélio Laranjeiro 	/* By default, FCS (CRC) is stripped by hardware. */
1928a1366b1aSNélio Laranjeiro 	tmpl->rxq.crc_present = 0;
192917ed314cSMatan Azrad 	tmpl->rxq.lro = lro_on_queue;
1930295968d1SFerruh Yigit 	if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
1931a13ec19cSMichael Baum 		if (priv->sh->config.hw_fcs_strip) {
1932175f1c21SDekel Peled 			/*
1933175f1c21SDekel Peled 			 * RQs used for LRO-enabled TIRs should not be
1934175f1c21SDekel Peled 			 * configured to scatter the FCS.
1935175f1c21SDekel Peled 			 */
193617ed314cSMatan Azrad 			if (lro_on_queue)
1937175f1c21SDekel Peled 				DRV_LOG(WARNING,
1938175f1c21SDekel Peled 					"port %u CRC stripping has been "
1939175f1c21SDekel Peled 					"disabled but will still be performed "
1940175f1c21SDekel Peled 					"by hardware, because LRO is enabled",
1941175f1c21SDekel Peled 					dev->data->port_id);
1942175f1c21SDekel Peled 			else
1943a1366b1aSNélio Laranjeiro 				tmpl->rxq.crc_present = 1;
1944a1366b1aSNélio Laranjeiro 		} else {
1945a170a30dSNélio Laranjeiro 			DRV_LOG(WARNING,
1946a170a30dSNélio Laranjeiro 				"port %u CRC stripping has been disabled but will"
1947a170a30dSNélio Laranjeiro 				" still be performed by hardware, make sure MLNX_OFED"
1948a170a30dSNélio Laranjeiro 				" and firmware are up to date",
19490f99970bSNélio Laranjeiro 				dev->data->port_id);
195070815c9eSFerruh Yigit 		}
1951a1366b1aSNélio Laranjeiro 	}
1952a170a30dSNélio Laranjeiro 	DRV_LOG(DEBUG,
1953a170a30dSNélio Laranjeiro 		"port %u CRC stripping is %s, %u bytes will be subtracted from"
1954a1366b1aSNélio Laranjeiro 		" incoming frames to hide it",
19550f99970bSNélio Laranjeiro 		dev->data->port_id,
1956a1366b1aSNélio Laranjeiro 		tmpl->rxq.crc_present ? "disabled" : "enabled",
1957a1366b1aSNélio Laranjeiro 		tmpl->rxq.crc_present << 2);
195866669155SDahir Osman 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1959295968d1SFerruh Yigit 		(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
196034776af6SMichael Baum 	/* Save port ID. */
1961a1366b1aSNélio Laranjeiro 	tmpl->rxq.port_id = dev->data->port_id;
19624cda06c3SXueming Li 	tmpl->sh = priv->sh;
19639f209b59SViacheslav Ovsiienko 	tmpl->rxq.mp = rx_seg[0].mp;
1964a1366b1aSNélio Laranjeiro 	tmpl->rxq.elts_n = log2above(desc);
196534776af6SMichael Baum 	tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1966712d1fb8STyler Retzlaff 	tmpl->rxq.elts = (struct rte_mbuf *(*)[])(tmpl + 1);
1967712d1fb8STyler Retzlaff 	tmpl->rxq.mprq_bufs = (struct mlx5_mprq_buf *(*)[])(*tmpl->rxq.elts + desc_n);
1968d5c900d1SYongseok Koh 	tmpl->rxq.idx = idx;
19690ad12a80SMichael Baum 	if (conf->share_group > 0) {
19700ad12a80SMichael Baum 		tmpl->rxq.shared = 1;
19710ad12a80SMichael Baum 		tmpl->share_group = conf->share_group;
19720ad12a80SMichael Baum 		tmpl->share_qid = conf->share_qid;
19730ad12a80SMichael Baum 	}
19743c9a82faSBing Zhao 	LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
19753c9a82faSBing Zhao 	rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed);
1976a1366b1aSNélio Laranjeiro 	return tmpl;
1977a1366b1aSNélio Laranjeiro error:
1978e28e6c63SMichael Baum 	mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
1979ac3fc732SSuanming Mou 	mlx5_free(tmpl);
1980a1366b1aSNélio Laranjeiro 	return NULL;
1981a1366b1aSNélio Laranjeiro }
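/*
 * Application-side buffer-split sketch (illustrative, the mempools are
 * hypothetical): splitting every packet into a header segment and a
 * payload segment, which the segment loop above turns into
 * tmpl->rxq.rxseg[].
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool,  .length = 128, .offset = 0 } },
 *		{ .split = { .mp = data_pool, .length = 0,   .offset = 0 } },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.rx_seg = segs,
 *		.rx_nseg = 2,
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 *			    RTE_ETH_RX_OFFLOAD_SCATTER,
 *	};
 *	rte_eth_rx_queue_setup(port_id, rxq_id, nb_desc, socket_id,
 *			       &rxconf, NULL);
 *
 * A zero length selects the pool's full buffer length, and the last pool
 * is reused to cover whatever remains of max_rx_pktlen.
 */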
1982a1366b1aSNélio Laranjeiro 
1983a1366b1aSNélio Laranjeiro /**
1984e79c9be9SOri Kam  * Create a DPDK Rx hairpin queue.
1985e79c9be9SOri Kam  *
1986e79c9be9SOri Kam  * @param dev
1987e79c9be9SOri Kam  *   Pointer to Ethernet device.
19884cda06c3SXueming Li  * @param rxq
19894cda06c3SXueming Li  *   RX queue.
1990e79c9be9SOri Kam  * @param desc
1991e79c9be9SOri Kam  *   Number of descriptors to configure in queue.
1992e79c9be9SOri Kam  * @param hairpin_conf
1993e79c9be9SOri Kam  *   The hairpin binding configuration.
1994e79c9be9SOri Kam  *
1995e79c9be9SOri Kam  * @return
1996e79c9be9SOri Kam  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1997e79c9be9SOri Kam  */
1998e79c9be9SOri Kam struct mlx5_rxq_ctrl *
19994cda06c3SXueming Li mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
20004cda06c3SXueming Li 		     uint16_t desc,
2001e79c9be9SOri Kam 		     const struct rte_eth_hairpin_conf *hairpin_conf)
2002e79c9be9SOri Kam {
20034cda06c3SXueming Li 	uint16_t idx = rxq->idx;
2004e79c9be9SOri Kam 	struct mlx5_priv *priv = dev->data->dev_private;
2005e79c9be9SOri Kam 	struct mlx5_rxq_ctrl *tmpl;
2006e79c9be9SOri Kam 
2007ac3fc732SSuanming Mou 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
2008ac3fc732SSuanming Mou 			   SOCKET_ID_ANY);
2009e79c9be9SOri Kam 	if (!tmpl) {
2010e79c9be9SOri Kam 		rte_errno = ENOMEM;
2011e79c9be9SOri Kam 		return NULL;
2012e79c9be9SOri Kam 	}
20134cda06c3SXueming Li 	LIST_INIT(&tmpl->owners);
20144cda06c3SXueming Li 	rxq->ctrl = tmpl;
20154cda06c3SXueming Li 	LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
2016c06f77aeSMichael Baum 	tmpl->is_hairpin = true;
2017e79c9be9SOri Kam 	tmpl->socket = SOCKET_ID_ANY;
2018e79c9be9SOri Kam 	tmpl->rxq.rss_hash = 0;
2019e79c9be9SOri Kam 	tmpl->rxq.port_id = dev->data->port_id;
20204cda06c3SXueming Li 	tmpl->sh = priv->sh;
2021e79c9be9SOri Kam 	tmpl->rxq.mp = NULL;
2022e79c9be9SOri Kam 	tmpl->rxq.elts_n = log2above(desc);
2023e79c9be9SOri Kam 	tmpl->rxq.elts = NULL;
2024e79c9be9SOri Kam 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2025e79c9be9SOri Kam 	tmpl->rxq.idx = idx;
202644126bd9SXueming Li 	rxq->hairpin_conf = *hairpin_conf;
20270cedf34dSXueming Li 	mlx5_rxq_ref(dev, idx);
20283c9a82faSBing Zhao 	LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
20293c9a82faSBing Zhao 	rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed);
2030e79c9be9SOri Kam 	return tmpl;
2031e79c9be9SOri Kam }
2032e79c9be9SOri Kam 
2033e79c9be9SOri Kam /**
20340cedf34dSXueming Li  * Increase Rx queue reference count.
20350cedf34dSXueming Li  *
20360cedf34dSXueming Li  * @param dev
20370cedf34dSXueming Li  *   Pointer to Ethernet device.
20380cedf34dSXueming Li  * @param idx
20390cedf34dSXueming Li  *   RX queue index.
20400cedf34dSXueming Li  *
20410cedf34dSXueming Li  * @return
20420cedf34dSXueming Li  *   A pointer to the queue if it exists, NULL otherwise.
20430cedf34dSXueming Li  */
20440cedf34dSXueming Li struct mlx5_rxq_priv *
20450cedf34dSXueming Li mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
20460cedf34dSXueming Li {
20470cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
20480cedf34dSXueming Li 
20490cedf34dSXueming Li 	if (rxq != NULL)
2050e12a0166STyler Retzlaff 		rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
20510cedf34dSXueming Li 	return rxq;
20520cedf34dSXueming Li }
20530cedf34dSXueming Li 
20540cedf34dSXueming Li /**
20550cedf34dSXueming Li  * Dereference a Rx queue.
20560cedf34dSXueming Li  *
20570cedf34dSXueming Li  * @param dev
20580cedf34dSXueming Li  *   Pointer to Ethernet device.
20590cedf34dSXueming Li  * @param idx
20600cedf34dSXueming Li  *   RX queue index.
20610cedf34dSXueming Li  *
20620cedf34dSXueming Li  * @return
20630cedf34dSXueming Li  *   Updated reference count.
20640cedf34dSXueming Li  */
20650cedf34dSXueming Li uint32_t
20660cedf34dSXueming Li mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
20670cedf34dSXueming Li {
20680cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
20690cedf34dSXueming Li 
20700cedf34dSXueming Li 	if (rxq == NULL)
20710cedf34dSXueming Li 		return 0;
2072e12a0166STyler Retzlaff 	return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
20730cedf34dSXueming Li }
20740cedf34dSXueming Li 
20750cedf34dSXueming Li /**
2076a1366b1aSNélio Laranjeiro  * Get a Rx queue.
2077a1366b1aSNélio Laranjeiro  *
2078af4f09f2SNélio Laranjeiro  * @param dev
2079af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
2080a1366b1aSNélio Laranjeiro  * @param idx
2081ebd4df0fSDekel Peled  *   RX queue index.
2082a1366b1aSNélio Laranjeiro  *
2083a1366b1aSNélio Laranjeiro  * @return
2084a6d83b6aSNélio Laranjeiro  *   A pointer to the queue if it exists, NULL otherwise.
2085a1366b1aSNélio Laranjeiro  */
20860cedf34dSXueming Li struct mlx5_rxq_priv *
2087af4f09f2SNélio Laranjeiro mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2088a1366b1aSNélio Laranjeiro {
2089dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
2090a1366b1aSNélio Laranjeiro 
20911573b072SMichael Baum 	if (idx >= priv->rxqs_n)
20921573b072SMichael Baum 		return NULL;
20935cf0707fSXueming Li 	MLX5_ASSERT(priv->rxq_privs != NULL);
20940cedf34dSXueming Li 	return (*priv->rxq_privs)[idx];
2095a1366b1aSNélio Laranjeiro }
20960cedf34dSXueming Li 
20970cedf34dSXueming Li /**
20980cedf34dSXueming Li  * Get Rx queue shareable control.
20990cedf34dSXueming Li  *
21000cedf34dSXueming Li  * @param dev
21010cedf34dSXueming Li  *   Pointer to Ethernet device.
21020cedf34dSXueming Li  * @param idx
21030cedf34dSXueming Li  *   RX queue index.
21040cedf34dSXueming Li  *
21050cedf34dSXueming Li  * @return
21060cedf34dSXueming Li  *   A pointer to the queue control if it exists, NULL otherwise.
21070cedf34dSXueming Li  */
21080cedf34dSXueming Li struct mlx5_rxq_ctrl *
21090cedf34dSXueming Li mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
21100cedf34dSXueming Li {
21110cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
21120cedf34dSXueming Li 
21130cedf34dSXueming Li 	return rxq == NULL ? NULL : rxq->ctrl;
21140cedf34dSXueming Li }
21150cedf34dSXueming Li 
21160cedf34dSXueming Li /**
21170cedf34dSXueming Li  * Get Rx queue shareable data.
21180cedf34dSXueming Li  *
21190cedf34dSXueming Li  * @param dev
21200cedf34dSXueming Li  *   Pointer to Ethernet device.
21210cedf34dSXueming Li  * @param idx
21220cedf34dSXueming Li  *   RX queue index.
21230cedf34dSXueming Li  *
21240cedf34dSXueming Li  * @return
21250cedf34dSXueming Li  *   A pointer to the queue data if it exists, NULL otherwise.
21260cedf34dSXueming Li  */
21270cedf34dSXueming Li struct mlx5_rxq_data *
21280cedf34dSXueming Li mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
21290cedf34dSXueming Li {
21300cedf34dSXueming Li 	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
21310cedf34dSXueming Li 
21320cedf34dSXueming Li 	return rxq == NULL ? NULL : &rxq->ctrl->rxq;
2133a1366b1aSNélio Laranjeiro }
2134a1366b1aSNélio Laranjeiro 
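/*
 * Editorial usage sketch, not part of the driver: how a caller is expected to
 * pair the reference-counting accessors above. The helper name and the error
 * handling are assumptions; only mlx5_rxq_ref(), mlx5_rxq_data_get() and
 * mlx5_rxq_deref() are real APIs from this file.
 */
static __rte_unused int
mlx5_rxq_ref_usage_sketch(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, idx);
	struct mlx5_rxq_data *rxq_data;

	if (rxq == NULL)
		return -EINVAL;
	/* While the reference is held, the queue cannot be released. */
	rxq_data = mlx5_rxq_data_get(dev, idx);
	RTE_SET_USED(rxq_data);
	/* Drop the reference; the updated count is returned. */
	return (int)mlx5_rxq_deref(dev, idx);
}
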
2135a1366b1aSNélio Laranjeiro /**
2136311b17e6SMichael Baum  * Increase an external Rx queue reference count.
2137311b17e6SMichael Baum  *
2138311b17e6SMichael Baum  * @param dev
2139311b17e6SMichael Baum  *   Pointer to Ethernet device.
2140311b17e6SMichael Baum  * @param idx
2141311b17e6SMichael Baum  *   External RX queue index.
2142311b17e6SMichael Baum  *
2143311b17e6SMichael Baum  * @return
2144311b17e6SMichael Baum  *   A pointer to the queue if it exists, NULL otherwise.
2145311b17e6SMichael Baum  */
21468e8b44f2SSuanming Mou struct mlx5_external_q *
2147311b17e6SMichael Baum mlx5_ext_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
2148311b17e6SMichael Baum {
21498e8b44f2SSuanming Mou 	struct mlx5_external_q *rxq = mlx5_ext_rxq_get(dev, idx);
2150311b17e6SMichael Baum 
2151e12a0166STyler Retzlaff 	rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
2152311b17e6SMichael Baum 	return rxq;
2153311b17e6SMichael Baum }
2154311b17e6SMichael Baum 
2155311b17e6SMichael Baum /**
2156311b17e6SMichael Baum  * Decrease an external Rx queue reference count.
2157311b17e6SMichael Baum  *
2158311b17e6SMichael Baum  * @param dev
2159311b17e6SMichael Baum  *   Pointer to Ethernet device.
2160311b17e6SMichael Baum  * @param idx
2161311b17e6SMichael Baum  *   External RX queue index.
2162311b17e6SMichael Baum  *
2163311b17e6SMichael Baum  * @return
2164311b17e6SMichael Baum  *   Updated reference count.
2165311b17e6SMichael Baum  */
2166311b17e6SMichael Baum uint32_t
2167311b17e6SMichael Baum mlx5_ext_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
2168311b17e6SMichael Baum {
21698e8b44f2SSuanming Mou 	struct mlx5_external_q *rxq = mlx5_ext_rxq_get(dev, idx);
2170311b17e6SMichael Baum 
2171e12a0166STyler Retzlaff 	return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
2172311b17e6SMichael Baum }
2173311b17e6SMichael Baum 
2174311b17e6SMichael Baum /**
2175311b17e6SMichael Baum  * Get an external Rx queue.
2176311b17e6SMichael Baum  *
2177311b17e6SMichael Baum  * @param dev
2178311b17e6SMichael Baum  *   Pointer to Ethernet device.
2179311b17e6SMichael Baum  * @param idx
2180311b17e6SMichael Baum  *   External Rx queue index.
2181311b17e6SMichael Baum  *
2182311b17e6SMichael Baum  * @return
2183311b17e6SMichael Baum  *   A pointer to the queue if it exists, NULL otherwise.
2184311b17e6SMichael Baum  */
21858e8b44f2SSuanming Mou struct mlx5_external_q *
2186311b17e6SMichael Baum mlx5_ext_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2187311b17e6SMichael Baum {
2188311b17e6SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
2189311b17e6SMichael Baum 
2190311b17e6SMichael Baum 	MLX5_ASSERT(mlx5_is_external_rxq(dev, idx));
219186647d46SThomas Monjalon 	return &priv->ext_rxqs[idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
2192311b17e6SMichael Baum }
2193311b17e6SMichael Baum 
2194311b17e6SMichael Baum /**
2195f6f1195cSMichael Baum  * Dereference a list of Rx queues.
2196f6f1195cSMichael Baum  *
2197f6f1195cSMichael Baum  * @param dev
2198f6f1195cSMichael Baum  *   Pointer to Ethernet device.
2199f6f1195cSMichael Baum  * @param queues
2200f6f1195cSMichael Baum  *   List of Rx queues to deref.
2201f6f1195cSMichael Baum  * @param queues_n
2202f6f1195cSMichael Baum  *   Number of queues in the array.
2203f6f1195cSMichael Baum  */
2204f6f1195cSMichael Baum static void
2205f6f1195cSMichael Baum mlx5_rxqs_deref(struct rte_eth_dev *dev, uint16_t *queues,
2206f6f1195cSMichael Baum 		const uint32_t queues_n)
2207f6f1195cSMichael Baum {
2208f6f1195cSMichael Baum 	uint32_t i;
2209f6f1195cSMichael Baum 
2210f6f1195cSMichael Baum 	for (i = 0; i < queues_n; i++) {
2211f6f1195cSMichael Baum 		if (mlx5_is_external_rxq(dev, queues[i]))
2212f6f1195cSMichael Baum 			claim_nonzero(mlx5_ext_rxq_deref(dev, queues[i]));
2213f6f1195cSMichael Baum 		else
2214f6f1195cSMichael Baum 			claim_nonzero(mlx5_rxq_deref(dev, queues[i]));
2215f6f1195cSMichael Baum 	}
2216f6f1195cSMichael Baum }
2217f6f1195cSMichael Baum 
2218f6f1195cSMichael Baum /**
22194dcf29a8SMichael Baum  * Increase reference count for list of Rx queues.
22204dcf29a8SMichael Baum  *
22214dcf29a8SMichael Baum  * @param dev
22224dcf29a8SMichael Baum  *   Pointer to Ethernet device.
22234dcf29a8SMichael Baum  * @param queues
22244dcf29a8SMichael Baum  *   List of Rx queues to ref.
22254dcf29a8SMichael Baum  * @param queues_n
22264dcf29a8SMichael Baum  *   Number of queues in the array.
22274dcf29a8SMichael Baum  *
22284dcf29a8SMichael Baum  * @return
22294dcf29a8SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
22304dcf29a8SMichael Baum  */
22314dcf29a8SMichael Baum static int
22324dcf29a8SMichael Baum mlx5_rxqs_ref(struct rte_eth_dev *dev, uint16_t *queues,
22334dcf29a8SMichael Baum 	      const uint32_t queues_n)
22344dcf29a8SMichael Baum {
22354dcf29a8SMichael Baum 	uint32_t i;
22364dcf29a8SMichael Baum 
22374dcf29a8SMichael Baum 	for (i = 0; i != queues_n; ++i) {
22384dcf29a8SMichael Baum 		if (mlx5_is_external_rxq(dev, queues[i])) {
22394dcf29a8SMichael Baum 			if (mlx5_ext_rxq_ref(dev, queues[i]) == NULL)
22404dcf29a8SMichael Baum 				goto error;
22414dcf29a8SMichael Baum 		} else {
22424dcf29a8SMichael Baum 			if (mlx5_rxq_ref(dev, queues[i]) == NULL)
22434dcf29a8SMichael Baum 				goto error;
22444dcf29a8SMichael Baum 		}
22454dcf29a8SMichael Baum 	}
22464dcf29a8SMichael Baum 	return 0;
22474dcf29a8SMichael Baum error:
22484dcf29a8SMichael Baum 	mlx5_rxqs_deref(dev, queues, i);
22494dcf29a8SMichael Baum 	rte_errno = EINVAL;
22504dcf29a8SMichael Baum 	return -rte_errno;
22514dcf29a8SMichael Baum }
22524dcf29a8SMichael Baum 
22534dcf29a8SMichael Baum /**
2254a1366b1aSNélio Laranjeiro  * Release a Rx queue.
2255a1366b1aSNélio Laranjeiro  *
2256af4f09f2SNélio Laranjeiro  * @param dev
2257af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
2258a1366b1aSNélio Laranjeiro  * @param idx
2259ebd4df0fSDekel Peled  *   RX queue index.
2260a1366b1aSNélio Laranjeiro  *
2261a1366b1aSNélio Laranjeiro  * @return
2262925061b5SNélio Laranjeiro  *   1 while a reference on it exists, 0 when freed.
2263a1366b1aSNélio Laranjeiro  */
2264a1366b1aSNélio Laranjeiro int
2265af4f09f2SNélio Laranjeiro mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2266a1366b1aSNélio Laranjeiro {
2267dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
22685ceb3a02SXueming Li 	struct mlx5_rxq_priv *rxq;
22695ceb3a02SXueming Li 	struct mlx5_rxq_ctrl *rxq_ctrl;
227009c25553SXueming Li 	uint32_t refcnt;
2271*f8f294c6SBing Zhao 	int32_t ctrl_ref;
2272a1366b1aSNélio Laranjeiro 
22735ceb3a02SXueming Li 	if (priv->rxq_privs == NULL)
22745ceb3a02SXueming Li 		return 0;
22755ceb3a02SXueming Li 	rxq = mlx5_rxq_get(dev, idx);
227609c25553SXueming Li 	if (rxq == NULL || rxq->refcnt == 0)
2277a1366b1aSNélio Laranjeiro 		return 0;
22785ceb3a02SXueming Li 	rxq_ctrl = rxq->ctrl;
227909c25553SXueming Li 	refcnt = mlx5_rxq_deref(dev, idx);
228009c25553SXueming Li 	if (refcnt > 1) {
228109c25553SXueming Li 		return 1;
228209c25553SXueming Li 	} else if (refcnt == 1) { /* RxQ stopped. */
22835ceb3a02SXueming Li 		priv->obj_ops.rxq_obj_release(rxq);
228409c25553SXueming Li 		if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) {
22851260a87bSMichael Baum 			LIST_REMOVE(rxq_ctrl->obj, next);
22861260a87bSMichael Baum 			mlx5_free(rxq_ctrl->obj);
228793403560SDekel Peled 			rxq_ctrl->obj = NULL;
228824e4b650SMichael Baum 		}
2289c06f77aeSMichael Baum 		if (!rxq_ctrl->is_hairpin) {
229009c25553SXueming Li 			if (!rxq_ctrl->started)
22911260a87bSMichael Baum 				rxq_free_elts(rxq_ctrl);
229209c25553SXueming Li 			dev->data->rx_queue_state[idx] =
229309c25553SXueming Li 					RTE_ETH_QUEUE_STATE_STOPPED;
2294876b5d52SMatan Azrad 		}
229509c25553SXueming Li 	} else { /* Refcnt zero, closing device. */
22964cda06c3SXueming Li 		LIST_REMOVE(rxq, owner_entry);
2297*f8f294c6SBing Zhao 		ctrl_ref = rte_atomic_fetch_sub_explicit(&rxq_ctrl->ctrl_ref, 1,
2298*f8f294c6SBing Zhao 							 rte_memory_order_relaxed) - 1;
2299*f8f294c6SBing Zhao 		if (ctrl_ref == 1 && LIST_EMPTY(&rxq_ctrl->owners)) {
2300c06f77aeSMichael Baum 			if (!rxq_ctrl->is_hairpin)
230109c25553SXueming Li 				mlx5_mr_btree_free
230209c25553SXueming Li 					(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
230309c25553SXueming Li 			LIST_REMOVE(rxq_ctrl, share_entry);
2304ac3fc732SSuanming Mou 			mlx5_free(rxq_ctrl);
230509c25553SXueming Li 		}
23065cf0707fSXueming Li 		dev->data->rx_queues[idx] = NULL;
23074cda06c3SXueming Li 		mlx5_free(rxq);
23084cda06c3SXueming Li 		(*priv->rxq_privs)[idx] = NULL;
2309015d2cb6SMatan Azrad 	}
2310a1366b1aSNélio Laranjeiro 	return 0;
2311a1366b1aSNélio Laranjeiro }
2312a1366b1aSNélio Laranjeiro 
2313a1366b1aSNélio Laranjeiro /**
2314a1366b1aSNélio Laranjeiro  * Verify the Rx Queue list is empty
2315a1366b1aSNélio Laranjeiro  *
2316af4f09f2SNélio Laranjeiro  * @param dev
2317af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
2318a1366b1aSNélio Laranjeiro  *
2319fb732b0aSNélio Laranjeiro  * @return
2320fb732b0aSNélio Laranjeiro  *   The number of objects not released.
2321a1366b1aSNélio Laranjeiro  */
2322a1366b1aSNélio Laranjeiro int
2323af4f09f2SNélio Laranjeiro mlx5_rxq_verify(struct rte_eth_dev *dev)
2324a1366b1aSNélio Laranjeiro {
2325dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
2326a1366b1aSNélio Laranjeiro 	struct mlx5_rxq_ctrl *rxq_ctrl;
2327a1366b1aSNélio Laranjeiro 	int ret = 0;
2328a1366b1aSNélio Laranjeiro 
23293c9a82faSBing Zhao 	LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
2330a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2331d5c900d1SYongseok Koh 			dev->data->port_id, rxq_ctrl->rxq.idx);
2332a1366b1aSNélio Laranjeiro 		++ret;
2333a1366b1aSNélio Laranjeiro 	}
2334a1366b1aSNélio Laranjeiro 	return ret;
2335a1366b1aSNélio Laranjeiro }
23364c7a0f5fSNélio Laranjeiro 
23374c7a0f5fSNélio Laranjeiro /**
2338311b17e6SMichael Baum  * Verify the external Rx Queue list is empty.
2339311b17e6SMichael Baum  *
2340311b17e6SMichael Baum  * @param dev
2341311b17e6SMichael Baum  *   Pointer to Ethernet device.
2342311b17e6SMichael Baum  *
2343311b17e6SMichael Baum  * @return
2344311b17e6SMichael Baum  *   The number of objects not released.
2345311b17e6SMichael Baum  */
2346311b17e6SMichael Baum int
2347311b17e6SMichael Baum mlx5_ext_rxq_verify(struct rte_eth_dev *dev)
2348311b17e6SMichael Baum {
2349311b17e6SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
23508e8b44f2SSuanming Mou 	struct mlx5_external_q *rxq;
2351311b17e6SMichael Baum 	uint32_t i;
2352311b17e6SMichael Baum 	int ret = 0;
2353311b17e6SMichael Baum 
2354311b17e6SMichael Baum 	if (priv->ext_rxqs == NULL)
2355311b17e6SMichael Baum 		return 0;
2356311b17e6SMichael Baum 
235786647d46SThomas Monjalon 	for (i = RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN; i <= UINT16_MAX; ++i) {
2358311b17e6SMichael Baum 		rxq = mlx5_ext_rxq_get(dev, i);
2359311b17e6SMichael Baum 		if (rxq->refcnt < 2)
2360311b17e6SMichael Baum 			continue;
2361311b17e6SMichael Baum 		DRV_LOG(DEBUG, "Port %u external RxQ %u still referenced.",
2362311b17e6SMichael Baum 			dev->data->port_id, i);
2363311b17e6SMichael Baum 		++ret;
2364311b17e6SMichael Baum 	}
2365311b17e6SMichael Baum 	return ret;
2366311b17e6SMichael Baum }
2367311b17e6SMichael Baum 
2368311b17e6SMichael Baum /**
2369c06f77aeSMichael Baum  * Check whether RxQ type is Hairpin.
2370d85c7b5eSOri Kam  *
2371d85c7b5eSOri Kam  * @param dev
2372d85c7b5eSOri Kam  *   Pointer to Ethernet device.
2373d85c7b5eSOri Kam  * @param idx
2374d85c7b5eSOri Kam  *   Rx queue index.
2375d85c7b5eSOri Kam  *
2376d85c7b5eSOri Kam  * @return
2377c06f77aeSMichael Baum  *   True if Rx queue type is Hairpin, otherwise False.
2378d85c7b5eSOri Kam  */
2379c06f77aeSMichael Baum bool
2380c06f77aeSMichael Baum mlx5_rxq_is_hairpin(struct rte_eth_dev *dev, uint16_t idx)
2381d85c7b5eSOri Kam {
2382311b17e6SMichael Baum 	struct mlx5_rxq_ctrl *rxq_ctrl;
2383d85c7b5eSOri Kam 
2384311b17e6SMichael Baum 	if (mlx5_is_external_rxq(dev, idx))
2385311b17e6SMichael Baum 		return false;
2386311b17e6SMichael Baum 	rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
2387d37b0b4dSMichael Baum 	return (rxq_ctrl != NULL && rxq_ctrl->is_hairpin);
2388d85c7b5eSOri Kam }
2389d85c7b5eSOri Kam 
23903f90d1a0SBing Zhao /**
23913f90d1a0SBing Zhao  * Get a Rx hairpin queue configuration.
23923f90d1a0SBing Zhao  *
23933f90d1a0SBing Zhao  * @param dev
23943f90d1a0SBing Zhao  *   Pointer to Ethernet device.
23953f90d1a0SBing Zhao  * @param idx
23963f90d1a0SBing Zhao  *   Rx queue index.
23973f90d1a0SBing Zhao  *
23983f90d1a0SBing Zhao  * @return
23993f90d1a0SBing Zhao  *   Pointer to the configuration if a hairpin RX queue, otherwise NULL.
24003f90d1a0SBing Zhao  */
24013f90d1a0SBing Zhao const struct rte_eth_hairpin_conf *
24023f90d1a0SBing Zhao mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
24033f90d1a0SBing Zhao {
2404d37b0b4dSMichael Baum 	if (mlx5_rxq_is_hairpin(dev, idx)) {
240544126bd9SXueming Li 		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
24063f90d1a0SBing Zhao 
2407d37b0b4dSMichael Baum 		return rxq != NULL ? &rxq->hairpin_conf : NULL;
2408d37b0b4dSMichael Baum 	}
2409d37b0b4dSMichael Baum 	return NULL;
24103f90d1a0SBing Zhao }
24113f90d1a0SBing Zhao 
2412d85c7b5eSOri Kam /**
2413b8cc58c1SAndrey Vesnovaty  * Match queues listed in arguments to queues contained in indirection table
2414b8cc58c1SAndrey Vesnovaty  * object.
2415b8cc58c1SAndrey Vesnovaty  *
2416b8cc58c1SAndrey Vesnovaty  * @param ind_tbl
2417b8cc58c1SAndrey Vesnovaty  *   Pointer to indirection table to match.
2418b8cc58c1SAndrey Vesnovaty  * @param queues
2419d37b0b4dSMichael Baum  *   Queues to match to queues in indirection table.
2420b8cc58c1SAndrey Vesnovaty  * @param queues_n
2421b8cc58c1SAndrey Vesnovaty  *   Number of queues in the array.
2422b8cc58c1SAndrey Vesnovaty  *
2423b8cc58c1SAndrey Vesnovaty  * @return
24247be78d02SJosh Soref  *   1 if all queues in the indirection table match, 0 otherwise.
2425b8cc58c1SAndrey Vesnovaty  */
2426b8cc58c1SAndrey Vesnovaty static int
2427b8cc58c1SAndrey Vesnovaty mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
2428b8cc58c1SAndrey Vesnovaty 				const uint16_t *queues, uint32_t queues_n)
2429b8cc58c1SAndrey Vesnovaty {
2430b8cc58c1SAndrey Vesnovaty 	return (ind_tbl->queues_n == queues_n) &&
2431b8cc58c1SAndrey Vesnovaty 		(!memcmp(ind_tbl->queues, queues,
2432b8cc58c1SAndrey Vesnovaty 			 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
2433b8cc58c1SAndrey Vesnovaty }
2434b8cc58c1SAndrey Vesnovaty 
2435b8cc58c1SAndrey Vesnovaty /**
24364c7a0f5fSNélio Laranjeiro  * Get an indirection table.
24374c7a0f5fSNélio Laranjeiro  *
2438af4f09f2SNélio Laranjeiro  * @param dev
2439af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
24404c7a0f5fSNélio Laranjeiro  * @param queues
24414c7a0f5fSNélio Laranjeiro  *   Queues entering in the indirection table.
24424c7a0f5fSNélio Laranjeiro  * @param queues_n
24434c7a0f5fSNélio Laranjeiro  *   Number of queues in the array.
24444c7a0f5fSNélio Laranjeiro  *
24454c7a0f5fSNélio Laranjeiro  * @return
24464c7a0f5fSNélio Laranjeiro  *   An indirection table if found, NULL otherwise.
24474c7a0f5fSNélio Laranjeiro  */
244885552726SMichael Baum struct mlx5_ind_table_obj *
244915c80a12SDekel Peled mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2450ac8d22deSAdrien Mazarguil 		       uint32_t queues_n)
24514c7a0f5fSNélio Laranjeiro {
2452dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
245315c80a12SDekel Peled 	struct mlx5_ind_table_obj *ind_tbl;
24544c7a0f5fSNélio Laranjeiro 
2455491b7137SMatan Azrad 	rte_rwlock_read_lock(&priv->ind_tbls_lock);
24564c7a0f5fSNélio Laranjeiro 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
24574c7a0f5fSNélio Laranjeiro 		if ((ind_tbl->queues_n == queues_n) &&
24584c7a0f5fSNélio Laranjeiro 		    (memcmp(ind_tbl->queues, queues,
24594c7a0f5fSNélio Laranjeiro 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2460491b7137SMatan Azrad 		     == 0)) {
2461e12a0166STyler Retzlaff 			rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
2462e12a0166STyler Retzlaff 					   rte_memory_order_relaxed);
24634c7a0f5fSNélio Laranjeiro 			break;
24644c7a0f5fSNélio Laranjeiro 		}
24654c7a0f5fSNélio Laranjeiro 	}
2466491b7137SMatan Azrad 	rte_rwlock_read_unlock(&priv->ind_tbls_lock);
24674c7a0f5fSNélio Laranjeiro 	return ind_tbl;
24684c7a0f5fSNélio Laranjeiro }
24694c7a0f5fSNélio Laranjeiro 
24704c7a0f5fSNélio Laranjeiro /**
24714c7a0f5fSNélio Laranjeiro  * Release an indirection table.
24724c7a0f5fSNélio Laranjeiro  *
2473af4f09f2SNélio Laranjeiro  * @param dev
2474af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
24754c7a0f5fSNélio Laranjeiro  * @param ind_table
24764c7a0f5fSNélio Laranjeiro  *   Indirection table to release.
24778fbce96fSDariusz Sosnowski  * @param deref_rxqs
24788fbce96fSDariusz Sosnowski  *   If true, then dereference RX queues related to indirection table.
24798fbce96fSDariusz Sosnowski  *   Otherwise, no additional action will be taken.
24804c7a0f5fSNélio Laranjeiro  *
24814c7a0f5fSNélio Laranjeiro  * @return
2482925061b5SNélio Laranjeiro  *   1 while a reference on it exists, 0 when freed.
24834c7a0f5fSNélio Laranjeiro  */
248485552726SMichael Baum int
248515c80a12SDekel Peled mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
248684d33890SSuanming Mou 			   struct mlx5_ind_table_obj *ind_tbl,
24878fbce96fSDariusz Sosnowski 			   bool deref_rxqs)
24884c7a0f5fSNélio Laranjeiro {
248987e2db37SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
2490f6f1195cSMichael Baum 	unsigned int ret;
24914c7a0f5fSNélio Laranjeiro 
2492491b7137SMatan Azrad 	rte_rwlock_write_lock(&priv->ind_tbls_lock);
2493e12a0166STyler Retzlaff 	ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
24943a2f674bSSuanming Mou 	if (!ret)
2495491b7137SMatan Azrad 		LIST_REMOVE(ind_tbl, next);
2496491b7137SMatan Azrad 	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2497491b7137SMatan Azrad 	if (ret)
2498491b7137SMatan Azrad 		return 1;
24995eaf882eSMichael Baum 	priv->obj_ops.ind_table_destroy(ind_tbl);
2500f6f1195cSMichael Baum 	if (deref_rxqs)
2501f6f1195cSMichael Baum 		mlx5_rxqs_deref(dev, ind_tbl->queues, ind_tbl->queues_n);
250283c2047cSSuanming Mou 	mlx5_free(ind_tbl);
25034c7a0f5fSNélio Laranjeiro 	return 0;
25044c7a0f5fSNélio Laranjeiro }
25054c7a0f5fSNélio Laranjeiro 
25064c7a0f5fSNélio Laranjeiro /**
25074c7a0f5fSNélio Laranjeiro  * Verify the indirection table list is empty.
25084c7a0f5fSNélio Laranjeiro  *
2509af4f09f2SNélio Laranjeiro  * @param dev
2510af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
25114c7a0f5fSNélio Laranjeiro  *
2512fb732b0aSNélio Laranjeiro  * @return
2513fb732b0aSNélio Laranjeiro  *   The number of objects not released.
25144c7a0f5fSNélio Laranjeiro  */
25154c7a0f5fSNélio Laranjeiro int
251615c80a12SDekel Peled mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
25174c7a0f5fSNélio Laranjeiro {
2518dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
251915c80a12SDekel Peled 	struct mlx5_ind_table_obj *ind_tbl;
25204c7a0f5fSNélio Laranjeiro 	int ret = 0;
25214c7a0f5fSNélio Laranjeiro 
2522491b7137SMatan Azrad 	rte_rwlock_read_lock(&priv->ind_tbls_lock);
25234c7a0f5fSNélio Laranjeiro 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2524a170a30dSNélio Laranjeiro 		DRV_LOG(DEBUG,
252515c80a12SDekel Peled 			"port %u indirection table obj %p still referenced",
25260f99970bSNélio Laranjeiro 			dev->data->port_id, (void *)ind_tbl);
25274c7a0f5fSNélio Laranjeiro 		++ret;
25284c7a0f5fSNélio Laranjeiro 	}
2529491b7137SMatan Azrad 	rte_rwlock_read_unlock(&priv->ind_tbls_lock);
25304c7a0f5fSNélio Laranjeiro 	return ret;
25314c7a0f5fSNélio Laranjeiro }
2532f5479b68SNélio Laranjeiro 
2533f5479b68SNélio Laranjeiro /**
2534fa7ad49eSAndrey Vesnovaty  * Set up the fields of an indirection table structure.
2535fa7ad49eSAndrey Vesnovaty  *
2536fa7ad49eSAndrey Vesnovaty  * @param dev
2537fa7ad49eSAndrey Vesnovaty  *   Pointer to Ethernet device.
2538fa7ad49eSAndrey Vesnovaty  * @param ind_table
2539fa7ad49eSAndrey Vesnovaty  *   Indirection table to set up.
2540c65d6844SDmitry Kozlyuk  * @param ref_qs
2541c65d6844SDmitry Kozlyuk  *   Whether to increment RxQ reference counters.
2542fa7ad49eSAndrey Vesnovaty  *
2543fa7ad49eSAndrey Vesnovaty  * @return
2544fa7ad49eSAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
2545fa7ad49eSAndrey Vesnovaty  */
2546fa7ad49eSAndrey Vesnovaty int
2547fa7ad49eSAndrey Vesnovaty mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
2548c65d6844SDmitry Kozlyuk 			 struct mlx5_ind_table_obj *ind_tbl,
2549c65d6844SDmitry Kozlyuk 			 bool ref_qs)
2550fa7ad49eSAndrey Vesnovaty {
2551fa7ad49eSAndrey Vesnovaty 	struct mlx5_priv *priv = dev->data->dev_private;
2552fa7ad49eSAndrey Vesnovaty 	uint32_t queues_n = ind_tbl->queues_n;
25534dcf29a8SMichael Baum 	int ret;
2554fa7ad49eSAndrey Vesnovaty 	const unsigned int n = rte_is_power_of_2(queues_n) ?
2555fa7ad49eSAndrey Vesnovaty 			       log2above(queues_n) :
255687af0d1eSMichael Baum 			       log2above(priv->sh->dev_cap.ind_table_max_size);
2557fa7ad49eSAndrey Vesnovaty 
25584dcf29a8SMichael Baum 	if (ref_qs && mlx5_rxqs_ref(dev, ind_tbl->queues, queues_n) < 0) {
25594dcf29a8SMichael Baum 		DRV_LOG(DEBUG, "Port %u invalid indirection table queues.",
25604dcf29a8SMichael Baum 			dev->data->port_id);
25614dcf29a8SMichael Baum 		return -rte_errno;
2562311b17e6SMichael Baum 	}
2563fa7ad49eSAndrey Vesnovaty 	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
25644dcf29a8SMichael Baum 	if (ret) {
25654dcf29a8SMichael Baum 		DRV_LOG(DEBUG, "Port %u cannot create a new indirection table.",
25664dcf29a8SMichael Baum 			dev->data->port_id);
2567c65d6844SDmitry Kozlyuk 		if (ref_qs) {
25684dcf29a8SMichael Baum 			int err = rte_errno;
25694dcf29a8SMichael Baum 
25704dcf29a8SMichael Baum 			mlx5_rxqs_deref(dev, ind_tbl->queues, queues_n);
2571fa7ad49eSAndrey Vesnovaty 			rte_errno = err;
2572c65d6844SDmitry Kozlyuk 		}
2573fa7ad49eSAndrey Vesnovaty 		return ret;
2574fa7ad49eSAndrey Vesnovaty 	}
2575e12a0166STyler Retzlaff 	rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
25764dcf29a8SMichael Baum 	return 0;
25774dcf29a8SMichael Baum }
2578fa7ad49eSAndrey Vesnovaty 
2579fa7ad49eSAndrey Vesnovaty /**
258025ae7f1aSMichael Baum  * Create an indirection table.
258125ae7f1aSMichael Baum  *
258225ae7f1aSMichael Baum  * @param dev
258325ae7f1aSMichael Baum  *   Pointer to Ethernet device.
258425ae7f1aSMichael Baum  * @param queues
258525ae7f1aSMichael Baum  *   Queues entering in the indirection table.
258625ae7f1aSMichael Baum  * @param queues_n
258725ae7f1aSMichael Baum  *   Number of queues in the array.
258884d33890SSuanming Mou  * @param standalone
258984d33890SSuanming Mou  *   Indirection table for Standalone queue.
2590c65d6844SDmitry Kozlyuk  * @param ref_qs
2591c65d6844SDmitry Kozlyuk  *   Whether to increment RxQ reference counters.
259225ae7f1aSMichael Baum  *
259325ae7f1aSMichael Baum  * @return
259425ae7f1aSMichael Baum  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
259525ae7f1aSMichael Baum  */
25963a2f674bSSuanming Mou struct mlx5_ind_table_obj *
259725ae7f1aSMichael Baum mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2598c65d6844SDmitry Kozlyuk 		       uint32_t queues_n, bool standalone, bool ref_qs)
259925ae7f1aSMichael Baum {
260025ae7f1aSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
260125ae7f1aSMichael Baum 	struct mlx5_ind_table_obj *ind_tbl;
260225ae7f1aSMichael Baum 	int ret;
26039fa7c1cdSDariusz Sosnowski 	uint32_t max_queues_n = priv->rxqs_n > queues_n ? priv->rxqs_n : queues_n;
260425ae7f1aSMichael Baum 
26053a2f674bSSuanming Mou 	/*
26063a2f674bSSuanming Mou 	 * Allocate the maximum number of queues for a shared action, as
26073a2f674bSSuanming Mou 	 * the queue number may be modified later.
26083a2f674bSSuanming Mou 	 */
260925ae7f1aSMichael Baum 	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
26109fa7c1cdSDariusz Sosnowski 			      (standalone ? max_queues_n : queues_n) *
26113a2f674bSSuanming Mou 			      sizeof(uint16_t), 0, SOCKET_ID_ANY);
261225ae7f1aSMichael Baum 	if (!ind_tbl) {
261325ae7f1aSMichael Baum 		rte_errno = ENOMEM;
261425ae7f1aSMichael Baum 		return NULL;
261525ae7f1aSMichael Baum 	}
261625ae7f1aSMichael Baum 	ind_tbl->queues_n = queues_n;
2617fa7ad49eSAndrey Vesnovaty 	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2618fa7ad49eSAndrey Vesnovaty 	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2619c65d6844SDmitry Kozlyuk 	ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
2620fa7ad49eSAndrey Vesnovaty 	if (ret < 0) {
2621fa7ad49eSAndrey Vesnovaty 		mlx5_free(ind_tbl);
2622fa7ad49eSAndrey Vesnovaty 		return NULL;
262325ae7f1aSMichael Baum 	}
2624491b7137SMatan Azrad 	rte_rwlock_write_lock(&priv->ind_tbls_lock);
26253a2f674bSSuanming Mou 	if (!standalone)
262625ae7f1aSMichael Baum 		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
26273a2f674bSSuanming Mou 	else
26283a2f674bSSuanming Mou 		LIST_INSERT_HEAD(&priv->standalone_ind_tbls, ind_tbl, next);
2629491b7137SMatan Azrad 	rte_rwlock_write_unlock(&priv->ind_tbls_lock);
26303a2f674bSSuanming Mou 
263125ae7f1aSMichael Baum 	return ind_tbl;
2632fa7ad49eSAndrey Vesnovaty }
2633fa7ad49eSAndrey Vesnovaty 
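/*
 * Editorial usage sketch, not part of the driver: creating and releasing a
 * non-standalone indirection table over two hypothetical queue indexes. The
 * queue array and the started-port assumption are illustrative; only
 * mlx5_ind_table_obj_new() and mlx5_ind_table_obj_release() are real APIs.
 */
static __rte_unused int
mlx5_ind_tbl_usage_sketch(struct rte_eth_dev *dev)
{
	uint16_t queues[] = { 0, 1 };
	struct mlx5_ind_table_obj *ind_tbl;

	/* Assume the port is started so Rx queue references are taken. */
	ind_tbl = mlx5_ind_table_obj_new(dev, queues, RTE_DIM(queues),
					 false, true);
	if (ind_tbl == NULL)
		return -rte_errno;
	/* ... the table can now back an RSS hash Rx queue ... */
	return mlx5_ind_table_obj_release(dev, ind_tbl, true);
}
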
2634ec4e11d4SDmitry Kozlyuk static int
2635ec4e11d4SDmitry Kozlyuk mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
2636ec4e11d4SDmitry Kozlyuk 				    struct mlx5_ind_table_obj *ind_tbl)
2637ec4e11d4SDmitry Kozlyuk {
2638ec4e11d4SDmitry Kozlyuk 	uint32_t refcnt;
2639ec4e11d4SDmitry Kozlyuk 
2640e12a0166STyler Retzlaff 	refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
2641ec4e11d4SDmitry Kozlyuk 	if (refcnt <= 1)
2642ec4e11d4SDmitry Kozlyuk 		return 0;
2643ec4e11d4SDmitry Kozlyuk 	/*
2644ec4e11d4SDmitry Kozlyuk 	 * Modification of indirection tables having more than 1
2645ec4e11d4SDmitry Kozlyuk 	 * reference is unsupported.
2646ec4e11d4SDmitry Kozlyuk 	 */
2647ec4e11d4SDmitry Kozlyuk 	DRV_LOG(DEBUG,
2648ec4e11d4SDmitry Kozlyuk 		"Port %u cannot modify indirection table %p (refcnt %u > 1).",
2649ec4e11d4SDmitry Kozlyuk 		dev->data->port_id, (void *)ind_tbl, refcnt);
2650ec4e11d4SDmitry Kozlyuk 	rte_errno = EINVAL;
2651ec4e11d4SDmitry Kozlyuk 	return -rte_errno;
2652ec4e11d4SDmitry Kozlyuk }
2653ec4e11d4SDmitry Kozlyuk 
2654fa7ad49eSAndrey Vesnovaty /**
2655fa7ad49eSAndrey Vesnovaty  * Modify an indirection table.
2656fa7ad49eSAndrey Vesnovaty  *
2657fa7ad49eSAndrey Vesnovaty  * @param dev
2658fa7ad49eSAndrey Vesnovaty  *   Pointer to Ethernet device.
2659fa7ad49eSAndrey Vesnovaty  * @param ind_table
2660fa7ad49eSAndrey Vesnovaty  *   Indirection table to modify.
2661fa7ad49eSAndrey Vesnovaty  * @param queues
2662fa7ad49eSAndrey Vesnovaty  *   Queues replacement for the indirection table.
2663fa7ad49eSAndrey Vesnovaty  * @param queues_n
2664fa7ad49eSAndrey Vesnovaty  *   Number of queues in the array.
2665fa7ad49eSAndrey Vesnovaty  * @param standalone
2666fa7ad49eSAndrey Vesnovaty  *   Indirection table for Standalone queue.
2667ec9b812bSDmitry Kozlyuk  * @param ref_new_qs
2668ec9b812bSDmitry Kozlyuk  *   Whether to increment new RxQ set reference counters.
2669ec9b812bSDmitry Kozlyuk  * @param deref_old_qs
2670ec9b812bSDmitry Kozlyuk  *   Whether to decrement old RxQ set reference counters.
2671fa7ad49eSAndrey Vesnovaty  *
2672fa7ad49eSAndrey Vesnovaty  * @return
2673fa7ad49eSAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
2674fa7ad49eSAndrey Vesnovaty  */
2675fa7ad49eSAndrey Vesnovaty int
2676fa7ad49eSAndrey Vesnovaty mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2677fa7ad49eSAndrey Vesnovaty 			  struct mlx5_ind_table_obj *ind_tbl,
2678fa7ad49eSAndrey Vesnovaty 			  uint16_t *queues, const uint32_t queues_n,
2679ec9b812bSDmitry Kozlyuk 			  bool standalone, bool ref_new_qs, bool deref_old_qs)
2680fa7ad49eSAndrey Vesnovaty {
2681fa7ad49eSAndrey Vesnovaty 	struct mlx5_priv *priv = dev->data->dev_private;
26824dcf29a8SMichael Baum 	int ret;
2683fa7ad49eSAndrey Vesnovaty 	const unsigned int n = rte_is_power_of_2(queues_n) ?
2684fa7ad49eSAndrey Vesnovaty 			       log2above(queues_n) :
268587af0d1eSMichael Baum 			       log2above(priv->sh->dev_cap.ind_table_max_size);
2686fa7ad49eSAndrey Vesnovaty 
2687fa7ad49eSAndrey Vesnovaty 	MLX5_ASSERT(standalone);
2688fa7ad49eSAndrey Vesnovaty 	RTE_SET_USED(standalone);
2689ec4e11d4SDmitry Kozlyuk 	if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
2690fa7ad49eSAndrey Vesnovaty 		return -rte_errno;
26914dcf29a8SMichael Baum 	if (ref_new_qs && mlx5_rxqs_ref(dev, queues, queues_n) < 0) {
26924dcf29a8SMichael Baum 		DRV_LOG(DEBUG, "Port %u invalid indirection table queues.",
26934dcf29a8SMichael Baum 			dev->data->port_id);
26944dcf29a8SMichael Baum 		return -rte_errno;
2695fa7ad49eSAndrey Vesnovaty 	}
2696fa7ad49eSAndrey Vesnovaty 	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2697fa7ad49eSAndrey Vesnovaty 	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
26984dcf29a8SMichael Baum 	if (ret) {
26994dcf29a8SMichael Baum 		DRV_LOG(DEBUG, "Port %u cannot modify indirection table.",
27004dcf29a8SMichael Baum 			dev->data->port_id);
27014dcf29a8SMichael Baum 		if (ref_new_qs) {
27024dcf29a8SMichael Baum 			int err = rte_errno;
27034dcf29a8SMichael Baum 
27044dcf29a8SMichael Baum 			mlx5_rxqs_deref(dev, queues, queues_n);
27054dcf29a8SMichael Baum 			rte_errno = err;
27064dcf29a8SMichael Baum 		}
27074dcf29a8SMichael Baum 		return ret;
27084dcf29a8SMichael Baum 	}
2709ec9b812bSDmitry Kozlyuk 	if (deref_old_qs)
27104dcf29a8SMichael Baum 		mlx5_rxqs_deref(dev, ind_tbl->queues, ind_tbl->queues_n);
2711fa7ad49eSAndrey Vesnovaty 	ind_tbl->queues_n = queues_n;
2712fa7ad49eSAndrey Vesnovaty 	ind_tbl->queues = queues;
2713fa7ad49eSAndrey Vesnovaty 	return 0;
271425ae7f1aSMichael Baum }
271525ae7f1aSMichael Baum 
2716ec4e11d4SDmitry Kozlyuk /**
2717ec4e11d4SDmitry Kozlyuk  * Attach an indirection table to its queues.
2718ec4e11d4SDmitry Kozlyuk  *
2719ec4e11d4SDmitry Kozlyuk  * @param dev
2720ec4e11d4SDmitry Kozlyuk  *   Pointer to Ethernet device.
2721ec4e11d4SDmitry Kozlyuk  * @param ind_table
2722ec4e11d4SDmitry Kozlyuk  *   Indirection table to attach.
2723ec4e11d4SDmitry Kozlyuk  *
2724ec4e11d4SDmitry Kozlyuk  * @return
2725ec4e11d4SDmitry Kozlyuk  *   0 on success, a negative errno value otherwise and rte_errno is set.
2726ec4e11d4SDmitry Kozlyuk  */
2727ec4e11d4SDmitry Kozlyuk int
2728ec4e11d4SDmitry Kozlyuk mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
2729ec4e11d4SDmitry Kozlyuk 			  struct mlx5_ind_table_obj *ind_tbl)
2730ec4e11d4SDmitry Kozlyuk {
2731ec4e11d4SDmitry Kozlyuk 	int ret;
2732ec4e11d4SDmitry Kozlyuk 
2733ec4e11d4SDmitry Kozlyuk 	ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
2734ec9b812bSDmitry Kozlyuk 					ind_tbl->queues_n,
2735ec9b812bSDmitry Kozlyuk 					true /* standalone */,
2736ec9b812bSDmitry Kozlyuk 					true /* ref_new_qs */,
2737ec9b812bSDmitry Kozlyuk 					false /* deref_old_qs */);
2738ec9b812bSDmitry Kozlyuk 	if (ret != 0)
2739ec4e11d4SDmitry Kozlyuk 		DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2740ec4e11d4SDmitry Kozlyuk 			dev->data->port_id, (void *)ind_tbl);
2741ec4e11d4SDmitry Kozlyuk 	return ret;
2742ec4e11d4SDmitry Kozlyuk }
2743ec4e11d4SDmitry Kozlyuk 
2744ec4e11d4SDmitry Kozlyuk /**
2745ec4e11d4SDmitry Kozlyuk  * Detach an indirection table from its queues.
2746ec4e11d4SDmitry Kozlyuk  *
2747ec4e11d4SDmitry Kozlyuk  * @param dev
2748ec4e11d4SDmitry Kozlyuk  *   Pointer to Ethernet device.
2749ec4e11d4SDmitry Kozlyuk  * @param ind_table
2750ec4e11d4SDmitry Kozlyuk  *   Indirection table to detach.
2751ec4e11d4SDmitry Kozlyuk  *
2752ec4e11d4SDmitry Kozlyuk  * @return
2753ec4e11d4SDmitry Kozlyuk  *   0 on success, a negative errno value otherwise and rte_errno is set.
2754ec4e11d4SDmitry Kozlyuk  */
2755ec4e11d4SDmitry Kozlyuk int
2756ec4e11d4SDmitry Kozlyuk mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
2757ec4e11d4SDmitry Kozlyuk 			  struct mlx5_ind_table_obj *ind_tbl)
2758ec4e11d4SDmitry Kozlyuk {
2759ec4e11d4SDmitry Kozlyuk 	struct mlx5_priv *priv = dev->data->dev_private;
2760ec4e11d4SDmitry Kozlyuk 	const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
2761ec4e11d4SDmitry Kozlyuk 			       log2above(ind_tbl->queues_n) :
276287af0d1eSMichael Baum 			       log2above(priv->sh->dev_cap.ind_table_max_size);
2763ec4e11d4SDmitry Kozlyuk 	unsigned int i;
2764ec4e11d4SDmitry Kozlyuk 	int ret;
2765ec4e11d4SDmitry Kozlyuk 
2766ec4e11d4SDmitry Kozlyuk 	ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
2767ec4e11d4SDmitry Kozlyuk 	if (ret != 0)
2768ec4e11d4SDmitry Kozlyuk 		return ret;
2769ec4e11d4SDmitry Kozlyuk 	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2770ec4e11d4SDmitry Kozlyuk 	ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
2771ec4e11d4SDmitry Kozlyuk 	if (ret != 0) {
2772ec4e11d4SDmitry Kozlyuk 		DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2773ec4e11d4SDmitry Kozlyuk 			dev->data->port_id, (void *)ind_tbl);
2774ec4e11d4SDmitry Kozlyuk 		return ret;
2775ec4e11d4SDmitry Kozlyuk 	}
2776ec4e11d4SDmitry Kozlyuk 	for (i = 0; i < ind_tbl->queues_n; i++)
2777ec4e11d4SDmitry Kozlyuk 		mlx5_rxq_release(dev, ind_tbl->queues[i]);
2778ec4e11d4SDmitry Kozlyuk 	return ret;
2779ec4e11d4SDmitry Kozlyuk }
2780ec4e11d4SDmitry Kozlyuk 
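/*
 * Editorial usage sketch, not part of the driver: a standalone indirection
 * table is detached from its Rx queues before a port restart and re-attached
 * afterwards. The surrounding stop/start handling is assumed; only
 * mlx5_ind_table_obj_detach() and mlx5_ind_table_obj_attach() are real APIs.
 */
static __rte_unused int
mlx5_ind_tbl_restart_sketch(struct rte_eth_dev *dev,
			    struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = mlx5_ind_table_obj_detach(dev, ind_tbl);

	if (ret != 0)
		return ret;
	/* ... Rx queues may be reconfigured while the table is detached ... */
	return mlx5_ind_table_obj_attach(dev, ind_tbl);
}
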
2781e1592b6cSSuanming Mou int
2782a5835d53SSuanming Mou mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
2783a5835d53SSuanming Mou 		   void *cb_ctx)
2784f5479b68SNélio Laranjeiro {
2785e1592b6cSSuanming Mou 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2786e1592b6cSSuanming Mou 	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2787e1592b6cSSuanming Mou 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2788f5479b68SNélio Laranjeiro 
2789a5835d53SSuanming Mou 	return (hrxq->rss_key_len != rss_desc->key_len ||
27900e04e1e2SXueming Li 	    hrxq->symmetric_hash_function != rss_desc->symmetric_hash_function ||
2791e1592b6cSSuanming Mou 	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
27923a2f674bSSuanming Mou 	    hrxq->hws_flags != rss_desc->hws_flags ||
2793a5835d53SSuanming Mou 	    hrxq->hash_fields != rss_desc->hash_fields ||
2794a5835d53SSuanming Mou 	    hrxq->ind_table->queues_n != rss_desc->queue_num ||
2795a5835d53SSuanming Mou 	    memcmp(hrxq->ind_table->queues, rss_desc->queue,
2796a5835d53SSuanming Mou 	    rss_desc->queue_num * sizeof(rss_desc->queue[0])));
2797f5479b68SNélio Laranjeiro }
2798f5479b68SNélio Laranjeiro 
2799f5479b68SNélio Laranjeiro /**
2800b8cc58c1SAndrey Vesnovaty  * Modify an Rx Hash queue configuration.
2801b8cc58c1SAndrey Vesnovaty  *
2802b8cc58c1SAndrey Vesnovaty  * @param dev
2803b8cc58c1SAndrey Vesnovaty  * @param hrxq_idx
2804b8cc58c1SAndrey Vesnovaty  * @param hrxq
2805b8cc58c1SAndrey Vesnovaty  *   Index to Hash Rx queue to modify.
2806b8cc58c1SAndrey Vesnovaty  * @param rss_key
2807b8cc58c1SAndrey Vesnovaty  *   RSS key for the Rx hash queue.
2808b8cc58c1SAndrey Vesnovaty  * @param rss_key_len
2809b8cc58c1SAndrey Vesnovaty  *   RSS key length.
2810b8cc58c1SAndrey Vesnovaty  * @param hash_fields
2811b8cc58c1SAndrey Vesnovaty  *   Verbs protocol hash field to make the RSS on.
 * @param symmetric_hash_function
 *   Whether to use a symmetric RSS hash function.
2812b8cc58c1SAndrey Vesnovaty  * @param queues
2813b8cc58c1SAndrey Vesnovaty  *   Queues entering in hash queue. In case of empty hash_fields only the
2814b8cc58c1SAndrey Vesnovaty  *   first queue index will be taken for the indirection table.
2815b8cc58c1SAndrey Vesnovaty  * @param queues_n
2816b8cc58c1SAndrey Vesnovaty  *   Number of queues.
2817b8cc58c1SAndrey Vesnovaty  *
2818b8cc58c1SAndrey Vesnovaty  * @return
2819b8cc58c1SAndrey Vesnovaty  *   0 on success, a negative errno value otherwise and rte_errno is set.
2820b8cc58c1SAndrey Vesnovaty  */
2821b8cc58c1SAndrey Vesnovaty int
2822b8cc58c1SAndrey Vesnovaty mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2823b8cc58c1SAndrey Vesnovaty 		 const uint8_t *rss_key, uint32_t rss_key_len,
28240e04e1e2SXueming Li 		 uint64_t hash_fields, bool symmetric_hash_function,
2825b8cc58c1SAndrey Vesnovaty 		 const uint16_t *queues, uint32_t queues_n)
2826b8cc58c1SAndrey Vesnovaty {
2827b8cc58c1SAndrey Vesnovaty 	int err;
2828b8cc58c1SAndrey Vesnovaty 	struct mlx5_ind_table_obj *ind_tbl = NULL;
2829b8cc58c1SAndrey Vesnovaty 	struct mlx5_priv *priv = dev->data->dev_private;
2830b8cc58c1SAndrey Vesnovaty 	struct mlx5_hrxq *hrxq =
2831b8cc58c1SAndrey Vesnovaty 		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2832c65d6844SDmitry Kozlyuk 	bool dev_started = !!dev->data->dev_started;
2833b8cc58c1SAndrey Vesnovaty 	int ret;
2834b8cc58c1SAndrey Vesnovaty 
2835b8cc58c1SAndrey Vesnovaty 	if (!hrxq) {
2836b8cc58c1SAndrey Vesnovaty 		rte_errno = EINVAL;
2837b8cc58c1SAndrey Vesnovaty 		return -rte_errno;
2838b8cc58c1SAndrey Vesnovaty 	}
2839b8cc58c1SAndrey Vesnovaty 	/* validations */
2840b8cc58c1SAndrey Vesnovaty 	if (hrxq->rss_key_len != rss_key_len) {
2841b8cc58c1SAndrey Vesnovaty 		/* rss_key_len is fixed at 40 bytes and is not supposed to change. */
2842b8cc58c1SAndrey Vesnovaty 		rte_errno = EINVAL;
2843b8cc58c1SAndrey Vesnovaty 		return -rte_errno;
2844b8cc58c1SAndrey Vesnovaty 	}
2845b8cc58c1SAndrey Vesnovaty 	queues_n = hash_fields ? queues_n : 1;
2846b8cc58c1SAndrey Vesnovaty 	if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2847b8cc58c1SAndrey Vesnovaty 					    queues, queues_n)) {
2848b8cc58c1SAndrey Vesnovaty 		ind_tbl = hrxq->ind_table;
2849b8cc58c1SAndrey Vesnovaty 	} else {
2850fa7ad49eSAndrey Vesnovaty 		if (hrxq->standalone) {
2851fa7ad49eSAndrey Vesnovaty 			/*
2852fa7ad49eSAndrey Vesnovaty 			 * Replacement of indirection table unsupported for
28537be78d02SJosh Soref 			 * standalone hrxq objects (used by shared RSS).
2854fa7ad49eSAndrey Vesnovaty 			 */
2855fa7ad49eSAndrey Vesnovaty 			rte_errno = ENOTSUP;
2856fa7ad49eSAndrey Vesnovaty 			return -rte_errno;
2857fa7ad49eSAndrey Vesnovaty 		}
2858b8cc58c1SAndrey Vesnovaty 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2859b8cc58c1SAndrey Vesnovaty 		if (!ind_tbl)
286084d33890SSuanming Mou 			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2861c65d6844SDmitry Kozlyuk 							 hrxq->standalone,
2862c65d6844SDmitry Kozlyuk 							 dev_started);
2863b8cc58c1SAndrey Vesnovaty 	}
2864b8cc58c1SAndrey Vesnovaty 	if (!ind_tbl) {
2865b8cc58c1SAndrey Vesnovaty 		rte_errno = ENOMEM;
2866b8cc58c1SAndrey Vesnovaty 		return -rte_errno;
2867b8cc58c1SAndrey Vesnovaty 	}
2868b8cc58c1SAndrey Vesnovaty 	MLX5_ASSERT(priv->obj_ops.hrxq_modify);
28690e04e1e2SXueming Li 	ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key, hash_fields,
28700e04e1e2SXueming Li 					symmetric_hash_function, ind_tbl);
2871b8cc58c1SAndrey Vesnovaty 	if (ret) {
2872b8cc58c1SAndrey Vesnovaty 		rte_errno = errno;
2873b8cc58c1SAndrey Vesnovaty 		goto error;
2874b8cc58c1SAndrey Vesnovaty 	}
2875b8cc58c1SAndrey Vesnovaty 	if (ind_tbl != hrxq->ind_table) {
2876fa7ad49eSAndrey Vesnovaty 		MLX5_ASSERT(!hrxq->standalone);
28773a2f674bSSuanming Mou 		mlx5_ind_table_obj_release(dev, hrxq->ind_table, true);
2878b8cc58c1SAndrey Vesnovaty 		hrxq->ind_table = ind_tbl;
2879b8cc58c1SAndrey Vesnovaty 	}
2880b8cc58c1SAndrey Vesnovaty 	hrxq->hash_fields = hash_fields;
2881b8cc58c1SAndrey Vesnovaty 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
2882b8cc58c1SAndrey Vesnovaty 	return 0;
2883b8cc58c1SAndrey Vesnovaty error:
2884b8cc58c1SAndrey Vesnovaty 	err = rte_errno;
2885fa7ad49eSAndrey Vesnovaty 	if (ind_tbl != hrxq->ind_table) {
2886fa7ad49eSAndrey Vesnovaty 		MLX5_ASSERT(!hrxq->standalone);
28873a2f674bSSuanming Mou 		mlx5_ind_table_obj_release(dev, ind_tbl, true);
2888fa7ad49eSAndrey Vesnovaty 	}
2889b8cc58c1SAndrey Vesnovaty 	rte_errno = err;
2890b8cc58c1SAndrey Vesnovaty 	return -rte_errno;
2891b8cc58c1SAndrey Vesnovaty }
2892b8cc58c1SAndrey Vesnovaty 
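/*
 * Editorial usage sketch, not part of the driver: redirecting an existing hash
 * Rx queue to a new queue set with mlx5_hrxq_modify(). The caller-supplied
 * key, hash fields and queue list are assumptions; the fixed 40-byte key
 * length follows from the validation in the function above.
 */
static __rte_unused int
mlx5_hrxq_modify_usage_sketch(struct rte_eth_dev *dev, uint32_t hrxq_idx,
			      const uint8_t *rss_key, uint64_t hash_fields,
			      const uint16_t *queues, uint32_t queues_n)
{
	/* The key length must match the fixed 40-byte RSS key. */
	return mlx5_hrxq_modify(dev, hrxq_idx, rss_key,
				MLX5_RSS_HASH_KEY_LEN, hash_fields,
				false /* symmetric hash */, queues, queues_n);
}
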
2893e1592b6cSSuanming Mou static void
2894e1592b6cSSuanming Mou __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2895e1592b6cSSuanming Mou {
2896e1592b6cSSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
28971ea333d2SBing Zhao 	bool deref_rxqs = true;
2898e1592b6cSSuanming Mou 
2899e1592b6cSSuanming Mou #ifdef HAVE_IBV_FLOW_DV_SUPPORT
29003a2f674bSSuanming Mou 	if (hrxq->hws_flags)
29013a2f674bSSuanming Mou 		mlx5dr_action_destroy(hrxq->action);
29023a2f674bSSuanming Mou 	else
2903e1592b6cSSuanming Mou 		mlx5_glue->destroy_flow_action(hrxq->action);
2904e1592b6cSSuanming Mou #endif
2905e1592b6cSSuanming Mou 	priv->obj_ops.hrxq_destroy(hrxq);
2906fa7ad49eSAndrey Vesnovaty 	if (!hrxq->standalone) {
29071ea333d2SBing Zhao 		if (!dev->data->dev_started && hrxq->hws_flags &&
29081ea333d2SBing Zhao 		    !priv->hws_rule_flushing)
29091ea333d2SBing Zhao 			deref_rxqs = false;
29101ea333d2SBing Zhao 		mlx5_ind_table_obj_release(dev, hrxq->ind_table, deref_rxqs);
2911fa7ad49eSAndrey Vesnovaty 	}
2912e1592b6cSSuanming Mou 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2913e1592b6cSSuanming Mou }
2914e1592b6cSSuanming Mou 
2915b8cc58c1SAndrey Vesnovaty /**
2916f5479b68SNélio Laranjeiro  * Release the hash Rx queue (list callback).
2917f5479b68SNélio Laranjeiro  *
2918af4f09f2SNélio Laranjeiro  * @param tool_ctx
2919af4f09f2SNélio Laranjeiro  *   Pointer to the Ethernet device used as the list tool context.
2925e1592b6cSSuanming Mou  * @param entry
2926e1592b6cSSuanming Mou  *   Hash queue entry pointer.
2927f5479b68SNélio Laranjeiro  */
2928e1592b6cSSuanming Mou void
29296507c9f5SSuanming Mou mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
2930f5479b68SNélio Laranjeiro {
29316507c9f5SSuanming Mou 	struct rte_eth_dev *dev = tool_ctx;
2932e1592b6cSSuanming Mou 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2933772dc0ebSSuanming Mou 
2934e1592b6cSSuanming Mou 	__mlx5_hrxq_remove(dev, hrxq);
2935f5479b68SNélio Laranjeiro }
2936f5479b68SNélio Laranjeiro 
2937e1592b6cSSuanming Mou static struct mlx5_hrxq *
2938e1592b6cSSuanming Mou __mlx5_hrxq_create(struct rte_eth_dev *dev,
2939e1592b6cSSuanming Mou 		   struct mlx5_flow_rss_desc *rss_desc)
29405a959cbfSMichael Baum {
29415a959cbfSMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
2942e1592b6cSSuanming Mou 	const uint8_t *rss_key = rss_desc->key;
2943e1592b6cSSuanming Mou 	uint32_t rss_key_len =  rss_desc->key_len;
2944fabf8a37SSuanming Mou 	bool standalone = !!rss_desc->shared_rss;
2945e1592b6cSSuanming Mou 	const uint16_t *queues =
2946fabf8a37SSuanming Mou 		standalone ? rss_desc->const_q : rss_desc->queue;
2947e1592b6cSSuanming Mou 	uint32_t queues_n = rss_desc->queue_num;
29485a959cbfSMichael Baum 	struct mlx5_hrxq *hrxq = NULL;
29495a959cbfSMichael Baum 	uint32_t hrxq_idx = 0;
2950fa7ad49eSAndrey Vesnovaty 	struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
29515a959cbfSMichael Baum 	int ret;
29525a959cbfSMichael Baum 
2953e1592b6cSSuanming Mou 	queues_n = rss_desc->hash_fields ? queues_n : 1;
29543a2f674bSSuanming Mou 	if (!ind_tbl && !rss_desc->hws_flags)
2955fa7ad49eSAndrey Vesnovaty 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
29565a959cbfSMichael Baum 	if (!ind_tbl)
295784d33890SSuanming Mou 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
29583a2f674bSSuanming Mou 						 standalone ||
29593a2f674bSSuanming Mou 						 rss_desc->hws_flags,
2960c65d6844SDmitry Kozlyuk 						 !!dev->data->dev_started);
2961e1592b6cSSuanming Mou 	if (!ind_tbl)
2962e1592b6cSSuanming Mou 		return NULL;
29635a959cbfSMichael Baum 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
29645a959cbfSMichael Baum 	if (!hrxq)
29655a959cbfSMichael Baum 		goto error;
2966fabf8a37SSuanming Mou 	hrxq->standalone = standalone;
2967e1592b6cSSuanming Mou 	hrxq->idx = hrxq_idx;
29685a959cbfSMichael Baum 	hrxq->ind_table = ind_tbl;
29695a959cbfSMichael Baum 	hrxq->rss_key_len = rss_key_len;
2970e1592b6cSSuanming Mou 	hrxq->hash_fields = rss_desc->hash_fields;
29713a2f674bSSuanming Mou 	hrxq->hws_flags = rss_desc->hws_flags;
29720e04e1e2SXueming Li 	hrxq->symmetric_hash_function = rss_desc->symmetric_hash_function;
29735a959cbfSMichael Baum 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
2974e1592b6cSSuanming Mou 	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
2975e1592b6cSSuanming Mou 	if (ret < 0)
29765a959cbfSMichael Baum 		goto error;
2977e1592b6cSSuanming Mou 	return hrxq;
29785a959cbfSMichael Baum error:
2979fa7ad49eSAndrey Vesnovaty 	if (!rss_desc->ind_tbl)
29803a2f674bSSuanming Mou 		mlx5_ind_table_obj_release(dev, ind_tbl, true);
29815a959cbfSMichael Baum 	if (hrxq)
29825a959cbfSMichael Baum 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2983e1592b6cSSuanming Mou 	return NULL;
2984e1592b6cSSuanming Mou }
2985e1592b6cSSuanming Mou 
2986e78e5408SMatan Azrad struct mlx5_list_entry *
29876507c9f5SSuanming Mou mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
2988e1592b6cSSuanming Mou {
29896507c9f5SSuanming Mou 	struct rte_eth_dev *dev = tool_ctx;
2990e1592b6cSSuanming Mou 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2991e1592b6cSSuanming Mou 	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2992e1592b6cSSuanming Mou 	struct mlx5_hrxq *hrxq;
2993e1592b6cSSuanming Mou 
2994e1592b6cSSuanming Mou 	hrxq = __mlx5_hrxq_create(dev, rss_desc);
2995e1592b6cSSuanming Mou 	return hrxq ? &hrxq->entry : NULL;
2996e1592b6cSSuanming Mou }
2997e1592b6cSSuanming Mou 
2998491b7137SMatan Azrad struct mlx5_list_entry *
29996507c9f5SSuanming Mou mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
3000491b7137SMatan Azrad 		    void *cb_ctx __rte_unused)
3001491b7137SMatan Azrad {
30026507c9f5SSuanming Mou 	struct rte_eth_dev *dev = tool_ctx;
3003491b7137SMatan Azrad 	struct mlx5_priv *priv = dev->data->dev_private;
3004491b7137SMatan Azrad 	struct mlx5_hrxq *hrxq;
3005491b7137SMatan Azrad 	uint32_t hrxq_idx = 0;
3006491b7137SMatan Azrad 
3007491b7137SMatan Azrad 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
3008491b7137SMatan Azrad 	if (!hrxq)
3009491b7137SMatan Azrad 		return NULL;
3010491b7137SMatan Azrad 	memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
3011491b7137SMatan Azrad 	hrxq->idx = hrxq_idx;
3012491b7137SMatan Azrad 	return &hrxq->entry;
3013491b7137SMatan Azrad }
3014491b7137SMatan Azrad 
3015491b7137SMatan Azrad void
30166507c9f5SSuanming Mou mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
3017491b7137SMatan Azrad {
30186507c9f5SSuanming Mou 	struct rte_eth_dev *dev = tool_ctx;
3019491b7137SMatan Azrad 	struct mlx5_priv *priv = dev->data->dev_private;
3020491b7137SMatan Azrad 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
3021491b7137SMatan Azrad 
3022491b7137SMatan Azrad 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
3023491b7137SMatan Azrad }
3024491b7137SMatan Azrad 
3025e1592b6cSSuanming Mou /**
3026e1592b6cSSuanming Mou  * Get an Rx Hash queue.
3027e1592b6cSSuanming Mou  *
3028e1592b6cSSuanming Mou  * @param dev
3029e1592b6cSSuanming Mou  *   Pointer to Ethernet device.
3030e1592b6cSSuanming Mou  * @param rss_desc
3031e1592b6cSSuanming Mou  *   RSS configuration for the Rx hash queue.
3032e1592b6cSSuanming Mou  *
3033e1592b6cSSuanming Mou  * @return
30343a2f674bSSuanming Mou  *   A hash Rx queue on success, NULL otherwise.
3035e1592b6cSSuanming Mou  */
30363a2f674bSSuanming Mou struct mlx5_hrxq *
3037e1592b6cSSuanming Mou mlx5_hrxq_get(struct rte_eth_dev *dev, struct mlx5_flow_rss_desc *rss_desc)
3038e1592b6cSSuanming Mou {
3039e1592b6cSSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
30403a2f674bSSuanming Mou 	struct mlx5_hrxq *hrxq = NULL;
3041e78e5408SMatan Azrad 	struct mlx5_list_entry *entry;
3042e1592b6cSSuanming Mou 	struct mlx5_flow_cb_ctx ctx = {
3043e1592b6cSSuanming Mou 		.data = rss_desc,
3044e1592b6cSSuanming Mou 	};
3045e1592b6cSSuanming Mou 
3046fabf8a37SSuanming Mou 	if (rss_desc->shared_rss) {
3047e1592b6cSSuanming Mou 		hrxq = __mlx5_hrxq_create(dev, rss_desc);
3048e1592b6cSSuanming Mou 	} else {
3049679f46c7SMatan Azrad 		entry = mlx5_list_register(priv->hrxqs, &ctx);
3050e1592b6cSSuanming Mou 		if (!entry)
30513a2f674bSSuanming Mou 			return NULL;
3052e1592b6cSSuanming Mou 		hrxq = container_of(entry, typeof(*hrxq), entry);
3053e1592b6cSSuanming Mou 	}
30543a2f674bSSuanming Mou 	return hrxq;
3055e1592b6cSSuanming Mou }
3056e1592b6cSSuanming Mou 
3057e1592b6cSSuanming Mou /**
3058e1592b6cSSuanming Mou  * Release the hash Rx queue.
3059e1592b6cSSuanming Mou  *
3060e1592b6cSSuanming Mou  * @param dev
3061e1592b6cSSuanming Mou  *   Pointer to Ethernet device.
3062e1592b6cSSuanming Mou  * @param hrxq_idx
30633a2f674bSSuanming Mou  *   Hash Rx queue to release.
30643a2f674bSSuanming Mou  *
30653a2f674bSSuanming Mou  * @return
30663a2f674bSSuanming Mou  *   1 while a reference on it exists, 0 when freed.
30673a2f674bSSuanming Mou  */
30683a2f674bSSuanming Mou int mlx5_hrxq_obj_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
30693a2f674bSSuanming Mou {
30703a2f674bSSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
30713a2f674bSSuanming Mou 
30723a2f674bSSuanming Mou 	if (!hrxq)
30733a2f674bSSuanming Mou 		return 0;
30743a2f674bSSuanming Mou 	if (!hrxq->standalone)
30753a2f674bSSuanming Mou 		return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
30763a2f674bSSuanming Mou 	__mlx5_hrxq_remove(dev, hrxq);
30773a2f674bSSuanming Mou 	return 0;
30783a2f674bSSuanming Mou }
30793a2f674bSSuanming Mou 
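/*
 * Editorial usage sketch, not part of the driver: obtaining a hash Rx queue
 * for an RSS descriptor prepared by the flow layer and releasing it with the
 * paired helper. The descriptor contents are assumed; only mlx5_hrxq_get()
 * and mlx5_hrxq_obj_release() are real APIs from this file.
 */
static __rte_unused int
mlx5_hrxq_usage_sketch(struct rte_eth_dev *dev,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	struct mlx5_hrxq *hrxq = mlx5_hrxq_get(dev, rss_desc);

	if (hrxq == NULL)
		return -EINVAL;
	/* ... attach hrxq->action or hrxq->idx to a flow rule ... */
	return mlx5_hrxq_obj_release(dev, hrxq);
}
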
30803a2f674bSSuanming Mou /**
30813a2f674bSSuanming Mou  * Release the hash Rx queue with index.
30823a2f674bSSuanming Mou  *
30833a2f674bSSuanming Mou  * @param dev
30843a2f674bSSuanming Mou  *   Pointer to Ethernet device.
30853a2f674bSSuanming Mou  * @param hrxq_idx
3086e1592b6cSSuanming Mou  *   Index to Hash Rx queue to release.
3087e1592b6cSSuanming Mou  *
3088e1592b6cSSuanming Mou  * @return
3089e1592b6cSSuanming Mou  *   1 while a reference on it exists, 0 when freed.
3090e1592b6cSSuanming Mou  */
3091e1592b6cSSuanming Mou int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
3092e1592b6cSSuanming Mou {
3093e1592b6cSSuanming Mou 	struct mlx5_priv *priv = dev->data->dev_private;
3094e1592b6cSSuanming Mou 	struct mlx5_hrxq *hrxq;
3095e1592b6cSSuanming Mou 
3096e1592b6cSSuanming Mou 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
30973a2f674bSSuanming Mou 	return mlx5_hrxq_obj_release(dev, hrxq);
30985a959cbfSMichael Baum }
30995a959cbfSMichael Baum 
31005a959cbfSMichael Baum /**
31010c762e81SMichael Baum  * Create a drop Rx Hash queue.
31020c762e81SMichael Baum  *
31030c762e81SMichael Baum  * @param dev
31040c762e81SMichael Baum  *   Pointer to Ethernet device.
31050c762e81SMichael Baum  *
31060c762e81SMichael Baum  * @return
31070c762e81SMichael Baum  *   The Verbs/DevX object initialized on success, NULL otherwise and rte_errno is set.
31080c762e81SMichael Baum  */
31090c762e81SMichael Baum struct mlx5_hrxq *
31100c762e81SMichael Baum mlx5_drop_action_create(struct rte_eth_dev *dev)
31110c762e81SMichael Baum {
31120c762e81SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
31130c762e81SMichael Baum 	struct mlx5_hrxq *hrxq = NULL;
31140c762e81SMichael Baum 	int ret;
31150c762e81SMichael Baum 
3116fabf8a37SSuanming Mou 	if (priv->drop_queue.hrxq)
31170c762e81SMichael Baum 		return priv->drop_queue.hrxq;
3118a73b7855SYunjian Wang 	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN, 0, SOCKET_ID_ANY);
31190c762e81SMichael Baum 	if (!hrxq) {
31200c762e81SMichael Baum 		DRV_LOG(WARNING,
31210c762e81SMichael Baum 			"Port %u cannot allocate memory for drop queue.",
31220c762e81SMichael Baum 			dev->data->port_id);
31230c762e81SMichael Baum 		rte_errno = ENOMEM;
31240c762e81SMichael Baum 		goto error;
31250c762e81SMichael Baum 	}
31260c762e81SMichael Baum 	priv->drop_queue.hrxq = hrxq;
31270c762e81SMichael Baum 	hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
31280c762e81SMichael Baum 				      0, SOCKET_ID_ANY);
31290c762e81SMichael Baum 	if (!hrxq->ind_table) {
31300c762e81SMichael Baum 		rte_errno = ENOMEM;
31310c762e81SMichael Baum 		goto error;
31320c762e81SMichael Baum 	}
31330c762e81SMichael Baum 	ret = priv->obj_ops.drop_action_create(dev);
31340c762e81SMichael Baum 	if (ret < 0)
31350c762e81SMichael Baum 		goto error;
31360c762e81SMichael Baum 	return hrxq;
31370c762e81SMichael Baum error:
31380c762e81SMichael Baum 	if (hrxq) {
31390c762e81SMichael Baum 		if (hrxq->ind_table)
31400c762e81SMichael Baum 			mlx5_free(hrxq->ind_table);
31410c762e81SMichael Baum 		priv->drop_queue.hrxq = NULL;
31420c762e81SMichael Baum 		mlx5_free(hrxq);
31430c762e81SMichael Baum 	}
31440c762e81SMichael Baum 	return NULL;
31450c762e81SMichael Baum }
31460c762e81SMichael Baum 
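/*
 * Illustrative sketch only (not part of the driver code path): the drop
 * action is created once and cached in priv->drop_queue.hrxq, so repeated
 * create calls return the same object.  The helper name
 * example_use_drop_action() is hypothetical.
 *
 * @code
 * static int
 * example_use_drop_action(struct rte_eth_dev *dev)
 * {
 *	struct mlx5_hrxq *drop = mlx5_drop_action_create(dev);
 *
 *	if (drop == NULL)
 *		return -rte_errno; // rte_errno is set by the create path
 *	// ... reference the drop queue object from flow rules here ...
 *	// Released once for the whole port, typically at device stop/close.
 *	mlx5_drop_action_destroy(dev);
 *	return 0;
 * }
 * @endcode
 */
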
31470c762e81SMichael Baum /**
31480c762e81SMichael Baum  * Release a drop hash Rx queue.
31490c762e81SMichael Baum  *
31500c762e81SMichael Baum  * @param dev
31510c762e81SMichael Baum  *   Pointer to Ethernet device.
31520c762e81SMichael Baum  */
31530c762e81SMichael Baum void
31540c762e81SMichael Baum mlx5_drop_action_destroy(struct rte_eth_dev *dev)
31550c762e81SMichael Baum {
31560c762e81SMichael Baum 	struct mlx5_priv *priv = dev->data->dev_private;
31570c762e81SMichael Baum 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
31580c762e81SMichael Baum 
3159fabf8a37SSuanming Mou 	if (!priv->drop_queue.hrxq)
3160fabf8a37SSuanming Mou 		return;
31610c762e81SMichael Baum 	priv->obj_ops.drop_action_destroy(dev);
31620c762e81SMichael Baum 	mlx5_free(priv->drop_queue.rxq);
31630c762e81SMichael Baum 	mlx5_free(hrxq->ind_table);
31640c762e81SMichael Baum 	mlx5_free(hrxq);
31650c762e81SMichael Baum 	priv->drop_queue.rxq = NULL;
31660c762e81SMichael Baum 	priv->drop_queue.hrxq = NULL;
31670c762e81SMichael Baum }
31680c762e81SMichael Baum 
31690c762e81SMichael Baum /**
3170f5479b68SNélio Laranjeiro  * Verify the hash Rx queue list is empty.
3171f5479b68SNélio Laranjeiro  *
3172af4f09f2SNélio Laranjeiro  * @param dev
3173af4f09f2SNélio Laranjeiro  *   Pointer to Ethernet device.
3174f5479b68SNélio Laranjeiro  *
3175fb732b0aSNélio Laranjeiro  * @return
3176fb732b0aSNélio Laranjeiro  *   The number of objects not released.
3177f5479b68SNélio Laranjeiro  */
3178e1592b6cSSuanming Mou uint32_t
317923820a79SDekel Peled mlx5_hrxq_verify(struct rte_eth_dev *dev)
3180f5479b68SNélio Laranjeiro {
3181dbeba4cfSThomas Monjalon 	struct mlx5_priv *priv = dev->data->dev_private;
3182f5479b68SNélio Laranjeiro 
3183679f46c7SMatan Azrad 	return mlx5_list_get_entry_num(priv->hrxqs);
3184f5479b68SNélio Laranjeiro }
318578be8852SNelio Laranjeiro 
318678be8852SNelio Laranjeiro /**
3187a2854c4dSViacheslav Ovsiienko  * Set the Rx queue timestamp conversion parameters.
3188a2854c4dSViacheslav Ovsiienko  *
3189a2854c4dSViacheslav Ovsiienko  * @param[in] dev
3190a2854c4dSViacheslav Ovsiienko  *   Pointer to the Ethernet device structure.
3191a2854c4dSViacheslav Ovsiienko  */
3192a2854c4dSViacheslav Ovsiienko void
3193a2854c4dSViacheslav Ovsiienko mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
3194a2854c4dSViacheslav Ovsiienko {
3195a2854c4dSViacheslav Ovsiienko 	struct mlx5_priv *priv = dev->data->dev_private;
3196a2854c4dSViacheslav Ovsiienko 	struct mlx5_dev_ctx_shared *sh = priv->sh;
3197a2854c4dSViacheslav Ovsiienko 	unsigned int i;
3198a2854c4dSViacheslav Ovsiienko 
3199a2854c4dSViacheslav Ovsiienko 	for (i = 0; i != priv->rxqs_n; ++i) {
32005cf0707fSXueming Li 		struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
32015cf0707fSXueming Li 
32025cf0707fSXueming Li 		if (data == NULL)
3203a2854c4dSViacheslav Ovsiienko 			continue;
3204a2854c4dSViacheslav Ovsiienko 		data->sh = sh;
320587af0d1eSMichael Baum 		data->rt_timestamp = sh->dev_cap.rt_timestamp;
3206a2854c4dSViacheslav Ovsiienko 	}
3207a2854c4dSViacheslav Ovsiienko }
320880f872eeSMichael Baum 
320980f872eeSMichael Baum /**
321080f872eeSMichael Baum  * Validate the given external RxQ rte_flow index, and get a pointer to the
321180f872eeSMichael Baum  * corresponding external RxQ object to map/unmap.
321280f872eeSMichael Baum  *
321380f872eeSMichael Baum  * @param[in] port_id
321480f872eeSMichael Baum  *   The port identifier of the Ethernet device.
321580f872eeSMichael Baum  * @param[in] dpdk_idx
321680f872eeSMichael Baum  *   Queue index in rte_flow.
321780f872eeSMichael Baum  *
321880f872eeSMichael Baum  * @return
321980f872eeSMichael Baum  *   Pointer to the corresponding external RxQ on success,
322080f872eeSMichael Baum  *   NULL otherwise and rte_errno is set.
322180f872eeSMichael Baum  */
32228e8b44f2SSuanming Mou static struct mlx5_external_q *
322380f872eeSMichael Baum mlx5_external_rx_queue_get_validate(uint16_t port_id, uint16_t dpdk_idx)
322480f872eeSMichael Baum {
322580f872eeSMichael Baum 	struct rte_eth_dev *dev;
322680f872eeSMichael Baum 	struct mlx5_priv *priv;
32271944fbc3SSuanming Mou 	int ret;
322880f872eeSMichael Baum 
322986647d46SThomas Monjalon 	if (dpdk_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN) {
323080f872eeSMichael Baum 		DRV_LOG(ERR, "Queue index %u should be in range: [%u, %u].",
323186647d46SThomas Monjalon 			dpdk_idx, RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX);
323280f872eeSMichael Baum 		rte_errno = EINVAL;
323380f872eeSMichael Baum 		return NULL;
323480f872eeSMichael Baum 	}
32351944fbc3SSuanming Mou 	ret = mlx5_devx_extq_port_validate(port_id);
32361944fbc3SSuanming Mou 	if (unlikely(ret))
323780f872eeSMichael Baum 		return NULL;
323880f872eeSMichael Baum 	dev = &rte_eth_devices[port_id];
323980f872eeSMichael Baum 	priv = dev->data->dev_private;
324080f872eeSMichael Baum 	/*
324180f872eeSMichael Baum 	 * When the user configures a remote PD and CTX and the device creates
324280f872eeSMichael Baum 	 * RxQs by DevX, the external RxQs array is allocated.
324380f872eeSMichael Baum 	 */
324480f872eeSMichael Baum 	MLX5_ASSERT(priv->ext_rxqs != NULL);
324586647d46SThomas Monjalon 	return &priv->ext_rxqs[dpdk_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
324680f872eeSMichael Baum }
324780f872eeSMichael Baum 
324880f872eeSMichael Baum int
324980f872eeSMichael Baum rte_pmd_mlx5_external_rx_queue_id_map(uint16_t port_id, uint16_t dpdk_idx,
325080f872eeSMichael Baum 				      uint32_t hw_idx)
325180f872eeSMichael Baum {
32528e8b44f2SSuanming Mou 	struct mlx5_external_q *ext_rxq;
325380f872eeSMichael Baum 	uint32_t unmapped = 0;
325480f872eeSMichael Baum 
325580f872eeSMichael Baum 	ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
325680f872eeSMichael Baum 	if (ext_rxq == NULL)
325780f872eeSMichael Baum 		return -rte_errno;
3258e12a0166STyler Retzlaff 	if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
3259e12a0166STyler Retzlaff 					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
326080f872eeSMichael Baum 		if (ext_rxq->hw_id != hw_idx) {
326180f872eeSMichael Baum 			DRV_LOG(ERR, "Port %u external RxQ index %u "
326280f872eeSMichael Baum 				"is already mapped to HW index (requested is "
326380f872eeSMichael Baum 				"%u, existing is %u).",
326480f872eeSMichael Baum 				port_id, dpdk_idx, hw_idx, ext_rxq->hw_id);
326580f872eeSMichael Baum 			rte_errno = EEXIST;
326680f872eeSMichael Baum 			return -rte_errno;
326780f872eeSMichael Baum 		}
326880f872eeSMichael Baum 		DRV_LOG(WARNING, "Port %u external RxQ index %u "
326980f872eeSMichael Baum 			"is already mapped to the requested HW index (%u)",
327080f872eeSMichael Baum 			port_id, dpdk_idx, hw_idx);
327180f872eeSMichael Baum 
327280f872eeSMichael Baum 	} else {
327380f872eeSMichael Baum 		ext_rxq->hw_id = hw_idx;
327480f872eeSMichael Baum 		DRV_LOG(DEBUG, "Port %u external RxQ index %u "
327580f872eeSMichael Baum 			"is successfully mapped to the requested HW index (%u)",
327680f872eeSMichael Baum 			port_id, dpdk_idx, hw_idx);
327780f872eeSMichael Baum 	}
327880f872eeSMichael Baum 	return 0;
327980f872eeSMichael Baum }
328080f872eeSMichael Baum 
328180f872eeSMichael Baum int
328280f872eeSMichael Baum rte_pmd_mlx5_external_rx_queue_id_unmap(uint16_t port_id, uint16_t dpdk_idx)
328380f872eeSMichael Baum {
32848e8b44f2SSuanming Mou 	struct mlx5_external_q *ext_rxq;
328580f872eeSMichael Baum 	uint32_t mapped = 1;
328680f872eeSMichael Baum 
328780f872eeSMichael Baum 	ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
328880f872eeSMichael Baum 	if (ext_rxq == NULL)
328980f872eeSMichael Baum 		return -rte_errno;
329080f872eeSMichael Baum 	if (ext_rxq->refcnt > 1) {
329180f872eeSMichael Baum 		DRV_LOG(ERR, "Port %u external RxQ index %u still referenced.",
329280f872eeSMichael Baum 			port_id, dpdk_idx);
329380f872eeSMichael Baum 		rte_errno = EINVAL;
329480f872eeSMichael Baum 		return -rte_errno;
329580f872eeSMichael Baum 	}
3296e12a0166STyler Retzlaff 	if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
3297e12a0166STyler Retzlaff 					 rte_memory_order_relaxed, rte_memory_order_relaxed)) {
329880f872eeSMichael Baum 		DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
329980f872eeSMichael Baum 			port_id, dpdk_idx);
330080f872eeSMichael Baum 		rte_errno = EINVAL;
330180f872eeSMichael Baum 		return -rte_errno;
330280f872eeSMichael Baum 	}
330380f872eeSMichael Baum 	DRV_LOG(DEBUG,
330480f872eeSMichael Baum 		"Port %u external RxQ index %u is successfully unmapped.",
330580f872eeSMichael Baum 		port_id, dpdk_idx);
330680f872eeSMichael Baum 	return 0;
330780f872eeSMichael Baum }
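
/*
 * Illustrative application-side sketch only (not part of the driver code
 * path): it maps an externally created HW Rx queue to an rte_flow-level
 * index and unmaps it again.  The hw_queue_id value comes from the
 * application's own DevX/verbs queue creation, the chosen flow index is a
 * placeholder, and example_external_rxq() is a hypothetical helper.
 *
 * @code
 * #include <rte_pmd_mlx5.h>
 *
 * static int
 * example_external_rxq(uint16_t port_id, uint32_t hw_queue_id)
 * {
 *	// Any index in [RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN, UINT16_MAX].
 *	uint16_t flow_idx = RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN;
 *	int ret;
 *
 *	ret = rte_pmd_mlx5_external_rx_queue_id_map(port_id, flow_idx,
 *						    hw_queue_id);
 *	if (ret != 0)
 *		return ret;
 *	// ... flow_idx can now be used in rte_flow QUEUE/RSS actions ...
 *	return rte_pmd_mlx5_external_rx_queue_id_unmap(port_id, flow_idx);
 * }
 * @endcode
 */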
3308