1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <fcntl.h>
11 #include <sys/queue.h>
12 
13 #include <rte_mbuf.h>
14 #include <rte_malloc.h>
15 #include <ethdev_driver.h>
16 #include <rte_common.h>
17 #include <rte_interrupts.h>
18 #include <rte_debug.h>
19 #include <rte_io.h>
20 #include <rte_eal_paging.h>
21 
22 #include <mlx5_glue.h>
23 #include <mlx5_malloc.h>
24 
25 #include "mlx5_defs.h"
26 #include "mlx5.h"
27 #include "mlx5_rxtx.h"
28 #include "mlx5_utils.h"
29 #include "mlx5_autoconf.h"
30 
31 
32 /* Default RSS hash key also used for ConnectX-3. */
33 uint8_t rss_hash_default_key[] = {
34 	0x2c, 0xc6, 0x81, 0xd1,
35 	0x5b, 0xdb, 0xf4, 0xf7,
36 	0xfc, 0xa2, 0x83, 0x19,
37 	0xdb, 0x1a, 0x3e, 0x94,
38 	0x6b, 0x9e, 0x38, 0xd9,
39 	0x2c, 0x9c, 0x03, 0xd1,
40 	0xad, 0x99, 0x44, 0xa7,
41 	0xd9, 0x56, 0x3d, 0x59,
42 	0x06, 0x3c, 0x25, 0xf3,
43 	0xfc, 0x1f, 0xdc, 0x2a,
44 };
45 
46 /* Length of the default RSS hash key. */
47 static_assert(MLX5_RSS_HASH_KEY_LEN ==
48 	      (unsigned int)sizeof(rss_hash_default_key),
49 	      "wrong RSS default key size.");
50 
51 /**
52  * Calculate the number of CQEs in CQ for the Rx queue.
53  *
54  * @param rxq_data
55  *   Pointer to receive queue structure.
56  *
57  * @return
58  *   Number of CQEs in CQ.
59  */
60 unsigned int
61 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
62 {
63 	unsigned int cqe_n;
64 	unsigned int wqe_n = 1 << rxq_data->elts_n;
65 
66 	if (mlx5_rxq_mprq_enabled(rxq_data))
67 		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
68 	else
69 		cqe_n = wqe_n - 1;
70 	return cqe_n;
71 }
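/*
 * Illustrative example (not part of the driver): with elts_n = 10 the RQ
 * holds 1 << 10 = 1024 WQEs. In SPRQ mode the CQ then needs
 * 1024 - 1 = 1023 CQEs, while in MPRQ mode with strd_num_n = 6
 * (64 strides per WQE) it needs 1024 * 64 - 1 = 65535 CQEs, since every
 * stride may complete as a separate packet.
 */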
72 
73 /**
74  * Allocate RX queue elements for Multi-Packet RQ.
75  *
76  * @param rxq_ctrl
77  *   Pointer to RX queue structure.
78  *
79  * @return
80  *   0 on success, a negative errno value otherwise and rte_errno is set.
81  */
82 static int
83 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
84 {
85 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
86 	unsigned int wqe_n = 1 << rxq->elts_n;
87 	unsigned int i;
88 	int err;
89 
90 	/* Iterate on buffers, one extra for the replacement buffer. */
91 	for (i = 0; i <= wqe_n; ++i) {
92 		struct mlx5_mprq_buf *buf;
93 
94 		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
95 			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
96 			rte_errno = ENOMEM;
97 			goto error;
98 		}
99 		if (i < wqe_n)
100 			(*rxq->mprq_bufs)[i] = buf;
101 		else
102 			rxq->mprq_repl = buf;
103 	}
104 	DRV_LOG(DEBUG,
105 		"port %u MPRQ queue %u allocated and configured %u segments",
106 		rxq->port_id, rxq->idx, wqe_n);
107 	return 0;
108 error:
109 	err = rte_errno; /* Save rte_errno before cleanup. */
110 	wqe_n = i;
111 	for (i = 0; (i != wqe_n); ++i) {
112 		if ((*rxq->mprq_bufs)[i] != NULL)
113 			rte_mempool_put(rxq->mprq_mp,
114 					(*rxq->mprq_bufs)[i]);
115 		(*rxq->mprq_bufs)[i] = NULL;
116 	}
117 	DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
118 		rxq->port_id, rxq->idx);
119 	rte_errno = err; /* Restore rte_errno. */
120 	return -rte_errno;
121 }
122 
123 /**
124  * Allocate RX queue elements for Single-Packet RQ.
125  *
126  * @param rxq_ctrl
127  *   Pointer to RX queue structure.
128  *
129  * @return
130  *   0 on success, a negative errno value otherwise and rte_errno is set.
131  */
132 static int
133 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
134 {
135 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
136 	unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
137 		(1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
138 		(1 << rxq_ctrl->rxq.elts_n);
139 	unsigned int i;
140 	int err;
141 
142 	/* Iterate on segments. */
143 	for (i = 0; (i != elts_n); ++i) {
144 		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
145 		struct rte_mbuf *buf;
146 
147 		buf = rte_pktmbuf_alloc(seg->mp);
148 		if (buf == NULL) {
149 			DRV_LOG(ERR, "port %u empty mbuf pool",
150 				PORT_ID(rxq_ctrl->priv));
151 			rte_errno = ENOMEM;
152 			goto error;
153 		}
154 		/* Headroom is reserved by rte_pktmbuf_alloc(). */
155 		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
156 		/* Buffer is supposed to be empty. */
157 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
158 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
159 		MLX5_ASSERT(!buf->next);
160 		SET_DATA_OFF(buf, seg->offset);
161 		PORT(buf) = rxq_ctrl->rxq.port_id;
162 		DATA_LEN(buf) = seg->length;
163 		PKT_LEN(buf) = seg->length;
164 		NB_SEGS(buf) = 1;
165 		(*rxq_ctrl->rxq.elts)[i] = buf;
166 	}
167 	/* If Rx vector is activated. */
168 	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
169 		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
170 		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
171 		struct rte_pktmbuf_pool_private *priv =
172 			(struct rte_pktmbuf_pool_private *)
173 				rte_mempool_get_priv(rxq_ctrl->rxq.mp);
174 		int j;
175 
176 		/* Initialize default rearm_data for vPMD. */
177 		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
178 		rte_mbuf_refcnt_set(mbuf_init, 1);
179 		mbuf_init->nb_segs = 1;
180 		mbuf_init->port = rxq->port_id;
181 		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
182 			mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
183 		/*
184 		 * prevent compiler reordering:
185 		 * rearm_data covers previous fields.
186 		 */
187 		rte_compiler_barrier();
188 		rxq->mbuf_initializer =
189 			*(rte_xmm_t *)&mbuf_init->rearm_data;
190 		/* Padding with a fake mbuf for vectorized Rx. */
191 		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
192 			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
193 	}
194 	DRV_LOG(DEBUG,
195 		"port %u SPRQ queue %u allocated and configured %u segments"
196 		" (max %u packets)",
197 		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
198 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
199 	return 0;
200 error:
201 	err = rte_errno; /* Save rte_errno before cleanup. */
202 	elts_n = i;
203 	for (i = 0; (i != elts_n); ++i) {
204 		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
205 			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
206 		(*rxq_ctrl->rxq.elts)[i] = NULL;
207 	}
208 	DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
209 		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
210 	rte_errno = err; /* Restore rte_errno. */
211 	return -rte_errno;
212 }
213 
214 /**
215  * Allocate RX queue elements.
216  *
217  * @param rxq_ctrl
218  *   Pointer to RX queue structure.
219  *
220  * @return
221  *   0 on success, errno value on failure.
222  */
223 int
224 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
225 {
226 	int ret = 0;
227 
228 	/**
229 	 * For MPRQ we need to allocate both MPRQ buffers
230 	 * for WQEs and simple mbufs for vector processing.
231 	 */
232 	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
233 		ret = rxq_alloc_elts_mprq(rxq_ctrl);
234 	return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
235 }
236 
237 /**
238  * Free RX queue elements for Multi-Packet RQ.
239  *
240  * @param rxq_ctrl
241  *   Pointer to RX queue structure.
242  */
243 static void
244 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
245 {
246 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
247 	uint16_t i;
248 
249 	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs",
250 		rxq->port_id, rxq->idx, (1u << rxq->elts_n));
251 	if (rxq->mprq_bufs == NULL)
252 		return;
253 	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
254 		if ((*rxq->mprq_bufs)[i] != NULL)
255 			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
256 		(*rxq->mprq_bufs)[i] = NULL;
257 	}
258 	if (rxq->mprq_repl != NULL) {
259 		mlx5_mprq_buf_free(rxq->mprq_repl);
260 		rxq->mprq_repl = NULL;
261 	}
262 }
263 
264 /**
265  * Free RX queue elements for Single-Packet RQ.
266  *
267  * @param rxq_ctrl
268  *   Pointer to RX queue structure.
269  */
270 static void
271 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
272 {
273 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
274 	const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
275 		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
276 		(1 << rxq->elts_n);
277 	const uint16_t q_mask = q_n - 1;
278 	uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
279 		rxq->elts_ci : rxq->rq_ci;
280 	uint16_t used = q_n - (elts_ci - rxq->rq_pi);
281 	uint16_t i;
282 
283 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
284 		PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
285 	if (rxq->elts == NULL)
286 		return;
287 	/**
288 	 * Some mbufs in the ring belong to the application.
289 	 * They cannot be freed.
290 	 */
291 	if (mlx5_rxq_check_vec_support(rxq) > 0) {
292 		for (i = 0; i < used; ++i)
293 			(*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
294 		rxq->rq_pi = elts_ci;
295 	}
296 	for (i = 0; i != q_n; ++i) {
297 		if ((*rxq->elts)[i] != NULL)
298 			rte_pktmbuf_free_seg((*rxq->elts)[i]);
299 		(*rxq->elts)[i] = NULL;
300 	}
301 }
302 
303 /**
304  * Free RX queue elements.
305  *
306  * @param rxq_ctrl
307  *   Pointer to RX queue structure.
308  */
309 static void
310 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
311 {
312 	/*
313 	 * For MPRQ we need to allocate both MPRQ buffers
314 	 * for WQEs and simple mbufs for vector processing.
315 	 */
316 	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
317 		rxq_free_elts_mprq(rxq_ctrl);
318 	rxq_free_elts_sprq(rxq_ctrl);
319 }
320 
321 /**
322  * Returns the per-queue supported offloads.
323  *
324  * @param dev
325  *   Pointer to Ethernet device.
326  *
327  * @return
328  *   Supported Rx offloads.
329  */
330 uint64_t
331 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
332 {
333 	struct mlx5_priv *priv = dev->data->dev_private;
334 	struct mlx5_dev_config *config = &priv->config;
335 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
336 			     DEV_RX_OFFLOAD_TIMESTAMP |
337 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
338 			     DEV_RX_OFFLOAD_RSS_HASH);
339 
340 	if (!config->mprq.enabled)
341 		offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
342 	if (config->hw_fcs_strip)
343 		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
344 	if (config->hw_csum)
345 		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
346 			     DEV_RX_OFFLOAD_UDP_CKSUM |
347 			     DEV_RX_OFFLOAD_TCP_CKSUM);
348 	if (config->hw_vlan_strip)
349 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
350 	if (MLX5_LRO_SUPPORTED(dev))
351 		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
352 	return offloads;
353 }
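/*
 * Illustrative sketch (not part of the driver): an application observes the
 * values computed above through the generic ethdev capability query. The
 * port identifier below is hypothetical.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint64_t rx_offloads = 0;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0 &&
 *	    (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_TCP_LRO))
 *		rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 */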
354 
355 
356 /**
357  * Returns the per-port supported offloads.
358  *
359  * @return
360  *   Supported Rx offloads.
361  */
362 uint64_t
363 mlx5_get_rx_port_offloads(void)
364 {
365 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
366 
367 	return offloads;
368 }
369 
370 /**
371  * Verify if the queue can be released.
372  *
373  * @param dev
374  *   Pointer to Ethernet device.
375  * @param idx
376  *   RX queue index.
377  *
378  * @return
379  *   1 if the queue can be released
380  *   0 if the queue cannot be released because there are references to it.
381  *   Negative errno and rte_errno is set if queue doesn't exist.
382  */
383 static int
384 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
385 {
386 	struct mlx5_priv *priv = dev->data->dev_private;
387 	struct mlx5_rxq_ctrl *rxq_ctrl;
388 
389 	if (!(*priv->rxqs)[idx]) {
390 		rte_errno = EINVAL;
391 		return -rte_errno;
392 	}
393 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
394 	return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
395 }
396 
397 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
398 static void
399 rxq_sync_cq(struct mlx5_rxq_data *rxq)
400 {
401 	const uint16_t cqe_n = 1 << rxq->cqe_n;
402 	const uint16_t cqe_mask = cqe_n - 1;
403 	volatile struct mlx5_cqe *cqe;
404 	int ret, i;
405 
406 	i = cqe_n;
407 	do {
408 		cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
409 		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
410 		if (ret == MLX5_CQE_STATUS_HW_OWN)
411 			break;
412 		if (ret == MLX5_CQE_STATUS_ERR) {
413 			rxq->cq_ci++;
414 			continue;
415 		}
416 		MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN);
417 		if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) {
418 			rxq->cq_ci++;
419 			continue;
420 		}
421 		/* Compute the next non compressed CQE. */
422 		rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt);
423 
424 	} while (--i);
425 	/* Move all CQEs to HW ownership, including possible MiniCQEs. */
426 	for (i = 0; i < cqe_n; i++) {
427 		cqe = &(*rxq->cqes)[i];
428 		cqe->op_own = MLX5_CQE_INVALIDATE;
429 	}
430 	/* Resync CQE and WQE (WQ in RESET state). */
431 	rte_io_wmb();
432 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
433 	rte_io_wmb();
434 	*rxq->rq_db = rte_cpu_to_be_32(0);
435 	rte_io_wmb();
436 }
437 
438 /**
439  * Rx queue stop. Device queue goes to the RESET state,
440  * all involved mbufs are freed from WQ.
441  *
442  * @param dev
443  *   Pointer to Ethernet device structure.
444  * @param idx
445  *   RX queue index.
446  *
447  * @return
448  *   0 on success, a negative errno value otherwise and rte_errno is set.
449  */
450 int
451 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
452 {
453 	struct mlx5_priv *priv = dev->data->dev_private;
454 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
455 	struct mlx5_rxq_ctrl *rxq_ctrl =
456 			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
457 	int ret;
458 
459 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
460 	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
461 	if (ret) {
462 		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET:  %s",
463 			strerror(errno));
464 		rte_errno = errno;
465 		return ret;
466 	}
467 	/* Remove all processed CQEs. */
468 	rxq_sync_cq(rxq);
469 	/* Free all involved mbufs. */
470 	rxq_free_elts(rxq_ctrl);
471 	/* Set the actual queue state. */
472 	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
473 	return 0;
474 }
475 
476 /**
477  * Rx queue stop. Device queue goes to the RESET state,
478  * all involved mbufs are freed from WQ.
479  *
480  * @param dev
481  *   Pointer to Ethernet device structure.
482  * @param idx
483  *   RX queue index.
484  *
485  * @return
486  *   0 on success, a negative errno value otherwise and rte_errno is set.
487  */
488 int
489 mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
490 {
491 	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
492 	int ret;
493 
494 	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
495 		DRV_LOG(ERR, "Hairpin queue can't be stopped");
496 		rte_errno = EINVAL;
497 		return -EINVAL;
498 	}
499 	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
500 		return 0;
501 	/*
502 	 * Vectorized Rx burst requires the CQ and RQ indices to stay
503 	 * synchronized, which might be broken on RQ restart and cause
504 	 * Rx malfunction, so stopping the queue is not supported when
505 	 * the vectorized Rx burst is engaged.
506 	 * The burst routine pointer depends on the process type,
507 	 * hence the check is performed here in each process.
508 	 */
509 	if (pkt_burst == mlx5_rx_burst_vec) {
510 		DRV_LOG(ERR, "Rx queue stop is not supported "
511 			"for vectorized Rx");
512 		rte_errno = EINVAL;
513 		return -EINVAL;
514 	}
515 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
516 		ret = mlx5_mp_os_req_queue_control(dev, idx,
517 						   MLX5_MP_REQ_QUEUE_RX_STOP);
518 	} else {
519 		ret = mlx5_rx_queue_stop_primary(dev, idx);
520 	}
521 	return ret;
522 }
523 
524 /**
525  * Rx queue start. Device queue goes to the ready state,
526  * all required mbufs are allocated and WQ is replenished.
527  *
528  * @param dev
529  *   Pointer to Ethernet device structure.
530  * @param idx
531  *   RX queue index.
532  *
533  * @return
534  *   0 on success, a negative errno value otherwise and rte_errno is set.
535  */
536 int
537 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
538 {
539 	struct mlx5_priv *priv = dev->data->dev_private;
540 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
541 	struct mlx5_rxq_ctrl *rxq_ctrl =
542 			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
543 	int ret;
544 
545 	MLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);
546 	/* Allocate needed buffers. */
547 	ret = rxq_alloc_elts(rxq_ctrl);
548 	if (ret) {
549 		DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
550 		rte_errno = errno;
551 		return ret;
552 	}
553 	rte_io_wmb();
554 	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
555 	rte_io_wmb();
556 	/* Reset RQ consumer before moving queue to READY state. */
557 	*rxq->rq_db = rte_cpu_to_be_32(0);
558 	rte_io_wmb();
559 	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
560 	if (ret) {
561 		DRV_LOG(ERR, "Cannot change Rx WQ state to READY:  %s",
562 			strerror(errno));
563 		rte_errno = errno;
564 		return ret;
565 	}
566 	/* Reinitialize RQ - set WQEs. */
567 	mlx5_rxq_initialize(rxq);
568 	rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
569 	/* Set actual queue state. */
570 	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
571 	return 0;
572 }
573 
574 /**
575  * Rx queue start. Device queue goes to the ready state,
576  * all required mbufs are allocated and WQ is replenished.
577  *
578  * @param dev
579  *   Pointer to Ethernet device structure.
580  * @param idx
581  *   RX queue index.
582  *
583  * @return
584  *   0 on success, a negative errno value otherwise and rte_errno is set.
585  */
586 int
587 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx)
588 {
589 	int ret;
590 
591 	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
592 		DRV_LOG(ERR, "Hairpin queue can't be started");
593 		rte_errno = EINVAL;
594 		return -EINVAL;
595 	}
596 	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED)
597 		return 0;
598 	if (rte_eal_process_type() ==  RTE_PROC_SECONDARY) {
599 		ret = mlx5_mp_os_req_queue_control(dev, idx,
600 						   MLX5_MP_REQ_QUEUE_RX_START);
601 	} else {
602 		ret = mlx5_rx_queue_start_primary(dev, idx);
603 	}
604 	return ret;
605 }
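/*
 * Illustrative sketch (not part of the driver): applications reach the
 * stop/start callbacks above through the generic ethdev API, for example to
 * drain and restart a single queue at runtime. Identifiers are hypothetical.
 *
 *	if (rte_eth_dev_rx_queue_stop(port_id, queue_id) == 0) {
 *		reconfigure_app_state(queue_id);
 *		rte_eth_dev_rx_queue_start(port_id, queue_id);
 *	}
 *
 * Note that, as checked above, the stop request is rejected with EINVAL for
 * hairpin queues and when the vectorized Rx burst is in use.
 */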
606 
607 /**
608  * Rx queue presetup checks.
609  *
610  * @param dev
611  *   Pointer to Ethernet device structure.
612  * @param idx
613  *   RX queue index.
614  * @param desc
615  *   Number of descriptors to configure in queue.
616  *
617  * @return
618  *   0 on success, a negative errno value otherwise and rte_errno is set.
619  */
620 static int
621 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
622 {
623 	struct mlx5_priv *priv = dev->data->dev_private;
624 
625 	if (!rte_is_power_of_2(*desc)) {
626 		*desc = 1 << log2above(*desc);
627 		DRV_LOG(WARNING,
628 			"port %u increased number of descriptors in Rx queue %u"
629 			" to the next power of two (%d)",
630 			dev->data->port_id, idx, *desc);
631 	}
632 	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
633 		dev->data->port_id, idx, *desc);
634 	if (idx >= priv->rxqs_n) {
635 		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
636 			dev->data->port_id, idx, priv->rxqs_n);
637 		rte_errno = EOVERFLOW;
638 		return -rte_errno;
639 	}
640 	if (!mlx5_rxq_releasable(dev, idx)) {
641 		DRV_LOG(ERR, "port %u unable to release queue index %u",
642 			dev->data->port_id, idx);
643 		rte_errno = EBUSY;
644 		return -rte_errno;
645 	}
646 	mlx5_rxq_release(dev, idx);
647 	return 0;
648 }
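/*
 * For example, a request for 1000 descriptors is rounded up here to
 * 1 << log2above(1000) = 1024 before the queue is created.
 */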
649 
650 /**
651  *
652  * @param dev
653  *   Pointer to Ethernet device structure.
654  * @param idx
655  *   RX queue index.
656  * @param desc
657  *   Number of descriptors to configure in queue.
658  * @param socket
659  *   NUMA socket on which memory must be allocated.
660  * @param[in] conf
661  *   Thresholds parameters.
662  * @param mp
663  *   Memory pool for buffer allocations.
664  *
665  * @return
666  *   0 on success, a negative errno value otherwise and rte_errno is set.
667  */
668 int
669 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
670 		    unsigned int socket, const struct rte_eth_rxconf *conf,
671 		    struct rte_mempool *mp)
672 {
673 	struct mlx5_priv *priv = dev->data->dev_private;
674 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
675 	struct mlx5_rxq_ctrl *rxq_ctrl =
676 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
677 	struct rte_eth_rxseg_split *rx_seg =
678 				(struct rte_eth_rxseg_split *)conf->rx_seg;
679 	struct rte_eth_rxseg_split rx_single = {.mp = mp};
680 	uint16_t n_seg = conf->rx_nseg;
681 	int res;
682 
683 	if (mp) {
684 		/*
685 		 * The parameters should be checked on rte_eth_dev layer.
686 		 * If mp is specified, it means the compatible single-segment
687 		 * configuration without buffer split feature tuning.
688 		 */
689 		rx_seg = &rx_single;
690 		n_seg = 1;
691 	}
692 	if (n_seg > 1) {
693 		uint64_t offloads = conf->offloads |
694 				    dev->data->dev_conf.rxmode.offloads;
695 
696 		/* The offloads should be checked on rte_eth_dev layer. */
697 		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
698 		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
699 			DRV_LOG(ERR, "port %u queue index %u split "
700 				     "offload not configured",
701 				     dev->data->port_id, idx);
702 			rte_errno = ENOSPC;
703 			return -rte_errno;
704 		}
705 		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
706 	}
707 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
708 	if (res)
709 		return res;
710 	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
711 	if (!rxq_ctrl) {
712 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
713 			dev->data->port_id, idx);
714 		rte_errno = ENOMEM;
715 		return -rte_errno;
716 	}
717 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
718 		dev->data->port_id, idx);
719 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
720 	return 0;
721 }
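/*
 * Illustrative sketch (not part of the driver): a buffer split configuration
 * as an application could pass it down to this callback through
 * rte_eth_rx_queue_setup() (DPDK 20.11+ API). The pool pointers and sizes
 * below are hypothetical.
 *
 *	union rte_eth_rxseg seg[2] = {
 *		{ .split = { .mp = hdr_pool, .length = 128, .offset = 0 } },
 *		{ .split = { .mp = pay_pool, .length = 0,   .offset = 0 } },
 *	};
 *	struct rte_eth_rxconf rxconf = {
 *		.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 *			    DEV_RX_OFFLOAD_SCATTER,
 *		.rx_seg = seg,
 *		.rx_nseg = 2,
 *	};
 *
 *	rte_eth_rx_queue_setup(port_id, queue_id, 1024, socket_id,
 *			       &rxconf, NULL);
 *
 * With mp == NULL and rx_nseg > 1 the checks above require the BUFFER_SPLIT
 * offload; a zero segment length means "use the rest of the pool's data
 * room", as handled in mlx5_rxq_new().
 */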
722 
723 /**
724  *
725  * @param dev
726  *   Pointer to Ethernet device structure.
727  * @param idx
728  *   RX queue index.
729  * @param desc
730  *   Number of descriptors to configure in queue.
731  * @param hairpin_conf
732  *   Hairpin configuration parameters.
733  *
734  * @return
735  *   0 on success, a negative errno value otherwise and rte_errno is set.
736  */
737 int
738 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
739 			    uint16_t desc,
740 			    const struct rte_eth_hairpin_conf *hairpin_conf)
741 {
742 	struct mlx5_priv *priv = dev->data->dev_private;
743 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
744 	struct mlx5_rxq_ctrl *rxq_ctrl =
745 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
746 	int res;
747 
748 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
749 	if (res)
750 		return res;
751 	if (hairpin_conf->peer_count != 1) {
752 		rte_errno = EINVAL;
753 		DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u"
754 			" peer count is %u", dev->data->port_id,
755 			idx, hairpin_conf->peer_count);
756 		return -rte_errno;
757 	}
758 	if (hairpin_conf->peers[0].port == dev->data->port_id) {
759 		if (hairpin_conf->peers[0].queue >= priv->txqs_n) {
760 			rte_errno = EINVAL;
761 			DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
762 				" index %u, Tx %u is larger than %u",
763 				dev->data->port_id, idx,
764 				hairpin_conf->peers[0].queue, priv->txqs_n);
765 			return -rte_errno;
766 		}
767 	} else {
768 		if (hairpin_conf->manual_bind == 0 ||
769 		    hairpin_conf->tx_explicit == 0) {
770 			rte_errno = EINVAL;
771 			DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue"
772 				" index %u peer port %u with attributes %u %u",
773 				dev->data->port_id, idx,
774 				hairpin_conf->peers[0].port,
775 				hairpin_conf->manual_bind,
776 				hairpin_conf->tx_explicit);
777 			return -rte_errno;
778 		}
779 	}
780 	rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
781 	if (!rxq_ctrl) {
782 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
783 			dev->data->port_id, idx);
784 		rte_errno = ENOMEM;
785 		return -rte_errno;
786 	}
787 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
788 		dev->data->port_id, idx);
789 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
790 	return 0;
791 }
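/*
 * Illustrative sketch (not part of the driver): a single-peer hairpin queue
 * bound to Tx queue 0 of the same port, as an application could request
 * through rte_eth_rx_hairpin_queue_setup(). Identifiers are hypothetical.
 *
 *	struct rte_eth_hairpin_conf hconf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = port_id, .queue = 0 },
 *	};
 *
 *	rte_eth_rx_hairpin_queue_setup(port_id, rx_queue_id, 512, &hconf);
 *
 * For a peer on another port the checks above additionally require
 * manual_bind and tx_explicit to be set.
 */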
792 
793 /**
794  * DPDK callback to release a RX queue.
795  *
796  * @param dpdk_rxq
797  *   Generic RX queue pointer.
798  */
799 void
800 mlx5_rx_queue_release(void *dpdk_rxq)
801 {
802 	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
803 	struct mlx5_rxq_ctrl *rxq_ctrl;
804 	struct mlx5_priv *priv;
805 
806 	if (rxq == NULL)
807 		return;
808 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
809 	priv = rxq_ctrl->priv;
810 	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
811 		rte_panic("port %u Rx queue %u is still used by a flow and"
812 			  " cannot be removed\n",
813 			  PORT_ID(priv), rxq->idx);
814 	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
815 }
816 
817 /**
818  * Allocate queue vector and fill epoll fd list for Rx interrupts.
819  *
820  * @param dev
821  *   Pointer to Ethernet device.
822  *
823  * @return
824  *   0 on success, a negative errno value otherwise and rte_errno is set.
825  */
826 int
827 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
828 {
829 	struct mlx5_priv *priv = dev->data->dev_private;
830 	unsigned int i;
831 	unsigned int rxqs_n = priv->rxqs_n;
832 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
833 	unsigned int count = 0;
834 	struct rte_intr_handle *intr_handle = dev->intr_handle;
835 
836 	/* Representor shares dev->intr_handle with PF. */
837 	if (priv->representor)
838 		return 0;
839 	if (!dev->data->dev_conf.intr_conf.rxq)
840 		return 0;
841 	mlx5_rx_intr_vec_disable(dev);
842 	intr_handle->intr_vec = mlx5_malloc(0,
843 				n * sizeof(intr_handle->intr_vec[0]),
844 				0, SOCKET_ID_ANY);
845 	if (intr_handle->intr_vec == NULL) {
846 		DRV_LOG(ERR,
847 			"port %u failed to allocate memory for interrupt"
848 			" vector, Rx interrupts will not be supported",
849 			dev->data->port_id);
850 		rte_errno = ENOMEM;
851 		return -rte_errno;
852 	}
853 	intr_handle->type = RTE_INTR_HANDLE_EXT;
854 	for (i = 0; i != n; ++i) {
855 		/* This rxq obj must not be released in this function. */
856 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
857 		struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
858 		int rc;
859 
860 		/* Skip queues that cannot request interrupts. */
861 		if (!rxq_obj || (!rxq_obj->ibv_channel &&
862 				 !rxq_obj->devx_channel)) {
863 			/* Use invalid intr_vec[] index to disable entry. */
864 			intr_handle->intr_vec[i] =
865 				RTE_INTR_VEC_RXTX_OFFSET +
866 				RTE_MAX_RXTX_INTR_VEC_ID;
867 			/* Decrease the rxq_ctrl's refcnt */
868 			if (rxq_ctrl)
869 				mlx5_rxq_release(dev, i);
870 			continue;
871 		}
872 		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
873 			DRV_LOG(ERR,
874 				"port %u too many Rx queues for interrupt"
875 				" vector size (%d), Rx interrupts cannot be"
876 				" enabled",
877 				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
878 			mlx5_rx_intr_vec_disable(dev);
879 			rte_errno = ENOMEM;
880 			return -rte_errno;
881 		}
882 		rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
883 		if (rc < 0) {
884 			rte_errno = errno;
885 			DRV_LOG(ERR,
886 				"port %u failed to make Rx interrupt file"
887 				" descriptor %d non-blocking for queue index"
888 				" %d",
889 				dev->data->port_id, rxq_obj->fd, i);
890 			mlx5_rx_intr_vec_disable(dev);
891 			return -rte_errno;
892 		}
893 		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
894 		intr_handle->efds[count] = rxq_obj->fd;
895 		count++;
896 	}
897 	if (!count)
898 		mlx5_rx_intr_vec_disable(dev);
899 	else
900 		intr_handle->nb_efd = count;
901 	return 0;
902 }
903 
904 /**
905  * Clean up Rx interrupts handler.
906  *
907  * @param dev
908  *   Pointer to Ethernet device.
909  */
910 void
911 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
912 {
913 	struct mlx5_priv *priv = dev->data->dev_private;
914 	struct rte_intr_handle *intr_handle = dev->intr_handle;
915 	unsigned int i;
916 	unsigned int rxqs_n = priv->rxqs_n;
917 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
918 
919 	/* Representor shares dev->intr_handle with PF. */
920 	if (priv->representor)
921 		return;
922 	if (!dev->data->dev_conf.intr_conf.rxq)
923 		return;
924 	if (!intr_handle->intr_vec)
925 		goto free;
926 	for (i = 0; i != n; ++i) {
927 		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
928 		    RTE_MAX_RXTX_INTR_VEC_ID)
929 			continue;
930 		/**
931 		 * Need to access the queue directly to release the reference
932 		 * kept in mlx5_rx_intr_vec_enable().
933 		 */
934 		mlx5_rxq_release(dev, i);
935 	}
936 free:
937 	rte_intr_free_epoll_fd(intr_handle);
938 	if (intr_handle->intr_vec)
939 		mlx5_free(intr_handle->intr_vec);
940 	intr_handle->nb_efd = 0;
941 	intr_handle->intr_vec = NULL;
942 }
943 
944 /**
945  * MLX5 CQ notification.
946  *
947  * @param rxq
948  *   Pointer to receive queue structure.
949  * @param sq_n_rxq
950  *   Sequence number per receive queue.
951  */
952 static inline void
953 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
954 {
955 	int sq_n = 0;
956 	uint32_t doorbell_hi;
957 	uint64_t doorbell;
958 	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
959 
960 	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
961 	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
962 	doorbell = (uint64_t)doorbell_hi << 32;
963 	doorbell |= rxq->cqn;
964 	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
965 	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
966 			 cq_db_reg, rxq->uar_lock_cq);
967 }
968 
969 /**
970  * DPDK callback for Rx queue interrupt enable.
971  *
972  * @param dev
973  *   Pointer to Ethernet device structure.
974  * @param rx_queue_id
975  *   Rx queue number.
976  *
977  * @return
978  *   0 on success, a negative errno value otherwise and rte_errno is set.
979  */
980 int
981 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
982 {
983 	struct mlx5_rxq_ctrl *rxq_ctrl;
984 
985 	rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
986 	if (!rxq_ctrl)
987 		goto error;
988 	if (rxq_ctrl->irq) {
989 		if (!rxq_ctrl->obj) {
990 			mlx5_rxq_release(dev, rx_queue_id);
991 			goto error;
992 		}
993 		mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
994 	}
995 	mlx5_rxq_release(dev, rx_queue_id);
996 	return 0;
997 error:
998 	rte_errno = EINVAL;
999 	return -rte_errno;
1000 }
1001 
1002 /**
1003  * DPDK callback for Rx queue interrupt disable.
1004  *
1005  * @param dev
1006  *   Pointer to Ethernet device structure.
1007  * @param rx_queue_id
1008  *   Rx queue number.
1009  *
1010  * @return
1011  *   0 on success, a negative errno value otherwise and rte_errno is set.
1012  */
1013 int
1014 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1015 {
1016 	struct mlx5_priv *priv = dev->data->dev_private;
1017 	struct mlx5_rxq_ctrl *rxq_ctrl;
1018 	int ret = 0;
1019 
1020 	rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
1021 	if (!rxq_ctrl) {
1022 		rte_errno = EINVAL;
1023 		return -rte_errno;
1024 	}
1025 	if (!rxq_ctrl->obj)
1026 		goto error;
1027 	if (rxq_ctrl->irq) {
1028 		ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
1029 		if (ret < 0)
1030 			goto error;
1031 		rxq_ctrl->rxq.cq_arm_sn++;
1032 	}
1033 	mlx5_rxq_release(dev, rx_queue_id);
1034 	return 0;
1035 error:
1036 	/**
1037 	 * The ret variable may be EAGAIN, which means the get_event function
1038 	 * was called before an event arrived.
1039 	 */
1040 	if (ret < 0)
1041 		rte_errno = errno;
1042 	else
1043 		rte_errno = EINVAL;
1044 	ret = rte_errno; /* Save rte_errno before cleanup. */
1045 	mlx5_rxq_release(dev, rx_queue_id);
1046 	if (ret != EAGAIN)
1047 		DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
1048 			dev->data->port_id, rx_queue_id);
1049 	rte_errno = ret; /* Restore rte_errno. */
1050 	return -rte_errno;
1051 }
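/*
 * Illustrative sketch (not part of the driver): the interrupt enable/disable
 * callbacks above are typically driven from an application event loop built
 * on the EAL epoll wrappers, as in the l3fwd-power example. The identifiers
 * below are hypothetical.
 *
 *	struct rte_epoll_event event[1];
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, timeout_ms);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *
 * After waking up the application disables the interrupt and resumes polling
 * with rte_eth_rx_burst() until the queue runs dry again.
 */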
1052 
1053 /**
1054  * Verify the Rx queue objects list is empty
1055  *
1056  * @param dev
1057  *   Pointer to Ethernet device.
1058  *
1059  * @return
1060  *   The number of objects not released.
1061  */
1062 int
1063 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1064 {
1065 	struct mlx5_priv *priv = dev->data->dev_private;
1066 	int ret = 0;
1067 	struct mlx5_rxq_obj *rxq_obj;
1068 
1069 	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1070 		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1071 			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1072 		++ret;
1073 	}
1074 	return ret;
1075 }
1076 
1077 /**
1078  * Callback function to initialize mbufs for Multi-Packet RQ.
1079  */
1080 static inline void
1081 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1082 		    void *_m, unsigned int i __rte_unused)
1083 {
1084 	struct mlx5_mprq_buf *buf = _m;
1085 	struct rte_mbuf_ext_shared_info *shinfo;
1086 	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1087 	unsigned int j;
1088 
1089 	memset(_m, 0, sizeof(*buf));
1090 	buf->mp = mp;
1091 	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1092 	for (j = 0; j != strd_n; ++j) {
1093 		shinfo = &buf->shinfos[j];
1094 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
1095 		shinfo->fcb_opaque = buf;
1096 	}
1097 }
1098 
1099 /**
1100  * Free mempool of Multi-Packet RQ.
1101  *
1102  * @param dev
1103  *   Pointer to Ethernet device.
1104  *
1105  * @return
1106  *   0 on success, negative errno value on failure.
1107  */
1108 int
1109 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1110 {
1111 	struct mlx5_priv *priv = dev->data->dev_private;
1112 	struct rte_mempool *mp = priv->mprq_mp;
1113 	unsigned int i;
1114 
1115 	if (mp == NULL)
1116 		return 0;
1117 	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1118 		dev->data->port_id, mp->name);
1119 	/*
1120 	 * If a buffer in the pool has been externally attached to an mbuf and is
1121 	 * still in use by the application, destroying the Rx queue can spoil
1122 	 * the packet. It is unlikely to happen, but if the application dynamically
1123 	 * creates and destroys queues while holding Rx packets, this can happen.
1124 	 *
1125 	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1126 	 * RQ isn't provided by application but managed by PMD.
1127 	 */
1128 	if (!rte_mempool_full(mp)) {
1129 		DRV_LOG(ERR,
1130 			"port %u mempool for Multi-Packet RQ is still in use",
1131 			dev->data->port_id);
1132 		rte_errno = EBUSY;
1133 		return -rte_errno;
1134 	}
1135 	rte_mempool_free(mp);
1136 	/* Unset mempool for each Rx queue. */
1137 	for (i = 0; i != priv->rxqs_n; ++i) {
1138 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1139 
1140 		if (rxq == NULL)
1141 			continue;
1142 		rxq->mprq_mp = NULL;
1143 	}
1144 	priv->mprq_mp = NULL;
1145 	return 0;
1146 }
1147 
1148 /**
1149  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1150  * mempool. If already allocated, reuse it if there are enough elements.
1151  * Otherwise, resize it.
1152  *
1153  * @param dev
1154  *   Pointer to Ethernet device.
1155  *
1156  * @return
1157  *   0 on success, negative errno value on failure.
1158  */
1159 int
1160 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1161 {
1162 	struct mlx5_priv *priv = dev->data->dev_private;
1163 	struct rte_mempool *mp = priv->mprq_mp;
1164 	char name[RTE_MEMPOOL_NAMESIZE];
1165 	unsigned int desc = 0;
1166 	unsigned int buf_len;
1167 	unsigned int obj_num;
1168 	unsigned int obj_size;
1169 	unsigned int strd_num_n = 0;
1170 	unsigned int strd_sz_n = 0;
1171 	unsigned int i;
1172 	unsigned int n_ibv = 0;
1173 
1174 	if (!mlx5_mprq_enabled(dev))
1175 		return 0;
1176 	/* Count the total number of descriptors configured. */
1177 	for (i = 0; i != priv->rxqs_n; ++i) {
1178 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1179 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1180 			(rxq, struct mlx5_rxq_ctrl, rxq);
1181 
1182 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1183 			continue;
1184 		n_ibv++;
1185 		desc += 1 << rxq->elts_n;
1186 		/* Get the max number of strides. */
1187 		if (strd_num_n < rxq->strd_num_n)
1188 			strd_num_n = rxq->strd_num_n;
1189 		/* Get the max size of a stride. */
1190 		if (strd_sz_n < rxq->strd_sz_n)
1191 			strd_sz_n = rxq->strd_sz_n;
1192 	}
1193 	MLX5_ASSERT(strd_num_n && strd_sz_n);
1194 	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1195 	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1196 		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1197 	/*
1198 	 * Received packets can be either memcpy'd or externally referenced. In
1199 	 * case the packet is attached to an mbuf as an external buffer, since
1200 	 * it isn't possible to predict how the buffers will be queued by the
1201 	 * application, there's no option but to speculatively prepare enough
1202 	 * buffers instead of pre-allocating the exact number in advance.
1203 	 *
1204 	 * In the data path, if this mempool is depleted, the PMD will try to
1205 	 * memcpy received packets into buffers provided by the application
1206 	 * (rxq->mp) until this mempool becomes available again.
1207 	 */
1208 	desc *= 4;
1209 	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1210 	/*
1211 	 * rte_mempool_create_empty() has a sanity check refusing a cache size
1212 	 * that is too large compared to the number of elements.
1213 	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the constant
1214 	 * number 2 is used instead.
1215 	 */
1216 	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1217 	/* Check if a mempool is already allocated and whether it can be reused. */
1218 	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1219 		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1220 			dev->data->port_id, mp->name);
1221 		/* Reuse. */
1222 		goto exit;
1223 	} else if (mp != NULL) {
1224 		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1225 			dev->data->port_id, mp->name);
1226 		/*
1227 		 * If freeing fails, the mempool may still be in use and there is
1228 		 * no choice but to keep using the existing one. On buffer
1229 		 * underrun, packets will be memcpy'd instead of using external
1230 		 * buffer attachment.
1231 		 */
1232 		if (mlx5_mprq_free_mp(dev)) {
1233 			if (mp->elt_size >= obj_size)
1234 				goto exit;
1235 			else
1236 				return -rte_errno;
1237 		}
1238 	}
1239 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1240 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1241 				0, NULL, NULL, mlx5_mprq_buf_init,
1242 				(void *)(uintptr_t)(1 << strd_num_n),
1243 				dev->device->numa_node, 0);
1244 	if (mp == NULL) {
1245 		DRV_LOG(ERR,
1246 			"port %u failed to allocate a mempool for"
1247 			" Multi-Packet RQ, count=%u, size=%u",
1248 			dev->data->port_id, obj_num, obj_size);
1249 		rte_errno = ENOMEM;
1250 		return -rte_errno;
1251 	}
1252 	priv->mprq_mp = mp;
1253 exit:
1254 	/* Set mempool for each Rx queue. */
1255 	for (i = 0; i != priv->rxqs_n; ++i) {
1256 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1257 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1258 			(rxq, struct mlx5_rxq_ctrl, rxq);
1259 
1260 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1261 			continue;
1262 		rxq->mprq_mp = mp;
1263 	}
1264 	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1265 		dev->data->port_id);
1266 	return 0;
1267 }
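/*
 * Illustrative example (not part of the driver): with a single standard Rx
 * queue of 512 descriptors, strd_num_n = 6 and strd_sz_n = 11, each pool
 * object carries 64 strides of 2048 bytes (buf_len = 128 KiB) plus the
 * shared-info array and headroom. The pool is then sized to
 * 512 * 4 + MLX5_MPRQ_MP_CACHE_SZ objects, i.e. over-provisioned by 4x
 * because the PMD cannot know how long the application keeps externally
 * attached mbufs.
 */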
1268 
1269 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1270 					sizeof(struct rte_vlan_hdr) * 2 + \
1271 					sizeof(struct rte_ipv6_hdr)))
1272 #define MAX_TCP_OPTION_SIZE 40u
1273 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1274 				 sizeof(struct rte_tcp_hdr) + \
1275 				 MAX_TCP_OPTION_SIZE))
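/*
 * Worked out for illustration: MLX5_MAX_TCP_HDR_OFFSET is
 * 14 (Ethernet) + 2 * 4 (two VLAN tags) + 40 (IPv6) = 62 bytes, and
 * MLX5_MAX_LRO_HEADER_FIX adds 20 (TCP) + 40 (max TCP options) = 122 bytes.
 */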
1276 
1277 /**
1278  * Adjust the maximum LRO message size.
1279  *
1280  * @param dev
1281  *   Pointer to Ethernet device.
1282  * @param idx
1283  *   RX queue index.
1284  * @param max_lro_size
1285  *   The maximum size for LRO packet.
1286  */
1287 static void
1288 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1289 			     uint32_t max_lro_size)
1290 {
1291 	struct mlx5_priv *priv = dev->data->dev_private;
1292 
1293 	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1294 	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1295 	    MLX5_MAX_TCP_HDR_OFFSET)
1296 		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1297 	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1298 	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1299 	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1300 	if (priv->max_lro_msg_size)
1301 		priv->max_lro_msg_size =
1302 			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1303 	else
1304 		priv->max_lro_msg_size = max_lro_size;
1305 	DRV_LOG(DEBUG,
1306 		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1307 		dev->data->port_id, idx,
1308 		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1309 }
1310 
1311 /**
1312  * Create a DPDK Rx queue.
1313  *
1314  * @param dev
1315  *   Pointer to Ethernet device.
1316  * @param idx
1317  *   RX queue index.
1318  * @param desc
1319  *   Number of descriptors to configure in queue.
1320  * @param socket
1321  *   NUMA socket on which memory must be allocated.
1322  *
1323  * @return
1324  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1325  */
1326 struct mlx5_rxq_ctrl *
1327 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1328 	     unsigned int socket, const struct rte_eth_rxconf *conf,
1329 	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
1330 {
1331 	struct mlx5_priv *priv = dev->data->dev_private;
1332 	struct mlx5_rxq_ctrl *tmpl;
1333 	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
1334 	struct mlx5_dev_config *config = &priv->config;
1335 	uint64_t offloads = conf->offloads |
1336 			   dev->data->dev_conf.rxmode.offloads;
1337 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1338 	unsigned int max_rx_pkt_len = lro_on_queue ?
1339 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
1340 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
1341 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1342 							RTE_PKTMBUF_HEADROOM;
1343 	unsigned int max_lro_size = 0;
1344 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1345 	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
1346 			    !rx_seg[0].offset && !rx_seg[0].length;
1347 	unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
1348 		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
1349 	unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
1350 		(1U << config->mprq.max_stride_size_n) ?
1351 		log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
1352 	unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
1353 		(1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
1354 		(config->mprq.stride_size_n ?
1355 		(1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
1356 	/*
1357 	 * Always allocate extra slots, even if eventually
1358 	 * the vector Rx will not be used.
1359 	 */
1360 	uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1361 	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
1362 	unsigned int tail_len;
1363 
1364 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
1365 		sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
1366 		(!!mprq_en) *
1367 		(desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
1368 		0, socket);
1369 	if (!tmpl) {
1370 		rte_errno = ENOMEM;
1371 		return NULL;
1372 	}
1373 	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
1374 	/*
1375 	 * Build the array of actual buffer offsets and lengths.
1376 	 * Pad with the buffers from the last memory pool if
1377 	 * needed to handle max size packets, replace zero length
1378 	 * with the buffer length from the pool.
1379 	 */
1380 	tail_len = max_rx_pkt_len;
1381 	do {
1382 		struct mlx5_eth_rxseg *hw_seg =
1383 					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
1384 		uint32_t buf_len, offset, seg_len;
1385 
1386 		/*
1387 		 * For the buffers beyond the described segments the offset is
1388 		 * zero, and the first buffer contains the headroom.
1389 		 */
1390 		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
1391 		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
1392 			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
1393 		/*
1394 		 * For the buffers beyond the described segments the length is
1395 		 * the pool buffer length; zero lengths are replaced with the
1396 		 * pool buffer length as well.
1397 		 */
1398 		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
1399 						       qs_seg->length ?
1400 						       qs_seg->length :
1401 						       (buf_len - offset);
1402 		/* Check is done in long int, no overflows. */
1403 		if (buf_len < seg_len + offset) {
1404 			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
1405 				     "%u/%u can't be satisfied",
1406 				     dev->data->port_id, idx,
1407 				     qs_seg->length, qs_seg->offset);
1408 			rte_errno = EINVAL;
1409 			goto error;
1410 		}
1411 		if (seg_len > tail_len)
1412 			seg_len = buf_len - offset;
1413 		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
1414 			DRV_LOG(ERR,
1415 				"port %u too many SGEs (%u) needed to handle"
1416 				" requested maximum packet size %u, the maximum"
1417 				" supported are %u", dev->data->port_id,
1418 				tmpl->rxq.rxseg_n, max_rx_pkt_len,
1419 				MLX5_MAX_RXQ_NSEG);
1420 			rte_errno = ENOTSUP;
1421 			goto error;
1422 		}
1423 		/* Build the actual scattering element in the queue object. */
1424 		hw_seg->mp = qs_seg->mp;
1425 		MLX5_ASSERT(offset <= UINT16_MAX);
1426 		MLX5_ASSERT(seg_len <= UINT16_MAX);
1427 		hw_seg->offset = (uint16_t)offset;
1428 		hw_seg->length = (uint16_t)seg_len;
1429 		/*
1430 		 * Advance the segment descriptor; the padding is based
1431 		 * on the attributes of the last descriptor.
1432 		 */
1433 		if (tmpl->rxq.rxseg_n < n_seg)
1434 			qs_seg++;
1435 		tail_len -= RTE_MIN(tail_len, seg_len);
1436 	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
1437 	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
1438 		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
1439 	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
1440 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1441 			" configured and not enough mbuf space(%u) to contain "
1442 			"the maximum RX packet length(%u) with head-room(%u)",
1443 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1444 			RTE_PKTMBUF_HEADROOM);
1445 		rte_errno = ENOSPC;
1446 		goto error;
1447 	}
1448 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1449 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1450 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1451 		/* rte_errno is already set. */
1452 		goto error;
1453 	}
1454 	tmpl->socket = socket;
1455 	if (dev->data->dev_conf.intr_conf.rxq)
1456 		tmpl->irq = 1;
1457 	/*
1458 	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1459 	 * following conditions are met:
1460 	 *  - MPRQ is enabled.
1461 	 *  - The number of descs is more than the number of strides.
1462 	 *  - max_rx_pkt_len plus overhead is less than the max size
1463 	 *    of a stride or mprq_stride_size is specified by a user.
1464 	 *    Need to make sure that there are enough strides to encap
1465 	 *    the maximum packet size in case mprq_stride_size is set.
1466 	 *  Otherwise, enable Rx scatter if necessary.
1467 	 */
1468 	if (mprq_en && desc > (1U << mprq_stride_nums) &&
1469 	    (non_scatter_min_mbuf_size <=
1470 	     (1U << config->mprq.max_stride_size_n) ||
1471 	     (config->mprq.stride_size_n &&
1472 	      non_scatter_min_mbuf_size <= mprq_stride_cap))) {
1473 		/* TODO: Rx scatter isn't supported yet. */
1474 		tmpl->rxq.sges_n = 0;
1475 		/* Trim the number of descs needed. */
1476 		desc >>= mprq_stride_nums;
1477 		tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
1478 			config->mprq.stride_num_n : mprq_stride_nums;
1479 		tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
1480 			config->mprq.stride_size_n : mprq_stride_size;
1481 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1482 		tmpl->rxq.strd_scatter_en =
1483 				!!(offloads & DEV_RX_OFFLOAD_SCATTER);
1484 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1485 				config->mprq.max_memcpy_len);
1486 		max_lro_size = RTE_MIN(max_rx_pkt_len,
1487 				       (1u << tmpl->rxq.strd_num_n) *
1488 				       (1u << tmpl->rxq.strd_sz_n));
1489 		DRV_LOG(DEBUG,
1490 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
1491 			" strd_num_n = %u, strd_sz_n = %u",
1492 			dev->data->port_id, idx,
1493 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1494 	} else if (tmpl->rxq.rxseg_n == 1) {
1495 		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
1496 		tmpl->rxq.sges_n = 0;
1497 		max_lro_size = max_rx_pkt_len;
1498 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1499 		unsigned int sges_n;
1500 
1501 		if (lro_on_queue && first_mb_free_size <
1502 		    MLX5_MAX_LRO_HEADER_FIX) {
1503 			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1504 				" to include the max header size(%u) for LRO",
1505 				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1506 			rte_errno = ENOTSUP;
1507 			goto error;
1508 		}
1509 		/*
1510 		 * Determine the number of SGEs needed for a full packet
1511 		 * and round it to the next power of two.
1512 		 */
1513 		sges_n = log2above(tmpl->rxq.rxseg_n);
1514 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1515 			DRV_LOG(ERR,
1516 				"port %u too many SGEs (%u) needed to handle"
1517 				" requested maximum packet size %u, the maximum"
1518 				" supported are %u", dev->data->port_id,
1519 				1 << sges_n, max_rx_pkt_len,
1520 				1u << MLX5_MAX_LOG_RQ_SEGS);
1521 			rte_errno = ENOTSUP;
1522 			goto error;
1523 		}
1524 		tmpl->rxq.sges_n = sges_n;
1525 		max_lro_size = max_rx_pkt_len;
1526 	}
1527 	if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1528 		DRV_LOG(WARNING,
1529 			"port %u MPRQ is requested but cannot be enabled\n"
1530 			" (requested: pkt_sz = %u, desc_num = %u,"
1531 			" rxq_num = %u, stride_sz = %u, stride_num = %u\n"
1532 			"  supported: min_rxqs_num = %u,"
1533 			" min_stride_sz = %u, max_stride_sz = %u).",
1534 			dev->data->port_id, non_scatter_min_mbuf_size,
1535 			desc, priv->rxqs_n,
1536 			config->mprq.stride_size_n ?
1537 				(1U << config->mprq.stride_size_n) :
1538 				(1U << mprq_stride_size),
1539 			config->mprq.stride_num_n ?
1540 				(1U << config->mprq.stride_num_n) :
1541 				(1U << mprq_stride_nums),
1542 			config->mprq.min_rxqs_num,
1543 			(1U << config->mprq.min_stride_size_n),
1544 			(1U << config->mprq.max_stride_size_n));
1545 	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1546 		dev->data->port_id, 1 << tmpl->rxq.sges_n);
1547 	if (desc % (1 << tmpl->rxq.sges_n)) {
1548 		DRV_LOG(ERR,
1549 			"port %u number of Rx queue descriptors (%u) is not a"
1550 			" multiple of SGEs per packet (%u)",
1551 			dev->data->port_id,
1552 			desc,
1553 			1 << tmpl->rxq.sges_n);
1554 		rte_errno = EINVAL;
1555 		goto error;
1556 	}
1557 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1558 	/* Toggle RX checksum offload if hardware supports it. */
1559 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1560 	/* Configure Rx timestamp. */
1561 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1562 	tmpl->rxq.timestamp_rx_flag = 0;
1563 	if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
1564 			&tmpl->rxq.timestamp_offset,
1565 			&tmpl->rxq.timestamp_rx_flag) != 0) {
1566 		DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
1567 		goto error;
1568 	}
1569 	/* Configure VLAN stripping. */
1570 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1571 	/* By default, FCS (CRC) is stripped by hardware. */
1572 	tmpl->rxq.crc_present = 0;
1573 	tmpl->rxq.lro = lro_on_queue;
1574 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1575 		if (config->hw_fcs_strip) {
1576 			/*
1577 			 * RQs used for LRO-enabled TIRs should not be
1578 			 * configured to scatter the FCS.
1579 			 */
1580 			if (lro_on_queue)
1581 				DRV_LOG(WARNING,
1582 					"port %u CRC stripping has been "
1583 					"disabled but will still be performed "
1584 					"by hardware, because LRO is enabled",
1585 					dev->data->port_id);
1586 			else
1587 				tmpl->rxq.crc_present = 1;
1588 		} else {
1589 			DRV_LOG(WARNING,
1590 				"port %u CRC stripping has been disabled but will"
1591 				" still be performed by hardware, make sure MLNX_OFED"
1592 				" and firmware are up to date",
1593 				dev->data->port_id);
1594 		}
1595 	}
1596 	DRV_LOG(DEBUG,
1597 		"port %u CRC stripping is %s, %u bytes will be subtracted from"
1598 		" incoming frames to hide it",
1599 		dev->data->port_id,
1600 		tmpl->rxq.crc_present ? "disabled" : "enabled",
1601 		tmpl->rxq.crc_present << 2);
1602 	/* Save port ID. */
1603 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1604 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1605 	tmpl->rxq.port_id = dev->data->port_id;
1606 	tmpl->priv = priv;
1607 	tmpl->rxq.mp = rx_seg[0].mp;
1608 	tmpl->rxq.elts_n = log2above(desc);
1609 	tmpl->rxq.rq_repl_thresh =
1610 		MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
1611 	tmpl->rxq.elts =
1612 		(struct rte_mbuf *(*)[desc_n])(tmpl + 1);
1613 	tmpl->rxq.mprq_bufs =
1614 		(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
1615 #ifndef RTE_ARCH_64
1616 	tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
1617 #endif
1618 	tmpl->rxq.idx = idx;
1619 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1620 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1621 	return tmpl;
1622 error:
1623 	mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
1624 	mlx5_free(tmpl);
1625 	return NULL;
1626 }
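/*
 * Illustrative example (not part of the driver): with desc = 1024,
 * stride_num_n = 6 (64 strides per WQE) and stride_size_n = 11 (2048-byte
 * strides), MPRQ is selected above when desc exceeds the strides per WQE
 * and max_rx_pkt_len plus headroom fits either the device's maximum stride
 * size or, when the stride size is forced by devargs, the whole WQE
 * capacity. The descriptor count is then trimmed to 1024 >> 6 = 16
 * multi-packet WQEs and max_lro_size becomes
 * min(max_rx_pkt_len, 64 * 2048).
 */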
1627 
1628 /**
1629  * Create a DPDK Rx hairpin queue.
1630  *
1631  * @param dev
1632  *   Pointer to Ethernet device.
1633  * @param idx
1634  *   RX queue index.
1635  * @param desc
1636  *   Number of descriptors to configure in queue.
1637  * @param hairpin_conf
1638  *   The hairpin binding configuration.
1639  *
1640  * @return
1641  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1642  */
1643 struct mlx5_rxq_ctrl *
1644 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1645 		     const struct rte_eth_hairpin_conf *hairpin_conf)
1646 {
1647 	struct mlx5_priv *priv = dev->data->dev_private;
1648 	struct mlx5_rxq_ctrl *tmpl;
1649 
1650 	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
1651 			   SOCKET_ID_ANY);
1652 	if (!tmpl) {
1653 		rte_errno = ENOMEM;
1654 		return NULL;
1655 	}
1656 	tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
1657 	tmpl->socket = SOCKET_ID_ANY;
1658 	tmpl->rxq.rss_hash = 0;
1659 	tmpl->rxq.port_id = dev->data->port_id;
1660 	tmpl->priv = priv;
1661 	tmpl->rxq.mp = NULL;
1662 	tmpl->rxq.elts_n = log2above(desc);
1663 	tmpl->rxq.elts = NULL;
1664 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
1665 	tmpl->hairpin_conf = *hairpin_conf;
1666 	tmpl->rxq.idx = idx;
1667 	__atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
1668 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1669 	return tmpl;
1670 }
1671 
1672 /**
1673  * Get a Rx queue.
1674  *
1675  * @param dev
1676  *   Pointer to Ethernet device.
1677  * @param idx
1678  *   RX queue index.
1679  *
1680  * @return
1681  *   A pointer to the queue if it exists, NULL otherwise.
1682  */
1683 struct mlx5_rxq_ctrl *
1684 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
1685 {
1686 	struct mlx5_priv *priv = dev->data->dev_private;
1687 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1688 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1689 
1690 	if (rxq_data) {
1691 		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1692 		__atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
1693 	}
1694 	return rxq_ctrl;
1695 }
1696 
1697 /**
1698  * Release a Rx queue.
1699  *
1700  * @param dev
1701  *   Pointer to Ethernet device.
1702  * @param idx
1703  *   RX queue index.
1704  *
1705  * @return
1706  *   1 while a reference on it exists, 0 when freed.
1707  */
1708 int
1709 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
1710 {
1711 	struct mlx5_priv *priv = dev->data->dev_private;
1712 	struct mlx5_rxq_ctrl *rxq_ctrl;
1713 
1714 	if (!(*priv->rxqs)[idx])
1715 		return 0;
1716 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
1717 	if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
1718 		return 1;
1719 	if (rxq_ctrl->obj) {
1720 		priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
1721 		LIST_REMOVE(rxq_ctrl->obj, next);
1722 		mlx5_free(rxq_ctrl->obj);
1723 		rxq_ctrl->obj = NULL;
1724 	}
1725 	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
1726 		rxq_free_elts(rxq_ctrl);
1727 		dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
1728 	}
1729 	if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
1730 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
1731 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
1732 		LIST_REMOVE(rxq_ctrl, next);
1733 		mlx5_free(rxq_ctrl);
1734 		(*priv->rxqs)[idx] = NULL;
1735 	}
1736 	return 0;
1737 }
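
/*
 * Usage sketch (illustrative only): mlx5_rxq_get() and mlx5_rxq_release()
 * form a reference-counted pair, so every successful get must eventually be
 * balanced by a release; the queue is freed only once the last reference is
 * dropped. "idx" below is assumed to be a valid Rx queue index.
 *
 *	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, idx);
 *
 *	if (rxq_ctrl == NULL)
 *		return -EINVAL;
 *	... work on rxq_ctrl->rxq ...
 *	mlx5_rxq_release(dev, idx);
 */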
1738 
1739 /**
1740  * Verify the Rx queue list is empty.
1741  *
1742  * @param dev
1743  *   Pointer to Ethernet device.
1744  *
1745  * @return
1746  *   The number of objects not released.
1747  */
1748 int
1749 mlx5_rxq_verify(struct rte_eth_dev *dev)
1750 {
1751 	struct mlx5_priv *priv = dev->data->dev_private;
1752 	struct mlx5_rxq_ctrl *rxq_ctrl;
1753 	int ret = 0;
1754 
1755 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
1756 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
1757 			dev->data->port_id, rxq_ctrl->rxq.idx);
1758 		++ret;
1759 	}
1760 	return ret;
1761 }
1762 
1763 /**
1764  * Get a Rx queue type.
1765  *
1766  * @param dev
1767  *   Pointer to Ethernet device.
1768  * @param idx
1769  *   Rx queue index.
1770  *
1771  * @return
1772  *   The Rx queue type.
1773  */
1774 enum mlx5_rxq_type
1775 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
1776 {
1777 	struct mlx5_priv *priv = dev->data->dev_private;
1778 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1779 
1780 	if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1781 		rxq_ctrl = container_of((*priv->rxqs)[idx],
1782 					struct mlx5_rxq_ctrl,
1783 					rxq);
1784 		return rxq_ctrl->type;
1785 	}
1786 	return MLX5_RXQ_TYPE_UNDEFINED;
1787 }
1788 
1789 /**
1790  * Get a Rx hairpin queue configuration.
1791  *
1792  * @param dev
1793  *   Pointer to Ethernet device.
1794  * @param idx
1795  *   Rx queue index.
1796  *
1797  * @return
1798  *   Pointer to the configuration if a hairpin RX queue, otherwise NULL.
1799  */
1800 const struct rte_eth_hairpin_conf *
1801 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
1802 {
1803 	struct mlx5_priv *priv = dev->data->dev_private;
1804 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
1805 
1806 	if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
1807 		rxq_ctrl = container_of((*priv->rxqs)[idx],
1808 					struct mlx5_rxq_ctrl,
1809 					rxq);
1810 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
1811 			return &rxq_ctrl->hairpin_conf;
1812 	}
1813 	return NULL;
1814 }
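
/*
 * Illustrative sketch: a caller that must treat hairpin queues differently
 * can combine the two helpers above; neither of them takes a reference on
 * the queue. "idx" is a placeholder queue index.
 *
 *	if (mlx5_rxq_get_type(dev, idx) == MLX5_RXQ_TYPE_HAIRPIN) {
 *		const struct rte_eth_hairpin_conf *conf =
 *			mlx5_rxq_get_hairpin_conf(dev, idx);
 *
 *		if (conf != NULL)
 *			... inspect conf->peer_count / conf->peers[] ...
 *	}
 */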
1815 
1816 /**
1817  * Match queues listed in arguments to queues contained in indirection table
1818  * object.
1819  *
1820  * @param ind_tbl
1821  *   Pointer to indirection table to match.
1822  * @param queues
1823  *   Queues to match to queues in the indirection table.
1824  * @param queues_n
1825  *   Number of queues in the array.
1826  *
1827  * @return
1828  *   1 if all queues in the indirection table match, 0 otherwise.
1829  */
1830 static int
1831 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
1832 		       const uint16_t *queues, uint32_t queues_n)
1833 {
1834 	return (ind_tbl->queues_n == queues_n) &&
1835 	    (!memcmp(ind_tbl->queues, queues,
1836 		    ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
1837 }
1838 
1839 /**
1840  * Get an indirection table.
1841  *
1842  * @param dev
1843  *   Pointer to Ethernet device.
1844  * @param queues
1845  *   Queues entering in the indirection table.
1846  * @param queues_n
1847  *   Number of queues in the array.
1848  *
1849  * @return
1850  *   An indirection table if found, NULL otherwise.
1851  */
1852 struct mlx5_ind_table_obj *
1853 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
1854 		       uint32_t queues_n)
1855 {
1856 	struct mlx5_priv *priv = dev->data->dev_private;
1857 	struct mlx5_ind_table_obj *ind_tbl;
1858 
1859 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1860 		if ((ind_tbl->queues_n == queues_n) &&
1861 		    (memcmp(ind_tbl->queues, queues,
1862 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
1863 		     == 0))
1864 			break;
1865 	}
1866 	if (ind_tbl) {
1867 		unsigned int i;
1868 
1869 		__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
1870 		for (i = 0; i != ind_tbl->queues_n; ++i)
1871 			mlx5_rxq_get(dev, ind_tbl->queues[i]);
1872 	}
1873 	return ind_tbl;
1874 }
1875 
1876 /**
1877  * Release an indirection table.
1878  *
1879  * @param dev
1880  *   Pointer to Ethernet device.
1881  * @param ind_tbl
1882  *   Indirection table to release.
1883  * @param standalone
1884  *   Indirection table for Standalone queue.
1885  *
1886  * @return
1887  *   1 while a reference on it exists, 0 when freed.
1888  */
1889 int
1890 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
1891 			   struct mlx5_ind_table_obj *ind_tbl,
1892 			   bool standalone)
1893 {
1894 	struct mlx5_priv *priv = dev->data->dev_private;
1895 	unsigned int i;
1896 
1897 	if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
1898 		priv->obj_ops.ind_table_destroy(ind_tbl);
1899 	for (i = 0; i != ind_tbl->queues_n; ++i)
1900 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
1901 	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
1902 		if (!standalone)
1903 			LIST_REMOVE(ind_tbl, next);
1904 		mlx5_free(ind_tbl);
1905 		return 0;
1906 	}
1907 	return 1;
1908 }
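
/*
 * Usage sketch (illustrative only): mlx5_ind_table_obj_get() takes a
 * reference on the table and on every Rx queue it contains, so a lookup
 * that is not kept must be undone with mlx5_ind_table_obj_release(), as
 * mlx5_hrxq_match_cb() does further below. "keep_it" is a hypothetical
 * caller flag.
 *
 *	struct mlx5_ind_table_obj *ind_tbl =
 *		mlx5_ind_table_obj_get(dev, queues, queues_n);
 *
 *	if (ind_tbl != NULL && !keep_it)
 *		mlx5_ind_table_obj_release(dev, ind_tbl, false);
 */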
1909 
1910 /**
1911  * Verify the indirection table list is empty.
1912  *
1913  * @param dev
1914  *   Pointer to Ethernet device.
1915  *
1916  * @return
1917  *   The number of objects not released.
1918  */
1919 int
1920 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
1921 {
1922 	struct mlx5_priv *priv = dev->data->dev_private;
1923 	struct mlx5_ind_table_obj *ind_tbl;
1924 	int ret = 0;
1925 
1926 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
1927 		DRV_LOG(DEBUG,
1928 			"port %u indirection table obj %p still referenced",
1929 			dev->data->port_id, (void *)ind_tbl);
1930 		++ret;
1931 	}
1932 	return ret;
1933 }
1934 
1935 /**
1936  * Setup an indirection table structure fields.
1937  *
1938  * @param dev
1939  *   Pointer to Ethernet device.
1940  * @param ind_tbl
1941  *   Indirection table to modify.
1942  *
1943  * @return
1944  *   0 on success, a negative errno value otherwise and rte_errno is set.
1945  */
1946 int
1947 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
1948 			 struct mlx5_ind_table_obj *ind_tbl)
1949 {
1950 	struct mlx5_priv *priv = dev->data->dev_private;
1951 	uint32_t queues_n = ind_tbl->queues_n;
1952 	uint16_t *queues = ind_tbl->queues;
1953 	unsigned int i, j;
1954 	int ret = 0, err;
1955 	const unsigned int n = rte_is_power_of_2(queues_n) ?
1956 			       log2above(queues_n) :
1957 			       log2above(priv->config.ind_table_max_size);
1958 
1959 	for (i = 0; i != queues_n; ++i) {
1960 		if (!mlx5_rxq_get(dev, queues[i])) {
1961 			ret = -rte_errno;
1962 			goto error;
1963 		}
1964 	}
1965 	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
1966 	if (ret)
1967 		goto error;
1968 	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
1969 	return 0;
1970 error:
1971 	err = rte_errno;
1972 	for (j = 0; j < i; j++)
1973 		mlx5_rxq_release(dev, ind_tbl->queues[j]);
1974 	rte_errno = err;
1975 	DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
1976 		dev->data->port_id);
1977 	return ret;
1978 }
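
/*
 * Worked example for the table size computed above (the numbers are only
 * illustrative): with queues_n = 8, log2above(8) = 3 and an 8-entry
 * indirection table is requested; with queues_n = 6, which is not a power
 * of two, the size falls back to log2above(ind_table_max_size), e.g.
 * log2above(512) = 9, and the object layer replicates the 6 queues across
 * the 512 entries.
 */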
1979 
1980 /**
1981  * Create an indirection table.
1982  *
1983  * @param dev
1984  *   Pointer to Ethernet device.
1985  * @param queues
1986  *   Queues entering in the indirection table.
1987  * @param queues_n
1988  *   Number of queues in the array.
1989  * @param standalone
1990  *   Indirection table for Standalone queue.
1991  *
1992  * @return
1993  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
1994  */
1995 static struct mlx5_ind_table_obj *
1996 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
1997 		       uint32_t queues_n, bool standalone)
1998 {
1999 	struct mlx5_priv *priv = dev->data->dev_private;
2000 	struct mlx5_ind_table_obj *ind_tbl;
2001 	int ret;
2002 
2003 	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
2004 			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
2005 	if (!ind_tbl) {
2006 		rte_errno = ENOMEM;
2007 		return NULL;
2008 	}
2009 	ind_tbl->queues_n = queues_n;
2010 	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
2011 	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
2012 	ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
2013 	if (ret < 0) {
2014 		mlx5_free(ind_tbl);
2015 		return NULL;
2016 	}
2017 	if (!standalone)
2018 		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2019 	return ind_tbl;
2020 }
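
/*
 * Typical get-or-create pattern for a cached (non-standalone) indirection
 * table inside this file, mirroring what __mlx5_hrxq_create() below does
 * (sketch only, queue array names are placeholders):
 *
 *	struct mlx5_ind_table_obj *ind_tbl =
 *		mlx5_ind_table_obj_get(dev, queues, queues_n);
 *
 *	if (ind_tbl == NULL)
 *		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
 *						 false);
 *	if (ind_tbl == NULL)
 *		return NULL;
 *
 * rte_errno is set by the failing path in either helper.
 */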
2021 
2022 /**
2023  * Modify an indirection table.
2024  *
2025  * @param dev
2026  *   Pointer to Ethernet device.
2027  * @param ind_tbl
2028  *   Indirection table to modify.
2029  * @param queues
2030  *   Queues replacement for the indirection table.
2031  * @param queues_n
2032  *   Number of queues in the array.
2033  * @param standalone
2034  *   Indirection table for Standalone queue.
2035  *
2036  * @return
2037  *   0 on success, a negative errno value otherwise and rte_errno is set.
2038  */
2039 int
2040 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
2041 			  struct mlx5_ind_table_obj *ind_tbl,
2042 			  uint16_t *queues, const uint32_t queues_n,
2043 			  bool standalone)
2044 {
2045 	struct mlx5_priv *priv = dev->data->dev_private;
2046 	unsigned int i, j;
2047 	int ret = 0, err;
2048 	const unsigned int n = rte_is_power_of_2(queues_n) ?
2049 			       log2above(queues_n) :
2050 			       log2above(priv->config.ind_table_max_size);
2051 
2052 	MLX5_ASSERT(standalone);
2053 	RTE_SET_USED(standalone);
2054 	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
2055 		/*
2056 		 * Modifying an indirection table with more than one reference
2057 		 * is unsupported. Intended for standalone indirection
2058 		 * tables only.
2059 		 */
2060 		DRV_LOG(DEBUG,
2061 			"Port %u cannot modify indirection table (refcnt > 1).",
2062 			dev->data->port_id);
2063 		rte_errno = EINVAL;
2064 		return -rte_errno;
2065 	}
2066 	for (i = 0; i != queues_n; ++i) {
2067 		if (!mlx5_rxq_get(dev, queues[i])) {
2068 			ret = -rte_errno;
2069 			goto error;
2070 		}
2071 	}
2072 	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2073 	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
2074 	if (ret)
2075 		goto error;
2076 	for (j = 0; j < ind_tbl->queues_n; j++)
2077 		mlx5_rxq_release(dev, ind_tbl->queues[j]);
2078 	ind_tbl->queues_n = queues_n;
2079 	ind_tbl->queues = queues;
2080 	return 0;
2081 error:
2082 	err = rte_errno;
2083 	for (j = 0; j < i; j++)
2084 		mlx5_rxq_release(dev, ind_tbl->queues[j]);
2085 	rte_errno = err;
2086 	DRV_LOG(DEBUG, "Port %u cannot modify indirection table.",
2087 		dev->data->port_id);
2088 	return ret;
2089 }
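
/*
 * Illustrative sketch: updating the queue set of a standalone indirection
 * table (the shared RSS case). Only the "queues" pointer is stored by the
 * function, so the array must stay valid for the lifetime of the table;
 * "new_queues" and "new_queues_n" are placeholder names.
 *
 *	int ret = mlx5_ind_table_obj_modify(dev, ind_tbl, new_queues,
 *					    new_queues_n, true);
 *
 *	if (ret != 0)
 *		return ret;
 *
 * On failure rte_errno is set and the previous queue set is kept.
 */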
2090 
2091 /**
2092  * Match an Rx Hash queue.
2093  *
2094  * @param list
2095  *   Cache list pointer.
2096  * @param entry
2097  *   Hash queue entry pointer.
2098  * @param cb_ctx
2099  *   Context of the callback function.
2100  *
2101  * @return
2102  *   0 on match, nonzero otherwise.
2103  */
2104 int
2105 mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
2106 		   struct mlx5_cache_entry *entry,
2107 		   void *cb_ctx)
2108 {
2109 	struct rte_eth_dev *dev = list->ctx;
2110 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2111 	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2112 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2113 	struct mlx5_ind_table_obj *ind_tbl;
2114 
2115 	if (hrxq->rss_key_len != rss_desc->key_len ||
2116 	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2117 	    hrxq->hash_fields != rss_desc->hash_fields)
2118 		return 1;
2119 	ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
2120 					 rss_desc->queue_num);
2121 	if (ind_tbl)
2122 		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
2123 	return ind_tbl != hrxq->ind_table;
2124 }
2125 
2126 /**
2127  * Modify an Rx Hash queue configuration.
2128  *
2129  * @param dev
2130  *   Pointer to Ethernet device.
2131  * @param hrxq_idx
2132  *   Index to Hash Rx queue to modify.
2133  * @param rss_key
2134  *   RSS key for the Rx hash queue.
2135  * @param rss_key_len
2136  *   RSS key length.
2137  * @param hash_fields
2138  *   Verbs protocol hash field to make the RSS on.
2139  * @param queues
2140  *   Queues entering in the hash queue. In case of empty hash_fields, only
2141  *   the first queue index is used for the indirection table.
2142  * @param queues_n
2143  *   Number of queues.
2144  *
2145  * @return
2146  *   0 on success, a negative errno value otherwise and rte_errno is set.
2147  */
2148 int
2149 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2150 		 const uint8_t *rss_key, uint32_t rss_key_len,
2151 		 uint64_t hash_fields,
2152 		 const uint16_t *queues, uint32_t queues_n)
2153 {
2154 	int err;
2155 	struct mlx5_ind_table_obj *ind_tbl = NULL;
2156 	struct mlx5_priv *priv = dev->data->dev_private;
2157 	struct mlx5_hrxq *hrxq =
2158 		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2159 	int ret;
2160 
2161 	if (!hrxq) {
2162 		rte_errno = EINVAL;
2163 		return -rte_errno;
2164 	}
2165 	/* validations */
2166 	if (hrxq->rss_key_len != rss_key_len) {
2167 		/* rss_key_len is fixed at 40 bytes and cannot change. */
2168 		rte_errno = EINVAL;
2169 		return -rte_errno;
2170 	}
2171 	queues_n = hash_fields ? queues_n : 1;
2172 	if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2173 					    queues, queues_n)) {
2174 		ind_tbl = hrxq->ind_table;
2175 	} else {
2176 		if (hrxq->standalone) {
2177 			/*
2178 			 * Replacement of indirection table unsupported for
2179 			 * standalone hrxq objects (used by shared RSS).
2180 			 */
2181 			rte_errno = ENOTSUP;
2182 			return -rte_errno;
2183 		}
2184 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2185 		if (!ind_tbl)
2186 			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2187 							 hrxq->standalone);
2188 	}
2189 	if (!ind_tbl) {
2190 		rte_errno = ENOMEM;
2191 		return -rte_errno;
2192 	}
2193 	MLX5_ASSERT(priv->obj_ops.hrxq_modify);
2194 	ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
2195 					hash_fields, ind_tbl);
2196 	if (ret) {
2197 		rte_errno = errno;
2198 		goto error;
2199 	}
2200 	if (ind_tbl != hrxq->ind_table) {
2201 		MLX5_ASSERT(!hrxq->standalone);
2202 		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2203 					   hrxq->standalone);
2204 		hrxq->ind_table = ind_tbl;
2205 	}
2206 	hrxq->hash_fields = hash_fields;
2207 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
2208 	return 0;
2209 error:
2210 	err = rte_errno;
2211 	if (ind_tbl != hrxq->ind_table) {
2212 		MLX5_ASSERT(!hrxq->standalone);
2213 		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
2214 	}
2215 	rte_errno = err;
2216 	return -rte_errno;
2217 }
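
/*
 * Illustrative sketch: reconfiguring an existing hash Rx queue with the
 * driver default RSS key. hrxq_idx, hash_fields, queues and queues_n are
 * placeholders supplied by the caller; the key length must match the fixed
 * 40-byte key already stored in the hrxq.
 *
 *	int ret = mlx5_hrxq_modify(dev, hrxq_idx, rss_hash_default_key,
 *				   MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *				   queues, queues_n);
 *
 *	if (ret != 0)
 *		return ret;
 */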
2218 
2219 static void
2220 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2221 {
2222 	struct mlx5_priv *priv = dev->data->dev_private;
2223 
2224 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2225 	mlx5_glue->destroy_flow_action(hrxq->action);
2226 #endif
2227 	priv->obj_ops.hrxq_destroy(hrxq);
2228 	if (!hrxq->standalone) {
2229 		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
2230 					   hrxq->standalone);
2231 	}
2232 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
2233 }
2234 
2235 /**
2236  * Remove the hash Rx queue, cache list callback.
2237  *
2238  * The underlying hash Rx queue is destroyed through __mlx5_hrxq_remove();
2239  * for a non-standalone queue this also drops the reference held on its
2240  * indirection table.
2241  *
2242  * @param list
2243  *   Cache list pointer.
2244  *
2245  * @param entry
2246  *   Hash queue entry pointer.
2247  */
2248 void
2249 mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
2250 		    struct mlx5_cache_entry *entry)
2251 {
2252 	struct rte_eth_dev *dev = list->ctx;
2253 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2254 
2255 	__mlx5_hrxq_remove(dev, hrxq);
2256 }
2257 
2258 static struct mlx5_hrxq *
2259 __mlx5_hrxq_create(struct rte_eth_dev *dev,
2260 		   struct mlx5_flow_rss_desc *rss_desc)
2261 {
2262 	struct mlx5_priv *priv = dev->data->dev_private;
2263 	const uint8_t *rss_key = rss_desc->key;
2264 	uint32_t rss_key_len = rss_desc->key_len;
2265 	bool standalone = !!rss_desc->shared_rss;
2266 	const uint16_t *queues =
2267 		standalone ? rss_desc->const_q : rss_desc->queue;
2268 	uint32_t queues_n = rss_desc->queue_num;
2269 	struct mlx5_hrxq *hrxq = NULL;
2270 	uint32_t hrxq_idx = 0;
2271 	struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
2272 	int ret;
2273 
2274 	queues_n = rss_desc->hash_fields ? queues_n : 1;
2275 	if (!ind_tbl)
2276 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2277 	if (!ind_tbl)
2278 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
2279 						 standalone);
2280 	if (!ind_tbl)
2281 		return NULL;
2282 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
2283 	if (!hrxq)
2284 		goto error;
2285 	hrxq->standalone = standalone;
2286 	hrxq->idx = hrxq_idx;
2287 	hrxq->ind_table = ind_tbl;
2288 	hrxq->rss_key_len = rss_key_len;
2289 	hrxq->hash_fields = rss_desc->hash_fields;
2290 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
2291 	ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
2292 	if (ret < 0)
2293 		goto error;
2294 	return hrxq;
2295 error:
2296 	if (!rss_desc->ind_tbl)
2297 		mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
2298 	if (hrxq)
2299 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2300 	return NULL;
2301 }
2302 
2303 /**
2304  * Create an Rx Hash queue.
2305  *
2306  * @param list
2307  *   Cache list pointer.
2308  * @param entry
2309  *   Hash queue entry pointer.
2310  * @param cb_ctx
2311  *   Context of the callback function.
2312  *
2313  * @return
2314  *   Queue entry on success, NULL otherwise.
2315  */
2316 struct mlx5_cache_entry *
2317 mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
2318 		    struct mlx5_cache_entry *entry __rte_unused,
2319 		    void *cb_ctx)
2320 {
2321 	struct rte_eth_dev *dev = list->ctx;
2322 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2323 	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2324 	struct mlx5_hrxq *hrxq;
2325 
2326 	hrxq = __mlx5_hrxq_create(dev, rss_desc);
2327 	return hrxq ? &hrxq->entry : NULL;
2328 }
2329 
2330 /**
2331  * Get an Rx Hash queue.
2332  *
2333  * @param dev
2334  *   Pointer to Ethernet device.
2335  * @param rss_desc
2336  *   RSS configuration for the Rx hash queue.
2337  *
2338  * @return
2339  *   A hash Rx queue index on success, 0 otherwise.
2340  */
2341 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
2342 		       struct mlx5_flow_rss_desc *rss_desc)
2343 {
2344 	struct mlx5_priv *priv = dev->data->dev_private;
2345 	struct mlx5_hrxq *hrxq;
2346 	struct mlx5_cache_entry *entry;
2347 	struct mlx5_flow_cb_ctx ctx = {
2348 		.data = rss_desc,
2349 	};
2350 
2351 	if (rss_desc->shared_rss) {
2352 		hrxq = __mlx5_hrxq_create(dev, rss_desc);
2353 	} else {
2354 		entry = mlx5_cache_register(&priv->hrxqs, &ctx);
2355 		if (!entry)
2356 			return 0;
2357 		hrxq = container_of(entry, typeof(*hrxq), entry);
2358 	}
2359 	if (hrxq)
2360 		return hrxq->idx;
2361 	return 0;
2362 }
2363 
2364 /**
2365  * Release the hash Rx queue.
2366  *
2367  * @param dev
2368  *   Pointer to Ethernet device.
2369  * @param hrxq_idx
2370  *   Index to Hash Rx queue to release.
2371  *
2372  * @return
2373  *   1 while a reference on it exists, 0 when freed.
2374  */
2375 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2376 {
2377 	struct mlx5_priv *priv = dev->data->dev_private;
2378 	struct mlx5_hrxq *hrxq;
2379 
2380 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2381 	if (!hrxq)
2382 		return 0;
2383 	if (!hrxq->standalone)
2384 		return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
2385 	__mlx5_hrxq_remove(dev, hrxq);
2386 	return 0;
2387 }
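
/*
 * Usage sketch (illustrative only): in the non-shared case mlx5_hrxq_get()
 * registers the queue in the hrxqs cache list, so each successful get is
 * balanced by one mlx5_hrxq_release() on the returned index once the flow
 * rule using it is destroyed. "rss_desc" is a struct mlx5_flow_rss_desc
 * assumed to be fully filled in by the caller.
 *
 *	uint32_t hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 *
 *	if (hrxq_idx == 0)
 *		return -rte_errno;
 *	... reference hrxq_idx from the flow rule ...
 *	mlx5_hrxq_release(dev, hrxq_idx);
 */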
2388 
2389 /**
2390  * Create a drop Rx Hash queue.
2391  *
2392  * @param dev
2393  *   Pointer to Ethernet device.
2394  *
2395  * @return
2396  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2397  */
2398 struct mlx5_hrxq *
2399 mlx5_drop_action_create(struct rte_eth_dev *dev)
2400 {
2401 	struct mlx5_priv *priv = dev->data->dev_private;
2402 	struct mlx5_hrxq *hrxq = NULL;
2403 	int ret;
2404 
2405 	if (priv->drop_queue.hrxq)
2406 		return priv->drop_queue.hrxq;
2407 	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2408 	if (!hrxq) {
2409 		DRV_LOG(WARNING,
2410 			"Port %u cannot allocate memory for drop queue.",
2411 			dev->data->port_id);
2412 		rte_errno = ENOMEM;
2413 		goto error;
2414 	}
2415 	priv->drop_queue.hrxq = hrxq;
2416 	hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2417 				      0, SOCKET_ID_ANY);
2418 	if (!hrxq->ind_table) {
2419 		rte_errno = ENOMEM;
2420 		goto error;
2421 	}
2422 	ret = priv->obj_ops.drop_action_create(dev);
2423 	if (ret < 0)
2424 		goto error;
2425 	return hrxq;
2426 error:
2427 	if (hrxq) {
2428 		if (hrxq->ind_table)
2429 			mlx5_free(hrxq->ind_table);
2430 		priv->drop_queue.hrxq = NULL;
2431 		mlx5_free(hrxq);
2432 	}
2433 	return NULL;
2434 }
2435 
2436 /**
2437  * Release a drop hash Rx queue.
2438  *
2439  * @param dev
2440  *   Pointer to Ethernet device.
2441  */
2442 void
2443 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2444 {
2445 	struct mlx5_priv *priv = dev->data->dev_private;
2446 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2447 
2448 	if (!priv->drop_queue.hrxq)
2449 		return;
2450 	priv->obj_ops.drop_action_destroy(dev);
2451 	mlx5_free(priv->drop_queue.rxq);
2452 	mlx5_free(hrxq->ind_table);
2453 	mlx5_free(hrxq);
2454 	priv->drop_queue.rxq = NULL;
2455 	priv->drop_queue.hrxq = NULL;
2456 }
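
/*
 * Usage sketch (illustrative only): the drop hash Rx queue is a per-port
 * singleton, created lazily on first use and torn down once during device
 * cleanup.
 *
 *	struct mlx5_hrxq *drop = mlx5_drop_action_create(dev);
 *
 *	if (drop == NULL)
 *		return -rte_errno;
 *	... reference the drop queue from drop flow rules ...
 *	mlx5_drop_action_destroy(dev);
 */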
2457 
2458 /**
2459  * Verify the hash Rx queue list is empty.
2460  *
2461  * @param dev
2462  *   Pointer to Ethernet device.
2463  *
2464  * @return
2465  *   The number of objects not released.
2466  */
2467 uint32_t
2468 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2469 {
2470 	struct mlx5_priv *priv = dev->data->dev_private;
2471 
2472 	return mlx5_cache_list_get_entry_num(&priv->hrxqs);
2473 }
2474 
2475 /**
2476  * Set the Rx queue timestamp conversion parameters.
2477  *
2478  * @param[in] dev
2479  *   Pointer to the Ethernet device structure.
2480  */
2481 void
2482 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2483 {
2484 	struct mlx5_priv *priv = dev->data->dev_private;
2485 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2486 	struct mlx5_rxq_data *data;
2487 	unsigned int i;
2488 
2489 	for (i = 0; i != priv->rxqs_n; ++i) {
2490 		if (!(*priv->rxqs)[i])
2491 			continue;
2492 		data = (*priv->rxqs)[i];
2493 		data->sh = sh;
2494 		data->rt_timestamp = priv->config.rt_timestamp;
2495 	}
2496 }
2497