xref: /dpdk/drivers/net/mlx5/mlx5_rxq.c (revision 7adf992fb9bf7162a7edc45b50d10fbb1d57824d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <fcntl.h>
11 #include <sys/queue.h>
12 
13 /* Verbs header. */
14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
15 #ifdef PEDANTIC
16 #pragma GCC diagnostic ignored "-Wpedantic"
17 #endif
18 #include <infiniband/verbs.h>
19 #include <infiniband/mlx5dv.h>
20 #ifdef PEDANTIC
21 #pragma GCC diagnostic error "-Wpedantic"
22 #endif
23 
24 #include <rte_mbuf.h>
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
28 #include <rte_interrupts.h>
29 #include <rte_debug.h>
30 #include <rte_io.h>
31 
32 #include <mlx5_glue.h>
33 #include <mlx5_devx_cmds.h>
34 
35 #include "mlx5_defs.h"
36 #include "mlx5.h"
37 #include "mlx5_rxtx.h"
38 #include "mlx5_utils.h"
39 #include "mlx5_autoconf.h"
40 #include "mlx5_flow.h"
41 
42 
43 /* Default RSS hash key also used for ConnectX-3. */
44 uint8_t rss_hash_default_key[] = {
45 	0x2c, 0xc6, 0x81, 0xd1,
46 	0x5b, 0xdb, 0xf4, 0xf7,
47 	0xfc, 0xa2, 0x83, 0x19,
48 	0xdb, 0x1a, 0x3e, 0x94,
49 	0x6b, 0x9e, 0x38, 0xd9,
50 	0x2c, 0x9c, 0x03, 0xd1,
51 	0xad, 0x99, 0x44, 0xa7,
52 	0xd9, 0x56, 0x3d, 0x59,
53 	0x06, 0x3c, 0x25, 0xf3,
54 	0xfc, 0x1f, 0xdc, 0x2a,
55 };
56 
57 /* Length of the default RSS hash key. */
58 static_assert(MLX5_RSS_HASH_KEY_LEN ==
59 	      (unsigned int)sizeof(rss_hash_default_key),
60 	      "wrong RSS default key size.");
61 
62 /**
63  * Check whether Multi-Packet RQ can be enabled for the device.
64  *
65  * @param dev
66  *   Pointer to Ethernet device.
67  *
68  * @return
69  *   1 if supported, negative errno value if not.
70  */
71 inline int
72 mlx5_check_mprq_support(struct rte_eth_dev *dev)
73 {
74 	struct mlx5_priv *priv = dev->data->dev_private;
75 
76 	if (priv->config.mprq.enabled &&
77 	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
78 		return 1;
79 	return -ENOTSUP;
80 }
81 
82 /**
83  * Check whether Multi-Packet RQ is enabled for the Rx queue.
84  *
85  * @param rxq
86  *   Pointer to receive queue structure.
87  *
88  * @return
89  *   0 if disabled, otherwise enabled.
90  */
91 inline int
92 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
93 {
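	/*
	 * strd_num_n (log2 of strides per WQE) is non-zero only for queues
	 * configured as Multi-Packet RQ, see mlx5_rxq_new().
	 */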
94 	return rxq->strd_num_n > 0;
95 }
96 
97 /**
98  * Check whether Multi-Packet RQ is enabled for the device.
99  *
100  * @param dev
101  *   Pointer to Ethernet device.
102  *
103  * @return
104  *   0 if disabled, otherwise enabled.
105  */
106 inline int
107 mlx5_mprq_enabled(struct rte_eth_dev *dev)
108 {
109 	struct mlx5_priv *priv = dev->data->dev_private;
110 	uint16_t i;
111 	uint16_t n = 0;
112 	uint16_t n_ibv = 0;
113 
114 	if (mlx5_check_mprq_support(dev) < 0)
115 		return 0;
116 	/* All the configured queues should be enabled. */
117 	for (i = 0; i < priv->rxqs_n; ++i) {
118 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
119 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
120 			(rxq, struct mlx5_rxq_ctrl, rxq);
121 
122 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
123 			continue;
124 		n_ibv++;
125 		if (mlx5_rxq_mprq_enabled(rxq))
126 			++n;
127 	}
128 	/* Multi-Packet RQ can't be partially configured. */
129 	MLX5_ASSERT(n == 0 || n == n_ibv);
130 	return n == n_ibv;
131 }
132 
133 /**
134  * Allocate RX queue elements for Multi-Packet RQ.
135  *
136  * @param rxq_ctrl
137  *   Pointer to RX queue structure.
138  *
139  * @return
140  *   0 on success, a negative errno value otherwise and rte_errno is set.
141  */
142 static int
143 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
144 {
145 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
146 	unsigned int wqe_n = 1 << rxq->elts_n;
147 	unsigned int i;
148 	int err;
149 
150 	/* Iterate on segments. */
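	/*
	 * Note: the loop runs wqe_n + 1 times; buffers 0..wqe_n-1 fill the
	 * ring while the extra one is kept aside as the replacement buffer
	 * (mprq_repl).
	 */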
151 	for (i = 0; i <= wqe_n; ++i) {
152 		struct mlx5_mprq_buf *buf;
153 
154 		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
155 			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
156 			rte_errno = ENOMEM;
157 			goto error;
158 		}
159 		if (i < wqe_n)
160 			(*rxq->mprq_bufs)[i] = buf;
161 		else
162 			rxq->mprq_repl = buf;
163 	}
164 	DRV_LOG(DEBUG,
165 		"port %u Rx queue %u allocated and configured %u segments",
166 		rxq->port_id, rxq->idx, wqe_n);
167 	return 0;
168 error:
169 	err = rte_errno; /* Save rte_errno before cleanup. */
170 	wqe_n = i;
171 	for (i = 0; (i != wqe_n); ++i) {
172 		if ((*rxq->mprq_bufs)[i] != NULL)
173 			rte_mempool_put(rxq->mprq_mp,
174 					(*rxq->mprq_bufs)[i]);
175 		(*rxq->mprq_bufs)[i] = NULL;
176 	}
177 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
178 		rxq->port_id, rxq->idx);
179 	rte_errno = err; /* Restore rte_errno. */
180 	return -rte_errno;
181 }
182 
183 /**
184  * Allocate RX queue elements for Single-Packet RQ.
185  *
186  * @param rxq_ctrl
187  *   Pointer to RX queue structure.
188  *
189  * @return
190  *   0 on success, a negative errno value otherwise and rte_errno is set.
191  */
192 static int
193 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
194 {
195 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
196 	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
197 	unsigned int i;
198 	int err;
199 
200 	/* Iterate on segments. */
201 	for (i = 0; (i != elts_n); ++i) {
202 		struct rte_mbuf *buf;
203 
204 		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
205 		if (buf == NULL) {
206 			DRV_LOG(ERR, "port %u empty mbuf pool",
207 				PORT_ID(rxq_ctrl->priv));
208 			rte_errno = ENOMEM;
209 			goto error;
210 		}
211 		/* Headroom is reserved by rte_pktmbuf_alloc(). */
212 		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
213 		/* Buffer is supposed to be empty. */
214 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
215 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
216 		MLX5_ASSERT(!buf->next);
217 		/* Only the first segment keeps headroom. */
218 		if (i % sges_n)
219 			SET_DATA_OFF(buf, 0);
220 		PORT(buf) = rxq_ctrl->rxq.port_id;
221 		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
222 		PKT_LEN(buf) = DATA_LEN(buf);
223 		NB_SEGS(buf) = 1;
224 		(*rxq_ctrl->rxq.elts)[i] = buf;
225 	}
226 	/* If Rx vector is activated. */
227 	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
228 		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
229 		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
230 		struct rte_pktmbuf_pool_private *priv =
231 			(struct rte_pktmbuf_pool_private *)
232 				rte_mempool_get_priv(rxq_ctrl->rxq.mp);
233 		int j;
234 
235 		/* Initialize default rearm_data for vPMD. */
236 		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
237 		rte_mbuf_refcnt_set(mbuf_init, 1);
238 		mbuf_init->nb_segs = 1;
239 		mbuf_init->port = rxq->port_id;
240 		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
241 			mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
242 		/*
243 		 * prevent compiler reordering:
244 		 * rearm_data covers previous fields.
245 		 */
246 		rte_compiler_barrier();
247 		rxq->mbuf_initializer =
248 			*(rte_xmm_t *)&mbuf_init->rearm_data;
249 		/* Padding with a fake mbuf for vectorized Rx. */
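		/*
		 * The vectorized burst handles descriptors in groups of
		 * MLX5_VPMD_DESCS_PER_LOOP, so the ring tail is padded with
		 * pointers to a dummy mbuf to keep those accesses in bounds.
		 */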
250 		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
251 			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
252 	}
253 	DRV_LOG(DEBUG,
254 		"port %u Rx queue %u allocated and configured %u segments"
255 		" (max %u packets)",
256 		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
257 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
258 	return 0;
259 error:
260 	err = rte_errno; /* Save rte_errno before cleanup. */
261 	elts_n = i;
262 	for (i = 0; (i != elts_n); ++i) {
263 		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
264 			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
265 		(*rxq_ctrl->rxq.elts)[i] = NULL;
266 	}
267 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
268 		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
269 	rte_errno = err; /* Restore rte_errno. */
270 	return -rte_errno;
271 }
272 
273 /**
274  * Allocate RX queue elements.
275  *
276  * @param rxq_ctrl
277  *   Pointer to RX queue structure.
278  *
279  * @return
280  *   0 on success, a negative errno value otherwise and rte_errno is set.
281  */
282 int
283 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
284 {
285 	return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
286 	       rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
287 }
288 
289 /**
290  * Free RX queue elements for Multi-Packet RQ.
291  *
292  * @param rxq_ctrl
293  *   Pointer to RX queue structure.
294  */
295 static void
296 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
297 {
298 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
299 	uint16_t i;
300 
301 	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
302 		rxq->port_id, rxq->idx);
303 	if (rxq->mprq_bufs == NULL)
304 		return;
305 	MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
306 	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
307 		if ((*rxq->mprq_bufs)[i] != NULL)
308 			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
309 		(*rxq->mprq_bufs)[i] = NULL;
310 	}
311 	if (rxq->mprq_repl != NULL) {
312 		mlx5_mprq_buf_free(rxq->mprq_repl);
313 		rxq->mprq_repl = NULL;
314 	}
315 }
316 
317 /**
318  * Free RX queue elements for Single-Packet RQ.
319  *
320  * @param rxq_ctrl
321  *   Pointer to RX queue structure.
322  */
323 static void
324 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
325 {
326 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
327 	const uint16_t q_n = (1 << rxq->elts_n);
328 	const uint16_t q_mask = q_n - 1;
329 	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
330 	uint16_t i;
331 
332 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
333 		PORT_ID(rxq_ctrl->priv), rxq->idx);
334 	if (rxq->elts == NULL)
335 		return;
336 	/**
337 	 * Some mbufs in the ring still belong to the application; they cannot
338 	 * be freed.
339 	 */
340 	if (mlx5_rxq_check_vec_support(rxq) > 0) {
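		/*
		 * Slots outside [rq_pi, rq_ci) still reference mbufs that were
		 * already returned to the application; clear these stale
		 * pointers so the loop below does not free them.
		 */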
341 		for (i = 0; i < used; ++i)
342 			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
343 		rxq->rq_pi = rxq->rq_ci;
344 	}
345 	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
346 		if ((*rxq->elts)[i] != NULL)
347 			rte_pktmbuf_free_seg((*rxq->elts)[i]);
348 		(*rxq->elts)[i] = NULL;
349 	}
350 }
351 
352 /**
353  * Free RX queue elements.
354  *
355  * @param rxq_ctrl
356  *   Pointer to RX queue structure.
357  */
358 static void
359 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
360 {
361 	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
362 		rxq_free_elts_mprq(rxq_ctrl);
363 	else
364 		rxq_free_elts_sprq(rxq_ctrl);
365 }
366 
367 /**
368  * Returns the per-queue supported offloads.
369  *
370  * @param dev
371  *   Pointer to Ethernet device.
372  *
373  * @return
374  *   Supported Rx offloads.
375  */
376 uint64_t
377 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
378 {
379 	struct mlx5_priv *priv = dev->data->dev_private;
380 	struct mlx5_dev_config *config = &priv->config;
381 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
382 			     DEV_RX_OFFLOAD_TIMESTAMP |
383 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
384 			     DEV_RX_OFFLOAD_RSS_HASH);
385 
386 	if (config->hw_fcs_strip)
387 		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
388 
389 	if (config->hw_csum)
390 		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
391 			     DEV_RX_OFFLOAD_UDP_CKSUM |
392 			     DEV_RX_OFFLOAD_TCP_CKSUM);
393 	if (config->hw_vlan_strip)
394 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
395 	if (MLX5_LRO_SUPPORTED(dev))
396 		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
397 	return offloads;
398 }
399 
400 
401 /**
402  * Returns the per-port supported offloads.
403  *
404  * @return
405  *   Supported Rx offloads.
406  */
407 uint64_t
408 mlx5_get_rx_port_offloads(void)
409 {
410 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
411 
412 	return offloads;
413 }
414 
415 /**
416  * Verify if the queue can be released.
417  *
418  * @param dev
419  *   Pointer to Ethernet device.
420  * @param idx
421  *   RX queue index.
422  *
423  * @return
424  *   1 if the queue can be released
425  *   0 if the queue cannot be released because there are references to it.
426  *   Negative errno and rte_errno is set if queue doesn't exist.
427  */
428 static int
429 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
430 {
431 	struct mlx5_priv *priv = dev->data->dev_private;
432 	struct mlx5_rxq_ctrl *rxq_ctrl;
433 
434 	if (!(*priv->rxqs)[idx]) {
435 		rte_errno = EINVAL;
436 		return -rte_errno;
437 	}
438 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
439 	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
440 }
441 
442 /**
443  * Rx queue presetup checks.
444  *
445  * @param dev
446  *   Pointer to Ethernet device structure.
447  * @param idx
448  *   RX queue index.
449  * @param desc
450  *   Number of descriptors to configure in queue.
451  *
452  * @return
453  *   0 on success, a negative errno value otherwise and rte_errno is set.
454  */
455 static int
456 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
457 {
458 	struct mlx5_priv *priv = dev->data->dev_private;
459 
460 	if (!rte_is_power_of_2(desc)) {
461 		desc = 1 << log2above(desc);
462 		DRV_LOG(WARNING,
463 			"port %u increased number of descriptors in Rx queue %u"
464 			" to the next power of two (%d)",
465 			dev->data->port_id, idx, desc);
466 	}
467 	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
468 		dev->data->port_id, idx, desc);
469 	if (idx >= priv->rxqs_n) {
470 		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
471 			dev->data->port_id, idx, priv->rxqs_n);
472 		rte_errno = EOVERFLOW;
473 		return -rte_errno;
474 	}
475 	if (!mlx5_rxq_releasable(dev, idx)) {
476 		DRV_LOG(ERR, "port %u unable to release queue index %u",
477 			dev->data->port_id, idx);
478 		rte_errno = EBUSY;
479 		return -rte_errno;
480 	}
481 	mlx5_rxq_release(dev, idx);
482 	return 0;
483 }
484 
485 /**
486  * DPDK callback to configure a Rx queue.
487  * @param dev
488  *   Pointer to Ethernet device structure.
489  * @param idx
490  *   RX queue index.
491  * @param desc
492  *   Number of descriptors to configure in queue.
493  * @param socket
494  *   NUMA socket on which memory must be allocated.
495  * @param[in] conf
496  *   Thresholds parameters.
497  * @param mp
498  *   Memory pool for buffer allocations.
499  *
500  * @return
501  *   0 on success, a negative errno value otherwise and rte_errno is set.
502  */
503 int
504 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
505 		    unsigned int socket, const struct rte_eth_rxconf *conf,
506 		    struct rte_mempool *mp)
507 {
508 	struct mlx5_priv *priv = dev->data->dev_private;
509 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
510 	struct mlx5_rxq_ctrl *rxq_ctrl =
511 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
512 	int res;
513 
514 	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
515 	if (res)
516 		return res;
517 	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
518 	if (!rxq_ctrl) {
519 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
520 			dev->data->port_id, idx);
521 		rte_errno = ENOMEM;
522 		return -rte_errno;
523 	}
524 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
525 		dev->data->port_id, idx);
526 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
527 	return 0;
528 }
529 
530 /**
531  * DPDK callback to configure a Rx hairpin queue.
532  * @param dev
533  *   Pointer to Ethernet device structure.
534  * @param idx
535  *   RX queue index.
536  * @param desc
537  *   Number of descriptors to configure in queue.
538  * @param hairpin_conf
539  *   Hairpin configuration parameters.
540  *
541  * @return
542  *   0 on success, a negative errno value otherwise and rte_errno is set.
543  */
544 int
545 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
546 			    uint16_t desc,
547 			    const struct rte_eth_hairpin_conf *hairpin_conf)
548 {
549 	struct mlx5_priv *priv = dev->data->dev_private;
550 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
551 	struct mlx5_rxq_ctrl *rxq_ctrl =
552 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
553 	int res;
554 
555 	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
556 	if (res)
557 		return res;
558 	if (hairpin_conf->peer_count != 1 ||
559 	    hairpin_conf->peers[0].port != dev->data->port_id ||
560 	    hairpin_conf->peers[0].queue >= priv->txqs_n) {
561 		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
562 			" invalid hairpin configuration", dev->data->port_id,
563 			idx);
564 		rte_errno = EINVAL;
565 		return -rte_errno;
566 	}
567 	rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
568 	if (!rxq_ctrl) {
569 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
570 			dev->data->port_id, idx);
571 		rte_errno = ENOMEM;
572 		return -rte_errno;
573 	}
574 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
575 		dev->data->port_id, idx);
576 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
577 	return 0;
578 }
579 
580 /**
581  * DPDK callback to release a RX queue.
582  *
583  * @param dpdk_rxq
584  *   Generic RX queue pointer.
585  */
586 void
587 mlx5_rx_queue_release(void *dpdk_rxq)
588 {
589 	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
590 	struct mlx5_rxq_ctrl *rxq_ctrl;
591 	struct mlx5_priv *priv;
592 
593 	if (rxq == NULL)
594 		return;
595 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
596 	priv = rxq_ctrl->priv;
597 	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
598 		rte_panic("port %u Rx queue %u is still used by a flow and"
599 			  " cannot be removed\n",
600 			  PORT_ID(priv), rxq->idx);
601 	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
602 }
603 
604 /**
605  * Get an Rx queue Verbs/DevX object.
606  *
607  * @param dev
608  *   Pointer to Ethernet device.
609  * @param idx
610  *   Queue index in DPDK Rx queue array
611  *
612  * @return
613  *   The Verbs/DevX object if it exists.
614  */
615 static struct mlx5_rxq_obj *
616 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
617 {
618 	struct mlx5_priv *priv = dev->data->dev_private;
619 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
620 	struct mlx5_rxq_ctrl *rxq_ctrl;
621 
622 	if (idx >= priv->rxqs_n)
623 		return NULL;
624 	if (!rxq_data)
625 		return NULL;
626 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
627 	if (rxq_ctrl->obj)
628 		rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
629 	return rxq_ctrl->obj;
630 }
631 
632 /**
633  * Release the resources allocated for an RQ DevX object.
634  *
635  * @param rxq_ctrl
636  *   DevX Rx queue object.
637  */
638 static void
639 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
640 {
641 	if (rxq_ctrl->rxq.wqes) {
642 		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
643 		rxq_ctrl->rxq.wqes = NULL;
644 	}
645 	if (rxq_ctrl->wq_umem) {
646 		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
647 		rxq_ctrl->wq_umem = NULL;
648 	}
649 }
650 
651 /**
652  * Release Rx hairpin related resources.
653  *
654  * @param rxq_obj
655  *   Hairpin Rx queue object.
656  */
657 static void
658 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
659 {
660 	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
661 
662 	MLX5_ASSERT(rxq_obj);
663 	rq_attr.state = MLX5_RQC_STATE_RST;
664 	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
665 	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
666 	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
667 }
668 
669 /**
670  * Release an Rx verbs/DevX queue object.
671  *
672  * @param rxq_obj
673  *   Verbs/DevX Rx queue object.
674  *
675  * @return
676  *   1 while a reference on it exists, 0 when freed.
677  */
678 static int
679 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
680 {
681 	MLX5_ASSERT(rxq_obj);
682 	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
683 		switch (rxq_obj->type) {
684 		case MLX5_RXQ_OBJ_TYPE_IBV:
685 			MLX5_ASSERT(rxq_obj->wq);
686 			MLX5_ASSERT(rxq_obj->cq);
687 			rxq_free_elts(rxq_obj->rxq_ctrl);
688 			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
689 			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
690 			break;
691 		case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
692 			MLX5_ASSERT(rxq_obj->cq);
693 			MLX5_ASSERT(rxq_obj->rq);
694 			rxq_free_elts(rxq_obj->rxq_ctrl);
695 			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
696 			rxq_release_rq_resources(rxq_obj->rxq_ctrl);
697 			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
698 			break;
699 		case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
700 			MLX5_ASSERT(rxq_obj->rq);
701 			rxq_obj_hairpin_release(rxq_obj);
702 			break;
703 		}
704 		if (rxq_obj->channel)
705 			claim_zero(mlx5_glue->destroy_comp_channel
706 				   (rxq_obj->channel));
707 		LIST_REMOVE(rxq_obj, next);
708 		rte_free(rxq_obj);
709 		return 0;
710 	}
711 	return 1;
712 }
713 
714 /**
715  * Allocate queue vector and fill epoll fd list for Rx interrupts.
716  *
717  * @param dev
718  *   Pointer to Ethernet device.
719  *
720  * @return
721  *   0 on success, a negative errno value otherwise and rte_errno is set.
722  */
723 int
724 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
725 {
726 	struct mlx5_priv *priv = dev->data->dev_private;
727 	unsigned int i;
728 	unsigned int rxqs_n = priv->rxqs_n;
729 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
730 	unsigned int count = 0;
731 	struct rte_intr_handle *intr_handle = dev->intr_handle;
732 
733 	if (!dev->data->dev_conf.intr_conf.rxq)
734 		return 0;
735 	mlx5_rx_intr_vec_disable(dev);
736 	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
737 	if (intr_handle->intr_vec == NULL) {
738 		DRV_LOG(ERR,
739 			"port %u failed to allocate memory for interrupt"
740 			" vector, Rx interrupts will not be supported",
741 			dev->data->port_id);
742 		rte_errno = ENOMEM;
743 		return -rte_errno;
744 	}
745 	intr_handle->type = RTE_INTR_HANDLE_EXT;
746 	for (i = 0; i != n; ++i) {
747 		/* This rxq obj must not be released in this function. */
748 		struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
749 		int fd;
750 		int flags;
751 		int rc;
752 
753 		/* Skip queues that cannot request interrupts. */
754 		if (!rxq_obj || !rxq_obj->channel) {
755 			/* Use invalid intr_vec[] index to disable entry. */
756 			intr_handle->intr_vec[i] =
757 				RTE_INTR_VEC_RXTX_OFFSET +
758 				RTE_MAX_RXTX_INTR_VEC_ID;
759 			continue;
760 		}
761 		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
762 			DRV_LOG(ERR,
763 				"port %u too many Rx queues for interrupt"
764 				" vector size (%d), Rx interrupts cannot be"
765 				" enabled",
766 				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
767 			mlx5_rx_intr_vec_disable(dev);
768 			rte_errno = ENOMEM;
769 			return -rte_errno;
770 		}
771 		fd = rxq_obj->channel->fd;
772 		flags = fcntl(fd, F_GETFL);
773 		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
774 		if (rc < 0) {
775 			rte_errno = errno;
776 			DRV_LOG(ERR,
777 				"port %u failed to make Rx interrupt file"
778 				" descriptor %d non-blocking for queue index"
779 				" %d",
780 				dev->data->port_id, fd, i);
781 			mlx5_rx_intr_vec_disable(dev);
782 			return -rte_errno;
783 		}
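		/*
		 * Map Rx queue i to efds[count]; RTE_INTR_VEC_RXTX_OFFSET keeps
		 * queue vectors apart from the non-queue interrupt vectors.
		 */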
784 		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
785 		intr_handle->efds[count] = fd;
786 		count++;
787 	}
788 	if (!count)
789 		mlx5_rx_intr_vec_disable(dev);
790 	else
791 		intr_handle->nb_efd = count;
792 	return 0;
793 }
794 
795 /**
796  * Clean up Rx interrupts handler.
797  *
798  * @param dev
799  *   Pointer to Ethernet device.
800  */
801 void
802 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
803 {
804 	struct mlx5_priv *priv = dev->data->dev_private;
805 	struct rte_intr_handle *intr_handle = dev->intr_handle;
806 	unsigned int i;
807 	unsigned int rxqs_n = priv->rxqs_n;
808 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
809 
810 	if (!dev->data->dev_conf.intr_conf.rxq)
811 		return;
812 	if (!intr_handle->intr_vec)
813 		goto free;
814 	for (i = 0; i != n; ++i) {
815 		struct mlx5_rxq_ctrl *rxq_ctrl;
816 		struct mlx5_rxq_data *rxq_data;
817 
818 		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
819 		    RTE_MAX_RXTX_INTR_VEC_ID)
820 			continue;
821 		/**
822 		 * Need to access the queue directly to release the reference
823 		 * kept in mlx5_rx_intr_vec_enable().
824 		 */
825 		rxq_data = (*priv->rxqs)[i];
826 		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
827 		if (rxq_ctrl->obj)
828 			mlx5_rxq_obj_release(rxq_ctrl->obj);
829 	}
830 free:
831 	rte_intr_free_epoll_fd(intr_handle);
832 	if (intr_handle->intr_vec)
833 		free(intr_handle->intr_vec);
834 	intr_handle->nb_efd = 0;
835 	intr_handle->intr_vec = NULL;
836 }
837 
838 /**
839  * MLX5 CQ notification.
840  *
841  * @param rxq
842  *   Pointer to receive queue structure.
843  * @param sq_n_rxq
844  *   Sequence number per receive queue.
845  */
846 static inline void
847 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
848 {
849 	int sq_n = 0;
850 	uint32_t doorbell_hi;
851 	uint64_t doorbell;
852 	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
853 
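	/*
	 * Compose the CQ arm doorbell: the high 32 bits carry
	 * (sq_n << MLX5_CQ_SQN_OFFSET) | (cq_ci & MLX5_CI_MASK) and the low
	 * 32 bits carry the CQ number.
	 */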
854 	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
855 	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
856 	doorbell = (uint64_t)doorbell_hi << 32;
857 	doorbell |=  rxq->cqn;
858 	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
859 	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
860 			 cq_db_reg, rxq->uar_lock_cq);
861 }
862 
863 /**
864  * DPDK callback for Rx queue interrupt enable.
865  *
866  * @param dev
867  *   Pointer to Ethernet device structure.
868  * @param rx_queue_id
869  *   Rx queue number.
870  *
871  * @return
872  *   0 on success, a negative errno value otherwise and rte_errno is set.
873  */
874 int
875 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
876 {
877 	struct mlx5_priv *priv = dev->data->dev_private;
878 	struct mlx5_rxq_data *rxq_data;
879 	struct mlx5_rxq_ctrl *rxq_ctrl;
880 
881 	rxq_data = (*priv->rxqs)[rx_queue_id];
882 	if (!rxq_data) {
883 		rte_errno = EINVAL;
884 		return -rte_errno;
885 	}
886 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
887 	if (rxq_ctrl->irq) {
888 		struct mlx5_rxq_obj *rxq_obj;
889 
890 		rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
891 		if (!rxq_obj) {
892 			rte_errno = EINVAL;
893 			return -rte_errno;
894 		}
895 		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
896 		mlx5_rxq_obj_release(rxq_obj);
897 	}
898 	return 0;
899 }
900 
901 /**
902  * DPDK callback for Rx queue interrupt disable.
903  *
904  * @param dev
905  *   Pointer to Ethernet device structure.
906  * @param rx_queue_id
907  *   Rx queue number.
908  *
909  * @return
910  *   0 on success, a negative errno value otherwise and rte_errno is set.
911  */
912 int
913 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
914 {
915 	struct mlx5_priv *priv = dev->data->dev_private;
916 	struct mlx5_rxq_data *rxq_data;
917 	struct mlx5_rxq_ctrl *rxq_ctrl;
918 	struct mlx5_rxq_obj *rxq_obj = NULL;
919 	struct ibv_cq *ev_cq;
920 	void *ev_ctx;
921 	int ret;
922 
923 	rxq_data = (*priv->rxqs)[rx_queue_id];
924 	if (!rxq_data) {
925 		rte_errno = EINVAL;
926 		return -rte_errno;
927 	}
928 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
929 	if (!rxq_ctrl->irq)
930 		return 0;
931 	rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
932 	if (!rxq_obj) {
933 		rte_errno = EINVAL;
934 		return -rte_errno;
935 	}
936 	ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
937 	if (ret || ev_cq != rxq_obj->cq) {
938 		rte_errno = EINVAL;
939 		goto exit;
940 	}
941 	rxq_data->cq_arm_sn++;
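	/*
	 * Acknowledge the event read above; unacknowledged CQ events would
	 * block the CQ destruction later on.
	 */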
942 	mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
943 	mlx5_rxq_obj_release(rxq_obj);
944 	return 0;
945 exit:
946 	ret = rte_errno; /* Save rte_errno before cleanup. */
947 	if (rxq_obj)
948 		mlx5_rxq_obj_release(rxq_obj);
949 	DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
950 		dev->data->port_id, rx_queue_id);
951 	rte_errno = ret; /* Restore rte_errno. */
952 	return -rte_errno;
953 }
954 
955 /**
956  * Create a CQ Verbs object.
957  *
958  * @param dev
959  *   Pointer to Ethernet device.
960  * @param priv
961  *   Pointer to device private data.
962  * @param rxq_data
963  *   Pointer to Rx queue data.
964  * @param cqe_n
965  *   Number of CQEs in CQ.
966  * @param rxq_obj
967  *   Pointer to Rx queue object data.
968  *
969  * @return
970  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
971  */
972 static struct ibv_cq *
973 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
974 		struct mlx5_rxq_data *rxq_data,
975 		unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
976 {
977 	struct {
978 		struct ibv_cq_init_attr_ex ibv;
979 		struct mlx5dv_cq_init_attr mlx5;
980 	} cq_attr;
981 
982 	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
983 		.cqe = cqe_n,
984 		.channel = rxq_obj->channel,
985 		.comp_mask = 0,
986 	};
987 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
988 		.comp_mask = 0,
989 	};
990 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
991 	    !rxq_data->lro) {
992 		cq_attr.mlx5.comp_mask |=
993 				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
994 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
995 		cq_attr.mlx5.cqe_comp_res_format =
996 				mlx5_rxq_mprq_enabled(rxq_data) ?
997 				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
998 				MLX5DV_CQE_RES_FORMAT_HASH;
999 #else
1000 		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
1001 #endif
1002 		/*
1003 		 * For vectorized Rx, it must not be doubled in order to
1004 		 * make cq_ci and rq_ci aligned.
1005 		 */
1006 		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1007 			cq_attr.ibv.cqe *= 2;
1008 	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1009 		DRV_LOG(DEBUG,
1010 			"port %u Rx CQE compression is disabled for HW"
1011 			" timestamp",
1012 			dev->data->port_id);
1013 	} else if (priv->config.cqe_comp && rxq_data->lro) {
1014 		DRV_LOG(DEBUG,
1015 			"port %u Rx CQE compression is disabled for LRO",
1016 			dev->data->port_id);
1017 	}
1018 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1019 	if (priv->config.cqe_pad) {
1020 		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1021 		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1022 	}
1023 #endif
1024 	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1025 							      &cq_attr.ibv,
1026 							      &cq_attr.mlx5));
1027 }
1028 
1029 /**
1030  * Create a WQ Verbs object.
1031  *
1032  * @param dev
1033  *   Pointer to Ethernet device.
1034  * @param priv
1035  *   Pointer to device private data.
1036  * @param rxq_data
1037  *   Pointer to Rx queue data.
1038  * @param idx
1039  *   Queue index in DPDK Rx queue array
1040  * @param wqe_n
1041  *   Number of WQEs in WQ.
1042  * @param rxq_obj
1043  *   Pointer to Rx queue object data.
1044  *
1045  * @return
1046  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1047  */
1048 static struct ibv_wq *
1049 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1050 		struct mlx5_rxq_data *rxq_data, uint16_t idx,
1051 		unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1052 {
1053 	struct {
1054 		struct ibv_wq_init_attr ibv;
1055 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1056 		struct mlx5dv_wq_init_attr mlx5;
1057 #endif
1058 	} wq_attr;
1059 
1060 	wq_attr.ibv = (struct ibv_wq_init_attr){
1061 		.wq_context = NULL, /* Could be useful in the future. */
1062 		.wq_type = IBV_WQT_RQ,
1063 		/* Max number of outstanding WRs. */
1064 		.max_wr = wqe_n >> rxq_data->sges_n,
1065 		/* Max number of scatter/gather elements in a WR. */
1066 		.max_sge = 1 << rxq_data->sges_n,
1067 		.pd = priv->sh->pd,
1068 		.cq = rxq_obj->cq,
1069 		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1070 		.create_flags = (rxq_data->vlan_strip ?
1071 				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1072 	};
1073 	/* By default, FCS (CRC) is stripped by hardware. */
1074 	if (rxq_data->crc_present) {
1075 		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1076 		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1077 	}
1078 	if (priv->config.hw_padding) {
1079 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1080 		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1081 		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1082 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1083 		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1084 		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1085 #endif
1086 	}
1087 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1088 	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1089 		.comp_mask = 0,
1090 	};
1091 	if (mlx5_rxq_mprq_enabled(rxq_data)) {
1092 		struct mlx5dv_striding_rq_init_attr *mprq_attr =
1093 						&wq_attr.mlx5.striding_rq_attrs;
1094 
1095 		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1096 		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1097 			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1098 			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1099 			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1100 		};
1101 	}
1102 	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1103 					      &wq_attr.mlx5);
1104 #else
1105 	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1106 #endif
1107 	if (rxq_obj->wq) {
1108 		/*
1109 		 * Make sure the number of WRs*SGEs matches expectations since a queue
1110 		 * cannot allocate more than "desc" buffers.
1111 		 */
1112 		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1113 		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1114 			DRV_LOG(ERR,
1115 				"port %u Rx queue %u requested %u*%u but got"
1116 				" %u*%u WRs*SGEs",
1117 				dev->data->port_id, idx,
1118 				wqe_n >> rxq_data->sges_n,
1119 				(1 << rxq_data->sges_n),
1120 				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1121 			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1122 			rxq_obj->wq = NULL;
1123 			rte_errno = EINVAL;
1124 		}
1125 	}
1126 	return rxq_obj->wq;
1127 }
1128 
1129 /**
1130  * Fill common fields of create RQ attributes structure.
1131  *
1132  * @param rxq_data
1133  *   Pointer to Rx queue data.
1134  * @param cqn
1135  *   CQ number to use with this RQ.
1136  * @param rq_attr
1137  *   RQ attributes structure to fill.
1138  */
1139 static void
1140 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1141 			      struct mlx5_devx_create_rq_attr *rq_attr)
1142 {
1143 	rq_attr->state = MLX5_RQC_STATE_RST;
1144 	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1145 	rq_attr->cqn = cqn;
1146 	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1147 }
1148 
1149 /**
1150  * Fill common fields of DevX WQ attributes structure.
1151  *
1152  * @param priv
1153  *   Pointer to device private data.
1154  * @param rxq_ctrl
1155  *   Pointer to Rx queue control structure.
1156  * @param wq_attr
1157  *   WQ attributes structure to fill.
1158  */
1159 static void
1160 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1161 		       struct mlx5_devx_wq_attr *wq_attr)
1162 {
1163 	wq_attr->end_padding_mode = priv->config.cqe_pad ?
1164 					MLX5_WQ_END_PAD_MODE_ALIGN :
1165 					MLX5_WQ_END_PAD_MODE_NONE;
1166 	wq_attr->pd = priv->sh->pdn;
1167 	wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1168 	wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1169 	wq_attr->dbr_umem_valid = 1;
1170 	wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1171 	wq_attr->wq_umem_valid = 1;
1172 }
1173 
1174 /**
1175  * Create a RQ object using DevX.
1176  *
1177  * @param dev
1178  *   Pointer to Ethernet device.
1179  * @param idx
1180  *   Queue index in DPDK Rx queue array
1181  * @param cqn
1182  *   CQ number to use with this RQ.
1183  *
1184  * @return
1185  *   The DevX object initialised, NULL otherwise and rte_errno is set.
1186  */
1187 static struct mlx5_devx_obj *
1188 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1189 {
1190 	struct mlx5_priv *priv = dev->data->dev_private;
1191 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1192 	struct mlx5_rxq_ctrl *rxq_ctrl =
1193 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1194 	struct mlx5_devx_create_rq_attr rq_attr;
1195 	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1196 	uint32_t wq_size = 0;
1197 	uint32_t wqe_size = 0;
1198 	uint32_t log_wqe_size = 0;
1199 	void *buf = NULL;
1200 	struct mlx5_devx_obj *rq;
1201 
1202 	memset(&rq_attr, 0, sizeof(rq_attr));
1203 	/* Fill RQ attributes. */
1204 	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1205 	rq_attr.flush_in_error_en = 1;
1206 	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1207 	/* Fill WQ attributes for this RQ. */
1208 	if (mlx5_rxq_mprq_enabled(rxq_data)) {
1209 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1210 		/*
1211 		 * Number of strides in each WQE:
1212 		 * 512*2^single_wqe_log_num_of_strides.
1213 		 */
1214 		rq_attr.wq_attr.single_wqe_log_num_of_strides =
1215 				rxq_data->strd_num_n -
1216 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1217 		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1218 		rq_attr.wq_attr.single_stride_log_num_of_bytes =
1219 				rxq_data->strd_sz_n -
1220 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1221 		wqe_size = sizeof(struct mlx5_wqe_mprq);
1222 	} else {
1223 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1224 		wqe_size = sizeof(struct mlx5_wqe_data_seg);
1225 	}
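	/*
	 * Each WQE carries 2^sges_n scatter entries, hence the WQ stride is
	 * wqe_size << sges_n and the WQ holds 2^(elts_n - sges_n) WQEs.
	 */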
1226 	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1227 	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1228 	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1229 	/* Calculate and allocate WQ memory space. */
1230 	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1231 	wq_size = wqe_n * wqe_size;
1232 	buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
1233 				rxq_ctrl->socket);
1234 	if (!buf)
1235 		return NULL;
1236 	rxq_data->wqes = buf;
1237 	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1238 						     buf, wq_size, 0);
1239 	if (!rxq_ctrl->wq_umem) {
1240 		rte_free(buf);
1241 		return NULL;
1242 	}
1243 	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1244 	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1245 	if (!rq)
1246 		rxq_release_rq_resources(rxq_ctrl);
1247 	return rq;
1248 }
1249 
1250 /**
1251  * Create the Rx hairpin queue object.
1252  *
1253  * @param dev
1254  *   Pointer to Ethernet device.
1255  * @param idx
1256  *   Queue index in DPDK Rx queue array
1257  *
1258  * @return
1259  *   The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1260  */
1261 static struct mlx5_rxq_obj *
1262 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1263 {
1264 	struct mlx5_priv *priv = dev->data->dev_private;
1265 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1266 	struct mlx5_rxq_ctrl *rxq_ctrl =
1267 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1268 	struct mlx5_devx_create_rq_attr attr = { 0 };
1269 	struct mlx5_rxq_obj *tmpl = NULL;
1270 	int ret = 0;
1271 
1272 	MLX5_ASSERT(rxq_data);
1273 	MLX5_ASSERT(!rxq_ctrl->obj);
1274 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1275 				 rxq_ctrl->socket);
1276 	if (!tmpl) {
1277 		DRV_LOG(ERR,
1278 			"port %u Rx queue %u cannot allocate verbs resources",
1279 			dev->data->port_id, rxq_data->idx);
1280 		rte_errno = ENOMEM;
1281 		goto error;
1282 	}
1283 	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1284 	tmpl->rxq_ctrl = rxq_ctrl;
1285 	attr.hairpin = 1;
1286 	/* Workaround for hairpin startup */
1287 	attr.wq_attr.log_hairpin_num_packets = log2above(32);
1288 	/* Workaround for packets larger than 1KB */
1289 	attr.wq_attr.log_hairpin_data_sz =
1290 			priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1291 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1292 					   rxq_ctrl->socket);
1293 	if (!tmpl->rq) {
1294 		DRV_LOG(ERR,
1295 			"port %u Rx hairpin queue %u can't create rq object",
1296 			dev->data->port_id, idx);
1297 		rte_errno = errno;
1298 		goto error;
1299 	}
1300 	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1301 		idx, (void *)&tmpl);
1302 	rte_atomic32_inc(&tmpl->refcnt);
1303 	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1304 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1305 	return tmpl;
1306 error:
1307 	ret = rte_errno; /* Save rte_errno before cleanup. */
1308 	if (tmpl->rq)
1309 		mlx5_devx_cmd_destroy(tmpl->rq);
1310 	rte_errno = ret; /* Restore rte_errno. */
1311 	return NULL;
1312 }
1313 
1314 /**
1315  * Create the Rx queue Verbs/DevX object.
1316  *
1317  * @param dev
1318  *   Pointer to Ethernet device.
1319  * @param idx
1320  *   Queue index in DPDK Rx queue array
1321  * @param type
1322  *   Type of Rx queue object to create.
1323  *
1324  * @return
1325  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1326  */
1327 struct mlx5_rxq_obj *
1328 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1329 		 enum mlx5_rxq_obj_type type)
1330 {
1331 	struct mlx5_priv *priv = dev->data->dev_private;
1332 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1333 	struct mlx5_rxq_ctrl *rxq_ctrl =
1334 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1335 	struct ibv_wq_attr mod;
1336 	unsigned int cqe_n;
1337 	unsigned int wqe_n = 1 << rxq_data->elts_n;
1338 	struct mlx5_rxq_obj *tmpl = NULL;
1339 	struct mlx5dv_cq cq_info;
1340 	struct mlx5dv_rwq rwq;
1341 	int ret = 0;
1342 	struct mlx5dv_obj obj;
1343 
1344 	MLX5_ASSERT(rxq_data);
1345 	MLX5_ASSERT(!rxq_ctrl->obj);
1346 	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1347 		return mlx5_rxq_obj_hairpin_new(dev, idx);
1348 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1349 	priv->verbs_alloc_ctx.obj = rxq_ctrl;
1350 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1351 				 rxq_ctrl->socket);
1352 	if (!tmpl) {
1353 		DRV_LOG(ERR,
1354 			"port %u Rx queue %u cannot allocate verbs resources",
1355 			dev->data->port_id, rxq_data->idx);
1356 		rte_errno = ENOMEM;
1357 		goto error;
1358 	}
1359 	tmpl->type = type;
1360 	tmpl->rxq_ctrl = rxq_ctrl;
1361 	if (rxq_ctrl->irq) {
1362 		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1363 		if (!tmpl->channel) {
1364 			DRV_LOG(ERR, "port %u: comp channel creation failure",
1365 				dev->data->port_id);
1366 			rte_errno = ENOMEM;
1367 			goto error;
1368 		}
1369 	}
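	/*
	 * CQ size: with Multi-Packet RQ each WQE may complete up to
	 * 2^strd_num_n packets (one per stride), otherwise one CQE per WQE.
	 */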
1370 	if (mlx5_rxq_mprq_enabled(rxq_data))
1371 		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1372 	else
1373 		cqe_n = wqe_n  - 1;
1374 	tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1375 	if (!tmpl->cq) {
1376 		DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1377 			dev->data->port_id, idx);
1378 		rte_errno = ENOMEM;
1379 		goto error;
1380 	}
1381 	obj.cq.in = tmpl->cq;
1382 	obj.cq.out = &cq_info;
1383 	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1384 	if (ret) {
1385 		rte_errno = ret;
1386 		goto error;
1387 	}
1388 	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1389 		DRV_LOG(ERR,
1390 			"port %u wrong MLX5_CQE_SIZE environment variable"
1391 			" value: it should be set to %u",
1392 			dev->data->port_id, RTE_CACHE_LINE_SIZE);
1393 		rte_errno = EINVAL;
1394 		goto error;
1395 	}
1396 	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1397 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1398 	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1399 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1400 	/* Allocate door-bell for types created with DevX. */
1401 	if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1402 		struct mlx5_devx_dbr_page *dbr_page;
1403 		int64_t dbr_offset;
1404 
1405 		dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1406 		if (dbr_offset < 0)
1407 			goto error;
1408 		rxq_ctrl->dbr_offset = dbr_offset;
1409 		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1410 		rxq_ctrl->dbr_umem_id_valid = 1;
1411 		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1412 					       (uintptr_t)rxq_ctrl->dbr_offset);
1413 	}
1414 	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1415 		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1416 					   tmpl);
1417 		if (!tmpl->wq) {
1418 			DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1419 				dev->data->port_id, idx);
1420 			rte_errno = ENOMEM;
1421 			goto error;
1422 		}
1423 		/* Change queue state to ready. */
1424 		mod = (struct ibv_wq_attr){
1425 			.attr_mask = IBV_WQ_ATTR_STATE,
1426 			.wq_state = IBV_WQS_RDY,
1427 		};
1428 		ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1429 		if (ret) {
1430 			DRV_LOG(ERR,
1431 				"port %u Rx queue %u WQ state to IBV_WQS_RDY"
1432 				" failed", dev->data->port_id, idx);
1433 			rte_errno = ret;
1434 			goto error;
1435 		}
1436 		obj.rwq.in = tmpl->wq;
1437 		obj.rwq.out = &rwq;
1438 		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1439 		if (ret) {
1440 			rte_errno = ret;
1441 			goto error;
1442 		}
1443 		rxq_data->wqes = rwq.buf;
1444 		rxq_data->rq_db = rwq.dbrec;
1445 	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1446 		struct mlx5_devx_modify_rq_attr rq_attr;
1447 
1448 		memset(&rq_attr, 0, sizeof(rq_attr));
1449 		tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1450 		if (!tmpl->rq) {
1451 			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1452 				dev->data->port_id, idx);
1453 			rte_errno = ENOMEM;
1454 			goto error;
1455 		}
1456 		/* Change queue state to ready. */
1457 		rq_attr.rq_state = MLX5_RQC_STATE_RST;
1458 		rq_attr.state = MLX5_RQC_STATE_RDY;
1459 		ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1460 		if (ret)
1461 			goto error;
1462 	}
1463 	/* Fill the rings. */
1464 	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1465 	rxq_data->cq_db = cq_info.dbrec;
1466 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1467 	rxq_data->cq_uar = cq_info.cq_uar;
1468 	rxq_data->cqn = cq_info.cqn;
1469 	rxq_data->cq_arm_sn = 0;
1470 	mlx5_rxq_initialize(rxq_data);
1471 	rxq_data->cq_ci = 0;
1472 	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1473 		idx, (void *)&tmpl);
1474 	rte_atomic32_inc(&tmpl->refcnt);
1475 	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1476 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1477 	return tmpl;
1478 error:
1479 	if (tmpl) {
1480 		ret = rte_errno; /* Save rte_errno before cleanup. */
1481 		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1482 			claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1483 		else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1484 			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1485 		if (tmpl->cq)
1486 			claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1487 		if (tmpl->channel)
1488 			claim_zero(mlx5_glue->destroy_comp_channel
1489 							(tmpl->channel));
1490 		rte_free(tmpl);
1491 		rte_errno = ret; /* Restore rte_errno. */
1492 	}
1493 	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1494 		rxq_release_rq_resources(rxq_ctrl);
1495 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1496 	return NULL;
1497 }
1498 
1499 /**
1500  * Verify the Rx queue objects list is empty
1501  * Verify that the Rx queue objects list is empty.
1502  * @param dev
1503  *   Pointer to Ethernet device.
1504  *
1505  * @return
1506  *   The number of objects not released.
1507  */
1508 int
1509 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1510 {
1511 	struct mlx5_priv *priv = dev->data->dev_private;
1512 	int ret = 0;
1513 	struct mlx5_rxq_obj *rxq_obj;
1514 
1515 	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1516 		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1517 			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1518 		++ret;
1519 	}
1520 	return ret;
1521 }
1522 
1523 /**
1524  * Callback function to initialize mbufs for Multi-Packet RQ.
1525  */
1526 static inline void
1527 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1528 		    void *_m, unsigned int i __rte_unused)
1529 {
1530 	struct mlx5_mprq_buf *buf = _m;
1531 	struct rte_mbuf_ext_shared_info *shinfo;
1532 	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1533 	unsigned int j;
1534 
1535 	memset(_m, 0, sizeof(*buf));
1536 	buf->mp = mp;
1537 	rte_atomic16_set(&buf->refcnt, 1);
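	/*
	 * One shared-info descriptor per stride: a stride may be attached to
	 * an mbuf as an external buffer and is then released through
	 * mlx5_mprq_buf_free_cb() with the parent buffer as opaque context.
	 */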
1538 	for (j = 0; j != strd_n; ++j) {
1539 		shinfo = &buf->shinfos[j];
1540 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
1541 		shinfo->fcb_opaque = buf;
1542 	}
1543 }
1544 
1545 /**
1546  * Free mempool of Multi-Packet RQ.
1547  *
1548  * @param dev
1549  *   Pointer to Ethernet device.
1550  *
1551  * @return
1552  *   0 on success, negative errno value on failure.
1553  */
1554 int
1555 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1556 {
1557 	struct mlx5_priv *priv = dev->data->dev_private;
1558 	struct rte_mempool *mp = priv->mprq_mp;
1559 	unsigned int i;
1560 
1561 	if (mp == NULL)
1562 		return 0;
1563 	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1564 		dev->data->port_id, mp->name);
1565 	/*
1566 	 * If a buffer in the pool has been externally attached to an mbuf and is
1567 	 * still in use by the application, destroying the Rx queue can corrupt
1568 	 * the packet. It is unlikely, but it can happen if the application
1569 	 * dynamically creates and destroys queues while holding Rx packets.
1570 	 *
1571 	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1572 	 * RQ isn't provided by application but managed by PMD.
1573 	 */
1574 	if (!rte_mempool_full(mp)) {
1575 		DRV_LOG(ERR,
1576 			"port %u mempool for Multi-Packet RQ is still in use",
1577 			dev->data->port_id);
1578 		rte_errno = EBUSY;
1579 		return -rte_errno;
1580 	}
1581 	rte_mempool_free(mp);
1582 	/* Unset mempool for each Rx queue. */
1583 	for (i = 0; i != priv->rxqs_n; ++i) {
1584 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1585 
1586 		if (rxq == NULL)
1587 			continue;
1588 		rxq->mprq_mp = NULL;
1589 	}
1590 	priv->mprq_mp = NULL;
1591 	return 0;
1592 }
1593 
1594 /**
1595  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1596  * mempool. If already allocated, reuse it if there are enough elements.
1597  * Otherwise, resize it.
1598  *
1599  * @param dev
1600  *   Pointer to Ethernet device.
1601  *
1602  * @return
1603  *   0 on success, negative errno value on failure.
1604  */
1605 int
1606 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1607 {
1608 	struct mlx5_priv *priv = dev->data->dev_private;
1609 	struct rte_mempool *mp = priv->mprq_mp;
1610 	char name[RTE_MEMPOOL_NAMESIZE];
1611 	unsigned int desc = 0;
1612 	unsigned int buf_len;
1613 	unsigned int obj_num;
1614 	unsigned int obj_size;
1615 	unsigned int strd_num_n = 0;
1616 	unsigned int strd_sz_n = 0;
1617 	unsigned int i;
1618 	unsigned int n_ibv = 0;
1619 
1620 	if (!mlx5_mprq_enabled(dev))
1621 		return 0;
1622 	/* Count the total number of descriptors configured. */
1623 	for (i = 0; i != priv->rxqs_n; ++i) {
1624 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1625 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1626 			(rxq, struct mlx5_rxq_ctrl, rxq);
1627 
1628 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1629 			continue;
1630 		n_ibv++;
1631 		desc += 1 << rxq->elts_n;
1632 		/* Get the max number of strides. */
1633 		if (strd_num_n < rxq->strd_num_n)
1634 			strd_num_n = rxq->strd_num_n;
1635 		/* Get the max size of a stride. */
1636 		if (strd_sz_n < rxq->strd_sz_n)
1637 			strd_sz_n = rxq->strd_sz_n;
1638 	}
1639 	MLX5_ASSERT(strd_num_n && strd_sz_n);
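	/*
	 * Pool object layout: struct mlx5_mprq_buf, the stride memory
	 * ((1 << strd_num_n) * (1 << strd_sz_n) bytes), one shared-info
	 * descriptor per stride, plus the mbuf headroom.
	 */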
1640 	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1641 	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1642 		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1643 	/*
1644 	 * Received packets can be either memcpy'd or externally referenced. When
1645 	 * a packet is attached to an mbuf as an external buffer, it is impossible
1646 	 * to predict how the application will queue the buffers, so the exact
1647 	 * number of needed buffers cannot be pre-allocated; instead, enough
1648 	 * buffers are prepared speculatively.
1649 	 *
1650 	 * In the data path, if this Mempool is depleted, PMD will try to memcpy
1651 	 * received packets to buffers provided by application (rxq->mp) until
1652 	 * this Mempool gets available again.
1653 	 */
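	/*
	 * A 4x margin over the configured descriptors serves as the
	 * speculative headroom described above.
	 */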
1654 	desc *= 4;
1655 	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1656 	/*
1657 	 * rte_mempool_create_empty() has sanity check to refuse large cache
1658 	 * rte_mempool_create_empty() has a sanity check that refuses a cache
1659 	 * size which is too large compared to the number of elements.
1660 	 * constant number 2 instead.
1661 	 */
1662 	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1663 	/* Check whether a mempool is already allocated and can be reused. */
1664 	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1665 		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1666 			dev->data->port_id, mp->name);
1667 		/* Reuse. */
1668 		goto exit;
1669 	} else if (mp != NULL) {
1670 		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1671 			dev->data->port_id, mp->name);
1672 		/*
1673 		 * If freeing fails, the mempool may still be in use and there is
1674 		 * no choice but to keep using the existing one. On buffer
1675 		 * underrun, packets will be memcpy'd instead of attached as
1676 		 * external buffers.
1677 		 */
1678 		if (mlx5_mprq_free_mp(dev)) {
1679 			if (mp->elt_size >= obj_size)
1680 				goto exit;
1681 			else
1682 				return -rte_errno;
1683 		}
1684 	}
1685 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1686 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1687 				0, NULL, NULL, mlx5_mprq_buf_init,
1688 				(void *)(uintptr_t)(1 << strd_num_n),
1689 				dev->device->numa_node, 0);
1690 	if (mp == NULL) {
1691 		DRV_LOG(ERR,
1692 			"port %u failed to allocate a mempool for"
1693 			" Multi-Packet RQ, count=%u, size=%u",
1694 			dev->data->port_id, obj_num, obj_size);
1695 		rte_errno = ENOMEM;
1696 		return -rte_errno;
1697 	}
1698 	priv->mprq_mp = mp;
1699 exit:
1700 	/* Set mempool for each Rx queue. */
1701 	for (i = 0; i != priv->rxqs_n; ++i) {
1702 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1703 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1704 			(rxq, struct mlx5_rxq_ctrl, rxq);
1705 
1706 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1707 			continue;
1708 		rxq->mprq_mp = mp;
1709 	}
1710 	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1711 		dev->data->port_id);
1712 	return 0;
1713 }
1714 
1715 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1716 					sizeof(struct rte_vlan_hdr) * 2 + \
1717 					sizeof(struct rte_ipv6_hdr)))
1718 #define MAX_TCP_OPTION_SIZE 40u
1719 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1720 				 sizeof(struct rte_tcp_hdr) + \
1721 				 MAX_TCP_OPTION_SIZE))
1722 
1723 /**
1724  * Adjust the maximum LRO message size.
1725  *
1726  * @param dev
1727  *   Pointer to Ethernet device.
1728  * @param idx
1729  *   RX queue index.
1730  * @param max_lro_size
1731  *   The maximum size for LRO packet.
1732  */
1733 static void
1734 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1735 			     uint32_t max_lro_size)
1736 {
1737 	struct mlx5_priv *priv = dev->data->dev_private;
1738 
1739 	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1740 	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1741 	    MLX5_MAX_TCP_HDR_OFFSET)
1742 		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1743 	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1744 	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
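	/*
	 * max_lro_msg_size is kept in MLX5_LRO_SEG_CHUNK_SIZE units
	 * (scaled back for the log below).
	 */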
1745 	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1746 	if (priv->max_lro_msg_size)
1747 		priv->max_lro_msg_size =
1748 			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1749 	else
1750 		priv->max_lro_msg_size = max_lro_size;
1751 	DRV_LOG(DEBUG,
1752 		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1753 		dev->data->port_id, idx,
1754 		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1755 }
1756 
1757 /**
1758  * Create a DPDK Rx queue.
1759  *
1760  * @param dev
1761  *   Pointer to Ethernet device.
1762  * @param idx
1763  *   RX queue index.
1764  * @param desc
1765  *   Number of descriptors to configure in queue.
1766  * @param socket
1767  *   NUMA socket on which memory must be allocated.
1768  *
1769  * @return
1770  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1771  */
1772 struct mlx5_rxq_ctrl *
1773 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1774 	     unsigned int socket, const struct rte_eth_rxconf *conf,
1775 	     struct rte_mempool *mp)
1776 {
1777 	struct mlx5_priv *priv = dev->data->dev_private;
1778 	struct mlx5_rxq_ctrl *tmpl;
1779 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1780 	unsigned int mprq_stride_size;
1781 	struct mlx5_dev_config *config = &priv->config;
1782 	unsigned int strd_headroom_en;
1783 	/*
1784 	 * Always allocate extra slots, even if eventually
1785 	 * the vector Rx will not be used.
1786 	 */
1787 	uint16_t desc_n =
1788 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1789 	uint64_t offloads = conf->offloads |
1790 			   dev->data->dev_conf.rxmode.offloads;
1791 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1792 	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1793 	unsigned int max_rx_pkt_len = lro_on_queue ?
1794 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
1795 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
1796 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1797 							RTE_PKTMBUF_HEADROOM;
1798 	unsigned int max_lro_size = 0;
1799 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1800 
1801 	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1802 						    DEV_RX_OFFLOAD_SCATTER)) {
1803 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1804 			" configured and not enough mbuf space (%u) to contain "
1805 			"the maximum RX packet length (%u) with head-room (%u)",
1806 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1807 			RTE_PKTMBUF_HEADROOM);
1808 		rte_errno = ENOSPC;
1809 		return NULL;
1810 	}
1811 	tmpl = rte_calloc_socket("RXQ", 1,
1812 				 sizeof(*tmpl) +
1813 				 desc_n * sizeof(struct rte_mbuf *),
1814 				 0, socket);
1815 	if (!tmpl) {
1816 		rte_errno = ENOMEM;
1817 		return NULL;
1818 	}
1819 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1820 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1821 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1822 		/* rte_errno is already set. */
1823 		goto error;
1824 	}
1825 	tmpl->socket = socket;
1826 	if (dev->data->dev_conf.intr_conf.rxq)
1827 		tmpl->irq = 1;
1828 	/*
1829 	 * An LRO packet may consume all the stride memory, hence we cannot
1830 	 * guarantee head-room near the packet memory in the stride.
1831 	 * In this case scatter is necessarily enabled and an empty mbuf may be
1832 	 * added at the start for the head-room.
1833 	 */
1834 	if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
1835 	    non_scatter_min_mbuf_size > mb_len) {
1836 		strd_headroom_en = 0;
1837 		mprq_stride_size = RTE_MIN(max_rx_pkt_len,
1838 					1u << config->mprq.max_stride_size_n);
1839 	} else {
1840 		strd_headroom_en = 1;
1841 		mprq_stride_size = non_scatter_min_mbuf_size;
1842 	}
1843 	/*
1844 	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1845 	 * following conditions are met:
1846 	 *  - MPRQ is enabled.
1847 	 *  - The number of descs is more than the number of strides.
1848 	 *  - max_rx_pkt_len plus overhead is less than the max size of a
1849 	 *    stride.
1850 	 *  Otherwise, enable Rx scatter if necessary.
1851 	 */
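	/*
	 * Worked example (illustrative): with stride_num_n = 6 each MPRQ
	 * WQE carries 2^6 = 64 strides, so a request for 512 descriptors
	 * is trimmed below to 512 >> 6 = 8 multi-packet WQEs.
	 */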
1852 	if (mprq_en &&
1853 	    desc > (1U << config->mprq.stride_num_n) &&
1854 	    mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1855 		/* TODO: Rx scatter isn't supported yet. */
1856 		tmpl->rxq.sges_n = 0;
1857 		/* Trim the number of descs needed. */
1858 		desc >>= config->mprq.stride_num_n;
1859 		tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1860 		tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1861 					      config->mprq.min_stride_size_n);
1862 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1863 		tmpl->rxq.strd_headroom_en = strd_headroom_en;
1864 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1865 				config->mprq.max_memcpy_len);
1866 		max_lro_size = RTE_MIN(max_rx_pkt_len,
1867 				       (1u << tmpl->rxq.strd_num_n) *
1868 				       (1u << tmpl->rxq.strd_sz_n));
1869 		DRV_LOG(DEBUG,
1870 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
1871 			" strd_num_n = %u, strd_sz_n = %u",
1872 			dev->data->port_id, idx,
1873 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1874 	} else if (max_rx_pkt_len <= first_mb_free_size) {
1875 		tmpl->rxq.sges_n = 0;
1876 		max_lro_size = max_rx_pkt_len;
1877 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1878 		unsigned int size = non_scatter_min_mbuf_size;
1879 		unsigned int sges_n;
1880 
1881 		if (lro_on_queue && first_mb_free_size <
1882 		    MLX5_MAX_LRO_HEADER_FIX) {
1883 			DRV_LOG(ERR, "Not enough space in the first segment (%u)"
1884 				" to include the max header size (%u) for LRO",
1885 				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1886 			rte_errno = ENOTSUP;
1887 			goto error;
1888 		}
1889 		/*
1890 		 * Determine the number of SGEs needed for a full packet
1891 		 * and round it to the next power of two.
1892 		 */
1893 		sges_n = log2above((size / mb_len) + !!(size % mb_len));
1894 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1895 			DRV_LOG(ERR,
1896 				"port %u too many SGEs (%u) needed to handle"
1897 				" requested maximum packet size %u, the maximum"
1898 				" supported is %u", dev->data->port_id,
1899 				1 << sges_n, max_rx_pkt_len,
1900 				1u << MLX5_MAX_LOG_RQ_SEGS);
1901 			rte_errno = ENOTSUP;
1902 			goto error;
1903 		}
1904 		tmpl->rxq.sges_n = sges_n;
1905 		max_lro_size = max_rx_pkt_len;
1906 	}
1907 	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1908 		DRV_LOG(WARNING,
1909 			"port %u MPRQ is requested but cannot be enabled"
1910 			" (requested: desc = %u, stride_sz = %u,"
1911 			" supported: min_stride_num = %u, max_stride_sz = %u).",
1912 			dev->data->port_id, desc, mprq_stride_size,
1913 			(1 << config->mprq.stride_num_n),
1914 			(1 << config->mprq.max_stride_size_n));
1915 	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1916 		dev->data->port_id, 1 << tmpl->rxq.sges_n);
1917 	if (desc % (1 << tmpl->rxq.sges_n)) {
1918 		DRV_LOG(ERR,
1919 			"port %u number of Rx queue descriptors (%u) is not a"
1920 			" multiple of SGEs per packet (%u)",
1921 			dev->data->port_id,
1922 			desc,
1923 			1 << tmpl->rxq.sges_n);
1924 		rte_errno = EINVAL;
1925 		goto error;
1926 	}
1927 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1928 	/* Toggle RX checksum offload if hardware supports it. */
1929 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1930 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1931 	/* Configure VLAN stripping. */
1932 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1933 	/* By default, FCS (CRC) is stripped by hardware. */
1934 	tmpl->rxq.crc_present = 0;
1935 	tmpl->rxq.lro = lro_on_queue;
1936 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1937 		if (config->hw_fcs_strip) {
1938 			/*
1939 			 * RQs used for LRO-enabled TIRs should not be
1940 			 * configured to scatter the FCS.
1941 			 */
1942 			if (lro_on_queue)
1943 				DRV_LOG(WARNING,
1944 					"port %u CRC stripping has been "
1945 					"disabled but will still be performed "
1946 					"by hardware, because LRO is enabled",
1947 					dev->data->port_id);
1948 			else
1949 				tmpl->rxq.crc_present = 1;
1950 		} else {
1951 			DRV_LOG(WARNING,
1952 				"port %u CRC stripping has been disabled but will"
1953 				" still be performed by hardware, make sure MLNX_OFED"
1954 				" and firmware are up to date",
1955 				dev->data->port_id);
1956 		}
1957 	}
1958 	DRV_LOG(DEBUG,
1959 		"port %u CRC stripping is %s, %u bytes will be subtracted from"
1960 		" incoming frames to hide it",
1961 		dev->data->port_id,
1962 		tmpl->rxq.crc_present ? "disabled" : "enabled",
1963 		tmpl->rxq.crc_present << 2);
1964 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1965 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1966 	/* Save port ID. */
1967 	tmpl->rxq.port_id = dev->data->port_id;
1968 	tmpl->priv = priv;
1969 	tmpl->rxq.mp = mp;
1970 	tmpl->rxq.elts_n = log2above(desc);
1971 	tmpl->rxq.rq_repl_thresh =
1972 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1973 	tmpl->rxq.elts =
1974 		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1975 #ifndef RTE_ARCH_64
1976 	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1977 #endif
1978 	tmpl->rxq.idx = idx;
1979 	rte_atomic32_inc(&tmpl->refcnt);
1980 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1981 	return tmpl;
1982 error:
1983 	rte_free(tmpl);
1984 	return NULL;
1985 }
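
/*
 * Caller sketch (illustrative, simplified): a queue setup path would
 * typically create the control structure and publish its data part, where
 * priv is dev->data->dev_private:
 *
 *	struct mlx5_rxq_ctrl *ctrl =
 *		mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
 *
 *	if (ctrl == NULL)
 *		return -rte_errno;
 *	(*priv->rxqs)[idx] = &ctrl->rxq;
 */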
1986 
1987 /**
1988  * Create a DPDK Rx hairpin queue.
1989  *
1990  * @param dev
1991  *   Pointer to Ethernet device.
1992  * @param idx
1993  *   RX queue index.
1994  * @param desc
1995  *   Number of descriptors to configure in queue.
1996  * @param hairpin_conf
1997  *   The hairpin binding configuration.
1998  *
1999  * @return
2000  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
2001  */
2002 struct mlx5_rxq_ctrl *
2003 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2004 		     const struct rte_eth_hairpin_conf *hairpin_conf)
2005 {
2006 	struct mlx5_priv *priv = dev->data->dev_private;
2007 	struct mlx5_rxq_ctrl *tmpl;
2008 
2009 	tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
2010 	if (!tmpl) {
2011 		rte_errno = ENOMEM;
2012 		return NULL;
2013 	}
2014 	tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2015 	tmpl->socket = SOCKET_ID_ANY;
2016 	tmpl->rxq.rss_hash = 0;
2017 	tmpl->rxq.port_id = dev->data->port_id;
2018 	tmpl->priv = priv;
2019 	tmpl->rxq.mp = NULL;
2020 	tmpl->rxq.elts_n = log2above(desc);
2021 	tmpl->rxq.elts = NULL;
2022 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2023 	tmpl->hairpin_conf = *hairpin_conf;
2024 	tmpl->rxq.idx = idx;
2025 	rte_atomic32_inc(&tmpl->refcnt);
2026 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2027 	return tmpl;
2028 }
2029 
2030 /**
2031  * Get a Rx queue.
2032  *
2033  * @param dev
2034  *   Pointer to Ethernet device.
2035  * @param idx
2036  *   RX queue index.
2037  *
2038  * @return
2039  *   A pointer to the queue if it exists, NULL otherwise.
2040  */
2041 struct mlx5_rxq_ctrl *
2042 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2043 {
2044 	struct mlx5_priv *priv = dev->data->dev_private;
2045 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2046 
2047 	if ((*priv->rxqs)[idx]) {
2048 		rxq_ctrl = container_of((*priv->rxqs)[idx],
2049 					struct mlx5_rxq_ctrl,
2050 					rxq);
2051 		mlx5_rxq_obj_get(dev, idx);
2052 		rte_atomic32_inc(&rxq_ctrl->refcnt);
2053 	}
2054 	return rxq_ctrl;
2055 }
2056 
2057 /**
2058  * Release a Rx queue.
2059  *
2060  * @param dev
2061  *   Pointer to Ethernet device.
2062  * @param idx
2063  *   RX queue index.
2064  *
2065  * @return
2066  *   1 while a reference on it exists, 0 when freed.
2067  */
2068 int
2069 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2070 {
2071 	struct mlx5_priv *priv = dev->data->dev_private;
2072 	struct mlx5_rxq_ctrl *rxq_ctrl;
2073 
2074 	if (!(*priv->rxqs)[idx])
2075 		return 0;
2076 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2077 	MLX5_ASSERT(rxq_ctrl->priv);
2078 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2079 		rxq_ctrl->obj = NULL;
2080 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2081 		if (rxq_ctrl->dbr_umem_id_valid)
2082 			claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
2083 						    rxq_ctrl->dbr_offset));
2084 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2085 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2086 		LIST_REMOVE(rxq_ctrl, next);
2087 		rte_free(rxq_ctrl);
2088 		(*priv->rxqs)[idx] = NULL;
2089 		return 0;
2090 	}
2091 	return 1;
2092 }
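
/*
 * Reference-counting sketch (illustrative only): every mlx5_rxq_get() must
 * be balanced by a matching mlx5_rxq_release(); the control structure is
 * freed on the last release:
 *
 *	struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
 *
 *	if (ctrl != NULL) {
 *		... use ctrl ...
 *		mlx5_rxq_release(dev, idx);
 *	}
 */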
2093 
2094 /**
2095  * Verify the Rx Queue list is empty.
2096  *
2097  * @param dev
2098  *   Pointer to Ethernet device.
2099  *
2100  * @return
2101  *   The number of objects not released.
2102  */
2103 int
2104 mlx5_rxq_verify(struct rte_eth_dev *dev)
2105 {
2106 	struct mlx5_priv *priv = dev->data->dev_private;
2107 	struct mlx5_rxq_ctrl *rxq_ctrl;
2108 	int ret = 0;
2109 
2110 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2111 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2112 			dev->data->port_id, rxq_ctrl->rxq.idx);
2113 		++ret;
2114 	}
2115 	return ret;
2116 }
2117 
2118 /**
2119  * Get a Rx queue type.
2120  *
2121  * @param dev
2122  *   Pointer to Ethernet device.
2123  * @param idx
2124  *   Rx queue index.
2125  *
2126  * @return
2127  *   The Rx queue type.
2128  */
2129 enum mlx5_rxq_type
2130 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2131 {
2132 	struct mlx5_priv *priv = dev->data->dev_private;
2133 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2134 
2135 	if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2136 		rxq_ctrl = container_of((*priv->rxqs)[idx],
2137 					struct mlx5_rxq_ctrl,
2138 					rxq);
2139 		return rxq_ctrl->type;
2140 	}
2141 	return MLX5_RXQ_TYPE_UNDEFINED;
2142 }
2143 
2144 /**
2145  * Create an indirection table.
2146  *
2147  * @param dev
2148  *   Pointer to Ethernet device.
2149  * @param queues
2150  *   Queues entering the indirection table.
2151  * @param queues_n
2152  *   Number of queues in the array.
 * @param type
 *   Indirection table type, Verbs or DevX.
2153  *
2154  * @return
2155  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2156  */
2157 static struct mlx5_ind_table_obj *
2158 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2159 		       uint32_t queues_n, enum mlx5_ind_tbl_type type)
2160 {
2161 	struct mlx5_priv *priv = dev->data->dev_private;
2162 	struct mlx5_ind_table_obj *ind_tbl;
2163 	unsigned int i = 0, j = 0, k = 0;
2164 
2165 	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
2166 			     queues_n * sizeof(uint16_t), 0);
2167 	if (!ind_tbl) {
2168 		rte_errno = ENOMEM;
2169 		return NULL;
2170 	}
2171 	ind_tbl->type = type;
2172 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2173 		const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2174 			log2above(queues_n) :
2175 			log2above(priv->config.ind_table_max_size);
2176 		struct ibv_wq *wq[1 << wq_n];
2177 
2178 		for (i = 0; i != queues_n; ++i) {
2179 			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2180 								 queues[i]);
2181 			if (!rxq)
2182 				goto error;
2183 			wq[i] = rxq->obj->wq;
2184 			ind_tbl->queues[i] = queues[i];
2185 		}
2186 		ind_tbl->queues_n = queues_n;
2187 		/* Finalise indirection table. */
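		/*
		 * The remaining slots wrap around the configured queues, e.g.
		 * 3 queues in a table of 8 entries are laid out as
		 * q0 q1 q2 q0 q1 q2 q0 q1.
		 */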
2188 		k = i; /* Retain value of i for use in error case. */
2189 		for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2190 			wq[k] = wq[j];
2191 		ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2192 			(priv->sh->ctx,
2193 			 &(struct ibv_rwq_ind_table_init_attr){
2194 				.log_ind_tbl_size = wq_n,
2195 				.ind_tbl = wq,
2196 				.comp_mask = 0,
2197 			});
2198 		if (!ind_tbl->ind_table) {
2199 			rte_errno = errno;
2200 			goto error;
2201 		}
2202 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2203 		struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2204 		const unsigned int rqt_n =
2205 			1 << (rte_is_power_of_2(queues_n) ?
2206 			      log2above(queues_n) :
2207 			      log2above(priv->config.ind_table_max_size));
2208 
2209 		rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
2210 				      rqt_n * sizeof(uint32_t), 0);
2211 		if (!rqt_attr) {
2212 			DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2213 				dev->data->port_id);
2214 			rte_errno = ENOMEM;
2215 			goto error;
2216 		}
2217 		rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2218 		rqt_attr->rqt_actual_size = rqt_n;
2219 		for (i = 0; i != queues_n; ++i) {
2220 			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2221 								 queues[i]);
2222 			if (!rxq)
2223 				goto error;
2224 			rqt_attr->rq_list[i] = rxq->obj->rq->id;
2225 			ind_tbl->queues[i] = queues[i];
2226 		}
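		/* Wrap the RQ list around the configured queues, as above. */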
2227 		k = i; /* Retain value of i for use in error case. */
2228 		for (j = 0; k != rqt_n; ++k, ++j)
2229 			rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
2230 		ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2231 							rqt_attr);
2232 		rte_free(rqt_attr);
2233 		if (!ind_tbl->rqt) {
2234 			DRV_LOG(ERR, "port %u cannot create DevX RQT",
2235 				dev->data->port_id);
2236 			rte_errno = errno;
2237 			goto error;
2238 		}
2239 		ind_tbl->queues_n = queues_n;
2240 	}
2241 	rte_atomic32_inc(&ind_tbl->refcnt);
2242 	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2243 	return ind_tbl;
2244 error:
2245 	for (j = 0; j < i; j++)
2246 		mlx5_rxq_release(dev, ind_tbl->queues[j]);
2247 	rte_free(ind_tbl);
2248 	DEBUG("port %u cannot create indirection table", dev->data->port_id);
2249 	return NULL;
2250 }
2251 
2252 /**
2253  * Get an indirection table.
2254  *
2255  * @param dev
2256  *   Pointer to Ethernet device.
2257  * @param queues
2258  *   Queues entering the indirection table.
2259  * @param queues_n
2260  *   Number of queues in the array.
2261  *
2262  * @return
2263  *   An indirection table if found.
2264  */
2265 static struct mlx5_ind_table_obj *
2266 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2267 		       uint32_t queues_n)
2268 {
2269 	struct mlx5_priv *priv = dev->data->dev_private;
2270 	struct mlx5_ind_table_obj *ind_tbl;
2271 
2272 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2273 		if ((ind_tbl->queues_n == queues_n) &&
2274 		    (memcmp(ind_tbl->queues, queues,
2275 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2276 		     == 0))
2277 			break;
2278 	}
2279 	if (ind_tbl) {
2280 		unsigned int i;
2281 
2282 		rte_atomic32_inc(&ind_tbl->refcnt);
2283 		for (i = 0; i != ind_tbl->queues_n; ++i)
2284 			mlx5_rxq_get(dev, ind_tbl->queues[i]);
2285 	}
2286 	return ind_tbl;
2287 }
2288 
2289 /**
2290  * Release an indirection table.
2291  *
2292  * @param dev
2293  *   Pointer to Ethernet device.
2294  * @param ind_table
2295  *   Indirection table to release.
2296  *
2297  * @return
2298  *   1 while a reference on it exists, 0 when freed.
2299  */
2300 static int
2301 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2302 			   struct mlx5_ind_table_obj *ind_tbl)
2303 {
2304 	unsigned int i;
2305 
2306 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2307 		if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2308 			claim_zero(mlx5_glue->destroy_rwq_ind_table
2309 							(ind_tbl->ind_table));
2310 		else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2311 			claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2312 	}
2313 	for (i = 0; i != ind_tbl->queues_n; ++i)
2314 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2315 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2316 		LIST_REMOVE(ind_tbl, next);
2317 		rte_free(ind_tbl);
2318 		return 0;
2319 	}
2320 	return 1;
2321 }
2322 
2323 /**
2324  * Verify the indirection table list is empty.
2325  *
2326  * @param dev
2327  *   Pointer to Ethernet device.
2328  *
2329  * @return
2330  *   The number of objects not released.
2331  */
2332 int
2333 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2334 {
2335 	struct mlx5_priv *priv = dev->data->dev_private;
2336 	struct mlx5_ind_table_obj *ind_tbl;
2337 	int ret = 0;
2338 
2339 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2340 		DRV_LOG(DEBUG,
2341 			"port %u indirection table obj %p still referenced",
2342 			dev->data->port_id, (void *)ind_tbl);
2343 		++ret;
2344 	}
2345 	return ret;
2346 }
2347 
2348 /**
2349  * Create an Rx Hash queue.
2350  *
2351  * @param dev
2352  *   Pointer to Ethernet device.
2353  * @param rss_key
2354  *   RSS key for the Rx hash queue.
2355  * @param rss_key_len
2356  *   RSS key length.
2357  * @param hash_fields
2358  *   Verbs protocol hash field to make the RSS on.
2359  * @param queues
2360  *   Queues entering the hash queue. In case of empty hash_fields only the
2361  *   first queue index will be taken for the indirection table.
2362  * @param queues_n
2363  *   Number of queues.
2364  * @param tunnel
2365  *   Tunnel type.
2366  *
2367  * @return
2368  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2369  */
2370 struct mlx5_hrxq *
2371 mlx5_hrxq_new(struct rte_eth_dev *dev,
2372 	      const uint8_t *rss_key, uint32_t rss_key_len,
2373 	      uint64_t hash_fields,
2374 	      const uint16_t *queues, uint32_t queues_n,
2375 	      int tunnel __rte_unused)
2376 {
2377 	struct mlx5_priv *priv = dev->data->dev_private;
2378 	struct mlx5_hrxq *hrxq;
2379 	struct ibv_qp *qp = NULL;
2380 	struct mlx5_ind_table_obj *ind_tbl;
2381 	int err;
2382 	struct mlx5_devx_obj *tir = NULL;
2383 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2384 	struct mlx5_rxq_ctrl *rxq_ctrl =
2385 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2386 
2387 	queues_n = hash_fields ? queues_n : 1;
2388 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2389 	if (!ind_tbl) {
2390 		enum mlx5_ind_tbl_type type;
2391 
2392 		type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2393 				MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2394 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2395 	}
2396 	if (!ind_tbl) {
2397 		rte_errno = ENOMEM;
2398 		return NULL;
2399 	}
2400 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2401 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2402 		struct mlx5dv_qp_init_attr qp_init_attr;
2403 
2404 		memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2405 		if (tunnel) {
2406 			qp_init_attr.comp_mask =
2407 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2408 			qp_init_attr.create_flags =
2409 				MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2410 		}
2411 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2412 		if (dev->data->dev_conf.lpbk_mode) {
2413 			/*
2414 			 * Allow packets sent from NIC loopback
2415 			 * without the source MAC check.
2416 			 */
2417 			qp_init_attr.comp_mask |=
2418 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2419 			qp_init_attr.create_flags |=
2420 				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2421 		}
2422 #endif
2423 		qp = mlx5_glue->dv_create_qp
2424 			(priv->sh->ctx,
2425 			 &(struct ibv_qp_init_attr_ex){
2426 				.qp_type = IBV_QPT_RAW_PACKET,
2427 				.comp_mask =
2428 					IBV_QP_INIT_ATTR_PD |
2429 					IBV_QP_INIT_ATTR_IND_TABLE |
2430 					IBV_QP_INIT_ATTR_RX_HASH,
2431 				.rx_hash_conf = (struct ibv_rx_hash_conf){
2432 					.rx_hash_function =
2433 						IBV_RX_HASH_FUNC_TOEPLITZ,
2434 					.rx_hash_key_len = rss_key_len,
2435 					.rx_hash_key =
2436 						(void *)(uintptr_t)rss_key,
2437 					.rx_hash_fields_mask = hash_fields,
2438 				},
2439 				.rwq_ind_tbl = ind_tbl->ind_table,
2440 				.pd = priv->sh->pd,
2441 			  },
2442 			  &qp_init_attr);
2443 #else
2444 		qp = mlx5_glue->create_qp_ex
2445 			(priv->sh->ctx,
2446 			 &(struct ibv_qp_init_attr_ex){
2447 				.qp_type = IBV_QPT_RAW_PACKET,
2448 				.comp_mask =
2449 					IBV_QP_INIT_ATTR_PD |
2450 					IBV_QP_INIT_ATTR_IND_TABLE |
2451 					IBV_QP_INIT_ATTR_RX_HASH,
2452 				.rx_hash_conf = (struct ibv_rx_hash_conf){
2453 					.rx_hash_function =
2454 						IBV_RX_HASH_FUNC_TOEPLITZ,
2455 					.rx_hash_key_len = rss_key_len,
2456 					.rx_hash_key =
2457 						(void *)(uintptr_t)rss_key,
2458 					.rx_hash_fields_mask = hash_fields,
2459 				},
2460 				.rwq_ind_tbl = ind_tbl->ind_table,
2461 				.pd = priv->sh->pd,
2462 			 });
2463 #endif
2464 		if (!qp) {
2465 			rte_errno = errno;
2466 			goto error;
2467 		}
2468 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2469 		struct mlx5_devx_tir_attr tir_attr;
2470 		uint32_t i;
2471 		uint32_t lro = 1;
2472 
2473 		/* Enable TIR LRO only if all the queues have LRO configured. */
2474 		for (i = 0; i < queues_n; ++i) {
2475 			if (!(*priv->rxqs)[queues[i]]->lro) {
2476 				lro = 0;
2477 				break;
2478 			}
2479 		}
2480 		memset(&tir_attr, 0, sizeof(tir_attr));
2481 		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2482 		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2483 		tir_attr.tunneled_offload_en = !!tunnel;
2484 		/* If needed, translate hash_fields bitmap to PRM format. */
2485 		if (hash_fields) {
2486 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2487 			struct mlx5_rx_hash_field_select *rx_hash_field_select =
2488 					hash_fields & IBV_RX_HASH_INNER ?
2489 					&tir_attr.rx_hash_field_selector_inner :
2490 					&tir_attr.rx_hash_field_selector_outer;
2491 #else
2492 			struct mlx5_rx_hash_field_select *rx_hash_field_select =
2493 					&tir_attr.rx_hash_field_selector_outer;
2494 #endif
2495 
2496 			/* 1 bit: 0: IPv4, 1: IPv6. */
2497 			rx_hash_field_select->l3_prot_type =
2498 				!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2499 			/* 1 bit: 0: TCP, 1: UDP. */
2500 			rx_hash_field_select->l4_prot_type =
2501 				!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2502 			/* Bitmask which sets which fields to use in RX Hash. */
2503 			rx_hash_field_select->selected_fields =
2504 			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2505 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2506 			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2507 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
2508 			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2509 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
2510 			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2511 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
2512 		}
2513 		if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2514 			tir_attr.transport_domain = priv->sh->td->id;
2515 		else
2516 			tir_attr.transport_domain = priv->sh->tdn;
2517 		memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
2518 		tir_attr.indirect_table = ind_tbl->rqt->id;
2519 		if (dev->data->dev_conf.lpbk_mode)
2520 			tir_attr.self_lb_block =
2521 					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2522 		if (lro) {
2523 			tir_attr.lro_timeout_period_usecs =
2524 					priv->config.lro.timeout;
2525 			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2526 			tir_attr.lro_enable_mask =
2527 					MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2528 					MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2529 		}
2530 		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2531 		if (!tir) {
2532 			DRV_LOG(ERR, "port %u cannot create DevX TIR",
2533 				dev->data->port_id);
2534 			rte_errno = errno;
2535 			goto error;
2536 		}
2537 	}
2538 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
2539 	if (!hrxq)
2540 		goto error;
2541 	hrxq->ind_table = ind_tbl;
2542 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2543 		hrxq->qp = qp;
2544 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2545 		hrxq->action =
2546 			mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2547 		if (!hrxq->action) {
2548 			rte_errno = errno;
2549 			goto error;
2550 		}
2551 #endif
2552 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2553 		hrxq->tir = tir;
2554 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2555 		hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2556 							(hrxq->tir->obj);
2557 		if (!hrxq->action) {
2558 			rte_errno = errno;
2559 			goto error;
2560 		}
2561 #endif
2562 	}
2563 	hrxq->rss_key_len = rss_key_len;
2564 	hrxq->hash_fields = hash_fields;
2565 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
2566 	rte_atomic32_inc(&hrxq->refcnt);
2567 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
2568 	return hrxq;
2569 error:
2570 	err = rte_errno; /* Save rte_errno before cleanup. */
2571 	mlx5_ind_table_obj_release(dev, ind_tbl);
2572 	if (qp)
2573 		claim_zero(mlx5_glue->destroy_qp(qp));
2574 	else if (tir)
2575 		claim_zero(mlx5_devx_cmd_destroy(tir));
2576 	rte_errno = err; /* Restore rte_errno. */
2577 	return NULL;
2578 }
2579 
2580 /**
2581  * Get an Rx Hash queue.
2582  *
2583  * @param dev
2584  *   Pointer to Ethernet device.
2585  * @param rss_key
2586  *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
2587  * @param queues
2588  *   Queues entering the hash queue. In case of empty hash_fields only the
2589  *   first queue index will be taken for the indirection table.
2590  * @param queues_n
2591  *   Number of queues.
2592  *
2593  * @return
2594  *   A hash Rx queue on success, NULL if not found.
2595  */
2596 struct mlx5_hrxq *
2597 mlx5_hrxq_get(struct rte_eth_dev *dev,
2598 	      const uint8_t *rss_key, uint32_t rss_key_len,
2599 	      uint64_t hash_fields,
2600 	      const uint16_t *queues, uint32_t queues_n)
2601 {
2602 	struct mlx5_priv *priv = dev->data->dev_private;
2603 	struct mlx5_hrxq *hrxq;
2604 
2605 	queues_n = hash_fields ? queues_n : 1;
2606 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2607 		struct mlx5_ind_table_obj *ind_tbl;
2608 
2609 		if (hrxq->rss_key_len != rss_key_len)
2610 			continue;
2611 		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2612 			continue;
2613 		if (hrxq->hash_fields != hash_fields)
2614 			continue;
2615 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2616 		if (!ind_tbl)
2617 			continue;
2618 		if (ind_tbl != hrxq->ind_table) {
2619 			mlx5_ind_table_obj_release(dev, ind_tbl);
2620 			continue;
2621 		}
2622 		rte_atomic32_inc(&hrxq->refcnt);
2623 		return hrxq;
2624 	}
2625 	return NULL;
2626 }
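
/*
 * Combined lookup/creation sketch (illustrative): callers typically try to
 * reuse an existing hash Rx queue before creating a new one:
 *
 *	struct mlx5_hrxq *hrxq = mlx5_hrxq_get(dev, rss_key, rss_key_len,
 *					       hash_fields, queues, queues_n);
 *
 *	if (hrxq == NULL)
 *		hrxq = mlx5_hrxq_new(dev, rss_key, rss_key_len, hash_fields,
 *				     queues, queues_n, tunnel);
 */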
2627 
2628 /**
2629  * Release the hash Rx queue.
2630  *
2631  * @param dev
2632  *   Pointer to Ethernet device.
2633  * @param hrxq
2634  *   Pointer to Hash Rx queue to release.
2635  *
2636  * @return
2637  *   1 while a reference on it exists, 0 when freed.
2638  */
2639 int
2640 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2641 {
2642 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2643 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2644 		mlx5_glue->destroy_flow_action(hrxq->action);
2645 #endif
2646 		if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2647 			claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2648 		else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2649 			claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2650 		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2651 		LIST_REMOVE(hrxq, next);
2652 		rte_free(hrxq);
2653 		return 0;
2654 	}
2655 	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2656 	return 1;
2657 }
2658 
2659 /**
2660  * Verify the hash Rx queue list is empty.
2661  *
2662  * @param dev
2663  *   Pointer to Ethernet device.
2664  *
2665  * @return
2666  *   The number of objects not released.
2667  */
2668 int
2669 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2670 {
2671 	struct mlx5_priv *priv = dev->data->dev_private;
2672 	struct mlx5_hrxq *hrxq;
2673 	int ret = 0;
2674 
2675 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2676 		DRV_LOG(DEBUG,
2677 			"port %u hash Rx queue %p still referenced",
2678 			dev->data->port_id, (void *)hrxq);
2679 		++ret;
2680 	}
2681 	return ret;
2682 }
2683 
2684 /**
2685  * Create a drop Rx queue Verbs/DevX object.
2686  *
2687  * @param dev
2688  *   Pointer to Ethernet device.
2689  *
2690  * @return
2691  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2692  */
2693 static struct mlx5_rxq_obj *
2694 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2695 {
2696 	struct mlx5_priv *priv = dev->data->dev_private;
2697 	struct ibv_context *ctx = priv->sh->ctx;
2698 	struct ibv_cq *cq;
2699 	struct ibv_wq *wq = NULL;
2700 	struct mlx5_rxq_obj *rxq;
2701 
2702 	if (priv->drop_queue.rxq)
2703 		return priv->drop_queue.rxq;
2704 	cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2705 	if (!cq) {
2706 		DEBUG("port %u cannot allocate CQ for drop queue",
2707 		      dev->data->port_id);
2708 		rte_errno = errno;
2709 		goto error;
2710 	}
2711 	wq = mlx5_glue->create_wq(ctx,
2712 		 &(struct ibv_wq_init_attr){
2713 			.wq_type = IBV_WQT_RQ,
2714 			.max_wr = 1,
2715 			.max_sge = 1,
2716 			.pd = priv->sh->pd,
2717 			.cq = cq,
2718 		 });
2719 	if (!wq) {
2720 		DEBUG("port %u cannot allocate WQ for drop queue",
2721 		      dev->data->port_id);
2722 		rte_errno = errno;
2723 		goto error;
2724 	}
2725 	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2726 	if (!rxq) {
2727 		DEBUG("port %u cannot allocate drop Rx queue memory",
2728 		      dev->data->port_id);
2729 		rte_errno = ENOMEM;
2730 		goto error;
2731 	}
2732 	rxq->cq = cq;
2733 	rxq->wq = wq;
2734 	priv->drop_queue.rxq = rxq;
2735 	return rxq;
2736 error:
2737 	if (wq)
2738 		claim_zero(mlx5_glue->destroy_wq(wq));
2739 	if (cq)
2740 		claim_zero(mlx5_glue->destroy_cq(cq));
2741 	return NULL;
2742 }
2743 
2744 /**
2745  * Release a drop Rx queue Verbs/DevX object.
2746  *
2747  * @param dev
2748  *   Pointer to Ethernet device.
2752  */
2753 static void
2754 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2755 {
2756 	struct mlx5_priv *priv = dev->data->dev_private;
2757 	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2758 
2759 	if (rxq->wq)
2760 		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2761 	if (rxq->cq)
2762 		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2763 	rte_free(rxq);
2764 	priv->drop_queue.rxq = NULL;
2765 }
2766 
2767 /**
2768  * Create a drop indirection table.
2769  *
2770  * @param dev
2771  *   Pointer to Ethernet device.
2772  *
2773  * @return
2774  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2775  */
2776 static struct mlx5_ind_table_obj *
2777 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2778 {
2779 	struct mlx5_priv *priv = dev->data->dev_private;
2780 	struct mlx5_ind_table_obj *ind_tbl;
2781 	struct mlx5_rxq_obj *rxq;
2782 	struct mlx5_ind_table_obj tmpl;
2783 
2784 	rxq = mlx5_rxq_obj_drop_new(dev);
2785 	if (!rxq)
2786 		return NULL;
2787 	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2788 		(priv->sh->ctx,
2789 		 &(struct ibv_rwq_ind_table_init_attr){
2790 			.log_ind_tbl_size = 0,
2791 			.ind_tbl = &rxq->wq,
2792 			.comp_mask = 0,
2793 		 });
2794 	if (!tmpl.ind_table) {
2795 		DEBUG("port %u cannot allocate indirection table for drop"
2796 		      " queue",
2797 		      dev->data->port_id);
2798 		rte_errno = errno;
2799 		goto error;
2800 	}
2801 	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2802 	if (!ind_tbl) {
2803 		rte_errno = ENOMEM;
2804 		goto error;
2805 	}
2806 	ind_tbl->ind_table = tmpl.ind_table;
2807 	return ind_tbl;
2808 error:
2809 	mlx5_rxq_obj_drop_release(dev);
2810 	return NULL;
2811 }
2812 
2813 /**
2814  * Release a drop indirection table.
2815  *
2816  * @param dev
2817  *   Pointer to Ethernet device.
2818  */
2819 static void
2820 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2821 {
2822 	struct mlx5_priv *priv = dev->data->dev_private;
2823 	struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2824 
2825 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2826 	mlx5_rxq_obj_drop_release(dev);
2827 	rte_free(ind_tbl);
2828 	priv->drop_queue.hrxq->ind_table = NULL;
2829 }
2830 
2831 /**
2832  * Create a drop Rx Hash queue.
2833  *
2834  * @param dev
2835  *   Pointer to Ethernet device.
2836  *
2837  * @return
2838  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2839  */
2840 struct mlx5_hrxq *
2841 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2842 {
2843 	struct mlx5_priv *priv = dev->data->dev_private;
2844 	struct mlx5_ind_table_obj *ind_tbl = NULL;
2845 	struct ibv_qp *qp = NULL;
2846 	struct mlx5_hrxq *hrxq = NULL;
2847 
2848 	if (priv->drop_queue.hrxq) {
2849 		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2850 		return priv->drop_queue.hrxq;
2851 	}
2852 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2853 	if (!hrxq) {
2854 		DRV_LOG(WARNING,
2855 			"port %u cannot allocate memory for drop queue",
2856 			dev->data->port_id);
2857 		rte_errno = ENOMEM;
2858 		goto error;
2859 	}
2860 	priv->drop_queue.hrxq = hrxq;
2861 	ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2862 	if (!ind_tbl)
2863 		goto error;
2864 	hrxq->ind_table = ind_tbl;
2865 	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2866 		 &(struct ibv_qp_init_attr_ex){
2867 			.qp_type = IBV_QPT_RAW_PACKET,
2868 			.comp_mask =
2869 				IBV_QP_INIT_ATTR_PD |
2870 				IBV_QP_INIT_ATTR_IND_TABLE |
2871 				IBV_QP_INIT_ATTR_RX_HASH,
2872 			.rx_hash_conf = (struct ibv_rx_hash_conf){
2873 				.rx_hash_function =
2874 					IBV_RX_HASH_FUNC_TOEPLITZ,
2875 				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2876 				.rx_hash_key = rss_hash_default_key,
2877 				.rx_hash_fields_mask = 0,
2878 				},
2879 			.rwq_ind_tbl = ind_tbl->ind_table,
2880 			.pd = priv->sh->pd
2881 		 });
2882 	if (!qp) {
2883 		DEBUG("port %u cannot allocate QP for drop queue",
2884 		      dev->data->port_id);
2885 		rte_errno = errno;
2886 		goto error;
2887 	}
2888 	hrxq->qp = qp;
2889 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2890 	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2891 	if (!hrxq->action) {
2892 		rte_errno = errno;
2893 		goto error;
2894 	}
2895 #endif
2896 	rte_atomic32_set(&hrxq->refcnt, 1);
2897 	return hrxq;
2898 error:
2899 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2900 	if (hrxq && hrxq->action)
2901 		mlx5_glue->destroy_flow_action(hrxq->action);
2902 #endif
2903 	if (qp)
2904 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2905 	if (ind_tbl)
2906 		mlx5_ind_table_obj_drop_release(dev);
2907 	if (hrxq) {
2908 		priv->drop_queue.hrxq = NULL;
2909 		rte_free(hrxq);
2910 	}
2911 	return NULL;
2912 }
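
/*
 * Pairing sketch (illustrative only): mlx5_hrxq_drop_new() either creates
 * the shared drop queue or takes an extra reference on it, so each call
 * must be matched by mlx5_hrxq_drop_release():
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (drop == NULL)
 *		return -rte_errno;
 *	... attach drop->action or drop->qp to the flow ...
 *	mlx5_hrxq_drop_release(dev);
 */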
2913 
2914 /**
2915  * Release a drop hash Rx queue.
2916  *
2917  * @param dev
2918  *   Pointer to Ethernet device.
2919  */
2920 void
2921 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2922 {
2923 	struct mlx5_priv *priv = dev->data->dev_private;
2924 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2925 
2926 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2927 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2928 		mlx5_glue->destroy_flow_action(hrxq->action);
2929 #endif
2930 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2931 		mlx5_ind_table_obj_drop_release(dev);
2932 		rte_free(hrxq);
2933 		priv->drop_queue.hrxq = NULL;
2934 	}
2935 }
2936