1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2015 6WIND S.A.
3  * Copyright 2015 Mellanox Technologies, Ltd
4  */
5 
6 #include <stddef.h>
7 #include <errno.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <fcntl.h>
11 #include <sys/queue.h>
12 
13 /* Verbs header. */
14 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
15 #ifdef PEDANTIC
16 #pragma GCC diagnostic ignored "-Wpedantic"
17 #endif
18 #include <infiniband/verbs.h>
19 #include <infiniband/mlx5dv.h>
20 #ifdef PEDANTIC
21 #pragma GCC diagnostic error "-Wpedantic"
22 #endif
23 
24 #include <rte_mbuf.h>
25 #include <rte_malloc.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_common.h>
28 #include <rte_interrupts.h>
29 #include <rte_debug.h>
30 #include <rte_io.h>
31 
32 #include <mlx5_glue.h>
33 #include <mlx5_devx_cmds.h>
34 
35 #include "mlx5_defs.h"
36 #include "mlx5.h"
37 #include "mlx5_rxtx.h"
38 #include "mlx5_utils.h"
39 #include "mlx5_autoconf.h"
40 #include "mlx5_flow.h"
41 
42 
43 /* Default RSS hash key also used for ConnectX-3. */
44 uint8_t rss_hash_default_key[] = {
45 	0x2c, 0xc6, 0x81, 0xd1,
46 	0x5b, 0xdb, 0xf4, 0xf7,
47 	0xfc, 0xa2, 0x83, 0x19,
48 	0xdb, 0x1a, 0x3e, 0x94,
49 	0x6b, 0x9e, 0x38, 0xd9,
50 	0x2c, 0x9c, 0x03, 0xd1,
51 	0xad, 0x99, 0x44, 0xa7,
52 	0xd9, 0x56, 0x3d, 0x59,
53 	0x06, 0x3c, 0x25, 0xf3,
54 	0xfc, 0x1f, 0xdc, 0x2a,
55 };
56 
57 /* Length of the default RSS hash key. */
58 static_assert(MLX5_RSS_HASH_KEY_LEN ==
59 	      (unsigned int)sizeof(rss_hash_default_key),
60 	      "wrong RSS default key size.");
61 
62 /**
63  * Check whether Multi-Packet RQ can be enabled for the device.
64  *
65  * @param dev
66  *   Pointer to Ethernet device.
67  *
68  * @return
69  *   1 if supported, negative errno value if not.
70  */
71 inline int
72 mlx5_check_mprq_support(struct rte_eth_dev *dev)
73 {
74 	struct mlx5_priv *priv = dev->data->dev_private;
75 
76 	if (priv->config.mprq.enabled &&
77 	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
78 		return 1;
79 	return -ENOTSUP;
80 }
81 
82 /**
83  * Check whether Multi-Packet RQ is enabled for the Rx queue.
84  *
85  * @param rxq
86  *   Pointer to receive queue structure.
87  *
88  * @return
89  *   0 if disabled, otherwise enabled.
90  */
91 inline int
92 mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
93 {
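	/*
	 * strd_num_n (log2 of strides per WQE) is set only when the queue is
	 * created as Multi-Packet RQ in mlx5_rxq_new(), so a non-zero value
	 * implies MPRQ.
	 */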
94 	return rxq->strd_num_n > 0;
95 }
96 
97 /**
98  * Check whether Multi-Packet RQ is enabled for the device.
99  *
100  * @param dev
101  *   Pointer to Ethernet device.
102  *
103  * @return
104  *   0 if disabled, otherwise enabled.
105  */
106 inline int
107 mlx5_mprq_enabled(struct rte_eth_dev *dev)
108 {
109 	struct mlx5_priv *priv = dev->data->dev_private;
110 	uint16_t i;
111 	uint16_t n = 0;
112 	uint16_t n_ibv = 0;
113 
114 	if (mlx5_check_mprq_support(dev) < 0)
115 		return 0;
116 	/* All the configured queues should be enabled. */
117 	for (i = 0; i < priv->rxqs_n; ++i) {
118 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
119 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
120 			(rxq, struct mlx5_rxq_ctrl, rxq);
121 
122 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
123 			continue;
124 		n_ibv++;
125 		if (mlx5_rxq_mprq_enabled(rxq))
126 			++n;
127 	}
128 	/* Multi-Packet RQ can't be partially configured. */
129 	MLX5_ASSERT(n == 0 || n == n_ibv);
130 	return n == n_ibv;
131 }
132 
133 /**
134  * Allocate RX queue elements for Multi-Packet RQ.
135  *
136  * @param rxq_ctrl
137  *   Pointer to RX queue structure.
138  *
139  * @return
140  *   0 on success, a negative errno value otherwise and rte_errno is set.
141  */
142 static int
143 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
144 {
145 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
146 	unsigned int wqe_n = 1 << rxq->elts_n;
147 	unsigned int i;
148 	int err;
149 
150 	/* Iterate on segments. */
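	/*
	 * Note: one buffer more than wqe_n is allocated; the extra one is
	 * kept aside in mprq_repl as a spare replacement buffer for the
	 * MPRQ Rx burst path.
	 */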
151 	for (i = 0; i <= wqe_n; ++i) {
152 		struct mlx5_mprq_buf *buf;
153 
154 		if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
155 			DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
156 			rte_errno = ENOMEM;
157 			goto error;
158 		}
159 		if (i < wqe_n)
160 			(*rxq->mprq_bufs)[i] = buf;
161 		else
162 			rxq->mprq_repl = buf;
163 	}
164 	DRV_LOG(DEBUG,
165 		"port %u Rx queue %u allocated and configured %u segments",
166 		rxq->port_id, rxq->idx, wqe_n);
167 	return 0;
168 error:
169 	err = rte_errno; /* Save rte_errno before cleanup. */
170 	wqe_n = i;
171 	for (i = 0; (i != wqe_n); ++i) {
172 		if ((*rxq->mprq_bufs)[i] != NULL)
173 			rte_mempool_put(rxq->mprq_mp,
174 					(*rxq->mprq_bufs)[i]);
175 		(*rxq->mprq_bufs)[i] = NULL;
176 	}
177 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
178 		rxq->port_id, rxq->idx);
179 	rte_errno = err; /* Restore rte_errno. */
180 	return -rte_errno;
181 }
182 
183 /**
184  * Allocate RX queue elements for Single-Packet RQ.
185  *
186  * @param rxq_ctrl
187  *   Pointer to RX queue structure.
188  *
189  * @return
190  *   0 on success, a negative errno value otherwise and rte_errno is set.
191  */
192 static int
193 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
194 {
195 	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
196 	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
197 	unsigned int i;
198 	int err;
199 
200 	/* Iterate on segments. */
201 	for (i = 0; (i != elts_n); ++i) {
202 		struct rte_mbuf *buf;
203 
204 		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
205 		if (buf == NULL) {
206 			DRV_LOG(ERR, "port %u empty mbuf pool",
207 				PORT_ID(rxq_ctrl->priv));
208 			rte_errno = ENOMEM;
209 			goto error;
210 		}
211 		/* Headroom is reserved by rte_pktmbuf_alloc(). */
212 		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
213 		/* Buffer is supposed to be empty. */
214 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
215 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
216 		MLX5_ASSERT(!buf->next);
217 		/* Only the first segment keeps headroom. */
218 		if (i % sges_n)
219 			SET_DATA_OFF(buf, 0);
220 		PORT(buf) = rxq_ctrl->rxq.port_id;
221 		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
222 		PKT_LEN(buf) = DATA_LEN(buf);
223 		NB_SEGS(buf) = 1;
224 		(*rxq_ctrl->rxq.elts)[i] = buf;
225 	}
226 	/* If Rx vector is activated. */
227 	if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
228 		struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
229 		struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
230 		struct rte_pktmbuf_pool_private *priv =
231 			(struct rte_pktmbuf_pool_private *)
232 				rte_mempool_get_priv(rxq_ctrl->rxq.mp);
233 		int j;
234 
235 		/* Initialize default rearm_data for vPMD. */
236 		mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
237 		rte_mbuf_refcnt_set(mbuf_init, 1);
238 		mbuf_init->nb_segs = 1;
239 		mbuf_init->port = rxq->port_id;
240 		if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
241 			mbuf_init->ol_flags = EXT_ATTACHED_MBUF;
242 		/*
243 		 * Prevent compiler reordering:
244 		 * rearm_data covers the previous fields.
245 		 */
246 		rte_compiler_barrier();
247 		rxq->mbuf_initializer =
248 			*(rte_xmm_t *)&mbuf_init->rearm_data;
249 		/* Padding with a fake mbuf for vectorized Rx. */
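		/*
		 * The extra MLX5_VPMD_DESCS_PER_LOOP slots (reserved via
		 * desc_n in mlx5_rxq_new()) all point to the same fake mbuf
		 * so that vectorized bursts can safely read past the last
		 * valid descriptor without touching real mbufs.
		 */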
250 		for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
251 			(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
252 	}
253 	DRV_LOG(DEBUG,
254 		"port %u Rx queue %u allocated and configured %u segments"
255 		" (max %u packets)",
256 		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
257 		elts_n / (1 << rxq_ctrl->rxq.sges_n));
258 	return 0;
259 error:
260 	err = rte_errno; /* Save rte_errno before cleanup. */
261 	elts_n = i;
262 	for (i = 0; (i != elts_n); ++i) {
263 		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
264 			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
265 		(*rxq_ctrl->rxq.elts)[i] = NULL;
266 	}
267 	DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
268 		PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
269 	rte_errno = err; /* Restore rte_errno. */
270 	return -rte_errno;
271 }
272 
273 /**
274  * Allocate RX queue elements.
275  *
276  * @param rxq_ctrl
277  *   Pointer to RX queue structure.
278  *
279  * @return
280  *   0 on success, a negative errno value otherwise and rte_errno is set.
281  */
282 int
283 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
284 {
285 	return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
286 	       rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
287 }
288 
289 /**
290  * Free RX queue elements for Multi-Packet RQ.
291  *
292  * @param rxq_ctrl
293  *   Pointer to RX queue structure.
294  */
295 static void
296 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
297 {
298 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
299 	uint16_t i;
300 
301 	DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
302 		rxq->port_id, rxq->idx);
303 	if (rxq->mprq_bufs == NULL)
304 		return;
305 	MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
306 	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
307 		if ((*rxq->mprq_bufs)[i] != NULL)
308 			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
309 		(*rxq->mprq_bufs)[i] = NULL;
310 	}
311 	if (rxq->mprq_repl != NULL) {
312 		mlx5_mprq_buf_free(rxq->mprq_repl);
313 		rxq->mprq_repl = NULL;
314 	}
315 }
316 
317 /**
318  * Free RX queue elements for Single-Packet RQ.
319  *
320  * @param rxq_ctrl
321  *   Pointer to RX queue structure.
322  */
323 static void
324 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
325 {
326 	struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
327 	const uint16_t q_n = (1 << rxq->elts_n);
328 	const uint16_t q_mask = q_n - 1;
329 	uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
330 	uint16_t i;
331 
332 	DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
333 		PORT_ID(rxq_ctrl->priv), rxq->idx);
334 	if (rxq->elts == NULL)
335 		return;
336 	/*
337 	 * Some mbufs in the ring still belong to the application.
338 	 * They cannot be freed.
339 	 */
340 	if (mlx5_rxq_check_vec_support(rxq) > 0) {
341 		for (i = 0; i < used; ++i)
342 			(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
343 		rxq->rq_pi = rxq->rq_ci;
344 	}
345 	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
346 		if ((*rxq->elts)[i] != NULL)
347 			rte_pktmbuf_free_seg((*rxq->elts)[i]);
348 		(*rxq->elts)[i] = NULL;
349 	}
350 }
351 
352 /**
353  * Free RX queue elements.
354  *
355  * @param rxq_ctrl
356  *   Pointer to RX queue structure.
357  */
358 static void
359 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
360 {
361 	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
362 		rxq_free_elts_mprq(rxq_ctrl);
363 	else
364 		rxq_free_elts_sprq(rxq_ctrl);
365 }
366 
367 /**
368  * Returns the per-queue supported offloads.
369  *
370  * @param dev
371  *   Pointer to Ethernet device.
372  *
373  * @return
374  *   Supported Rx offloads.
375  */
376 uint64_t
377 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
378 {
379 	struct mlx5_priv *priv = dev->data->dev_private;
380 	struct mlx5_dev_config *config = &priv->config;
381 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
382 			     DEV_RX_OFFLOAD_TIMESTAMP |
383 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
384 			     DEV_RX_OFFLOAD_RSS_HASH);
385 
386 	if (config->hw_fcs_strip)
387 		offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
388 
389 	if (config->hw_csum)
390 		offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
391 			     DEV_RX_OFFLOAD_UDP_CKSUM |
392 			     DEV_RX_OFFLOAD_TCP_CKSUM);
393 	if (config->hw_vlan_strip)
394 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
395 	if (MLX5_LRO_SUPPORTED(dev))
396 		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
397 	return offloads;
398 }
399 
400 
401 /**
402  * Returns the per-port supported offloads.
403  *
404  * @return
405  *   Supported Rx offloads.
406  */
407 uint64_t
408 mlx5_get_rx_port_offloads(void)
409 {
410 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
411 
412 	return offloads;
413 }
414 
415 /**
416  * Verify if the queue can be released.
417  *
418  * @param dev
419  *   Pointer to Ethernet device.
420  * @param idx
421  *   RX queue index.
422  *
423  * @return
424  *   1 if the queue can be released,
425  *   0 if the queue cannot be released (there are still references to it),
426  *   a negative errno value and rte_errno is set if the queue doesn't exist.
427  */
428 static int
429 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
430 {
431 	struct mlx5_priv *priv = dev->data->dev_private;
432 	struct mlx5_rxq_ctrl *rxq_ctrl;
433 
434 	if (!(*priv->rxqs)[idx]) {
435 		rte_errno = EINVAL;
436 		return -rte_errno;
437 	}
438 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
439 	return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
440 }
441 
442 /**
443  * Rx queue presetup checks.
444  *
445  * @param dev
446  *   Pointer to Ethernet device structure.
447  * @param idx
448  *   RX queue index.
449  * @param desc
450  *   Number of descriptors to configure in queue.
451  *
452  * @return
453  *   0 on success, a negative errno value otherwise and rte_errno is set.
454  */
455 static int
456 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
457 {
458 	struct mlx5_priv *priv = dev->data->dev_private;
459 
460 	if (!rte_is_power_of_2(desc)) {
461 		desc = 1 << log2above(desc);
462 		DRV_LOG(WARNING,
463 			"port %u increased number of descriptors in Rx queue %u"
464 			" to the next power of two (%d)",
465 			dev->data->port_id, idx, desc);
466 	}
467 	DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
468 		dev->data->port_id, idx, desc);
469 	if (idx >= priv->rxqs_n) {
470 		DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
471 			dev->data->port_id, idx, priv->rxqs_n);
472 		rte_errno = EOVERFLOW;
473 		return -rte_errno;
474 	}
475 	if (!mlx5_rxq_releasable(dev, idx)) {
476 		DRV_LOG(ERR, "port %u unable to release queue index %u",
477 			dev->data->port_id, idx);
478 		rte_errno = EBUSY;
479 		return -rte_errno;
480 	}
481 	mlx5_rxq_release(dev, idx);
482 	return 0;
483 }
484 
485 /**
486  * DPDK callback to configure a Rx queue.
487  * @param dev
488  *   Pointer to Ethernet device structure.
489  * @param idx
490  *   RX queue index.
491  * @param desc
492  *   Number of descriptors to configure in queue.
493  * @param socket
494  *   NUMA socket on which memory must be allocated.
495  * @param[in] conf
496  *   Thresholds parameters.
497  * @param mp
498  *   Memory pool for buffer allocations.
499  *
500  * @return
501  *   0 on success, a negative errno value otherwise and rte_errno is set.
502  */
503 int
504 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
505 		    unsigned int socket, const struct rte_eth_rxconf *conf,
506 		    struct rte_mempool *mp)
507 {
508 	struct mlx5_priv *priv = dev->data->dev_private;
509 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
510 	struct mlx5_rxq_ctrl *rxq_ctrl =
511 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
512 	int res;
513 
514 	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
515 	if (res)
516 		return res;
517 	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
518 	if (!rxq_ctrl) {
519 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
520 			dev->data->port_id, idx);
521 		rte_errno = ENOMEM;
522 		return -rte_errno;
523 	}
524 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
525 		dev->data->port_id, idx);
526 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
527 	return 0;
528 }
529 
530 /**
531  * DPDK callback to configure a hairpin Rx queue.
532  * @param dev
533  *   Pointer to Ethernet device structure.
534  * @param idx
535  *   RX queue index.
536  * @param desc
537  *   Number of descriptors to configure in queue.
538  * @param hairpin_conf
539  *   Hairpin configuration parameters.
540  *
541  * @return
542  *   0 on success, a negative errno value otherwise and rte_errno is set.
543  */
544 int
545 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
546 			    uint16_t desc,
547 			    const struct rte_eth_hairpin_conf *hairpin_conf)
548 {
549 	struct mlx5_priv *priv = dev->data->dev_private;
550 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
551 	struct mlx5_rxq_ctrl *rxq_ctrl =
552 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
553 	int res;
554 
555 	res = mlx5_rx_queue_pre_setup(dev, idx, desc);
556 	if (res)
557 		return res;
558 	if (hairpin_conf->peer_count != 1 ||
559 	    hairpin_conf->peers[0].port != dev->data->port_id ||
560 	    hairpin_conf->peers[0].queue >= priv->txqs_n) {
561 		DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
562 			" invalid hairpin configuration", dev->data->port_id,
563 			idx);
564 		rte_errno = EINVAL;
565 		return -rte_errno;
566 	}
567 	rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
568 	if (!rxq_ctrl) {
569 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
570 			dev->data->port_id, idx);
571 		rte_errno = ENOMEM;
572 		return -rte_errno;
573 	}
574 	DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
575 		dev->data->port_id, idx);
576 	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
577 	return 0;
578 }
579 
580 /**
581  * DPDK callback to release a RX queue.
582  *
583  * @param dpdk_rxq
584  *   Generic RX queue pointer.
585  */
586 void
587 mlx5_rx_queue_release(void *dpdk_rxq)
588 {
589 	struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
590 	struct mlx5_rxq_ctrl *rxq_ctrl;
591 	struct mlx5_priv *priv;
592 
593 	if (rxq == NULL)
594 		return;
595 	rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
596 	priv = rxq_ctrl->priv;
597 	if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
598 		rte_panic("port %u Rx queue %u is still used by a flow and"
599 			  " cannot be removed\n",
600 			  PORT_ID(priv), rxq->idx);
601 	mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
602 }
603 
604 /**
605  * Get an Rx queue Verbs/DevX object.
606  *
607  * @param dev
608  *   Pointer to Ethernet device.
609  * @param idx
610  *   Queue index in DPDK Rx queue array
611  *
612  * @return
613  *   The Verbs/DevX object if it exists.
614  */
615 static struct mlx5_rxq_obj *
616 mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
617 {
618 	struct mlx5_priv *priv = dev->data->dev_private;
619 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
620 	struct mlx5_rxq_ctrl *rxq_ctrl;
621 
622 	if (idx >= priv->rxqs_n)
623 		return NULL;
624 	if (!rxq_data)
625 		return NULL;
626 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
627 	if (rxq_ctrl->obj)
628 		rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
629 	return rxq_ctrl->obj;
630 }
631 
632 /**
633  * Release the resources allocated for an RQ DevX object.
634  *
635  * @param rxq_ctrl
636  *   DevX Rx queue object.
637  */
638 static void
639 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
640 {
641 	if (rxq_ctrl->rxq.wqes) {
642 		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
643 		rxq_ctrl->rxq.wqes = NULL;
644 	}
645 	if (rxq_ctrl->wq_umem) {
646 		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
647 		rxq_ctrl->wq_umem = NULL;
648 	}
649 }
650 
651 /**
652  * Release Rx hairpin related resources.
653  *
654  * @param rxq_obj
655  *   Hairpin Rx queue object.
656  */
657 static void
658 rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
659 {
660 	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
661 
662 	MLX5_ASSERT(rxq_obj);
663 	rq_attr.state = MLX5_RQC_STATE_RST;
664 	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
665 	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
666 	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
667 }
668 
669 /**
670  * Release an Rx verbs/DevX queue object.
671  *
672  * @param rxq_obj
673  *   Verbs/DevX Rx queue object.
674  *
675  * @return
676  *   1 while a reference on it exists, 0 when freed.
677  */
678 static int
679 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
680 {
681 	MLX5_ASSERT(rxq_obj);
682 	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
683 		switch (rxq_obj->type) {
684 		case MLX5_RXQ_OBJ_TYPE_IBV:
685 			MLX5_ASSERT(rxq_obj->wq);
686 			MLX5_ASSERT(rxq_obj->cq);
687 			rxq_free_elts(rxq_obj->rxq_ctrl);
688 			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
689 			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
690 			break;
691 		case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
692 			MLX5_ASSERT(rxq_obj->cq);
693 			MLX5_ASSERT(rxq_obj->rq);
694 			rxq_free_elts(rxq_obj->rxq_ctrl);
695 			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
696 			rxq_release_rq_resources(rxq_obj->rxq_ctrl);
697 			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
698 			break;
699 		case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
700 			MLX5_ASSERT(rxq_obj->rq);
701 			rxq_obj_hairpin_release(rxq_obj);
702 			break;
703 		}
704 		if (rxq_obj->channel)
705 			claim_zero(mlx5_glue->destroy_comp_channel
706 				   (rxq_obj->channel));
707 		LIST_REMOVE(rxq_obj, next);
708 		rte_free(rxq_obj);
709 		return 0;
710 	}
711 	return 1;
712 }
713 
714 /**
715  * Allocate queue vector and fill epoll fd list for Rx interrupts.
716  *
717  * @param dev
718  *   Pointer to Ethernet device.
719  *
720  * @return
721  *   0 on success, a negative errno value otherwise and rte_errno is set.
722  */
723 int
724 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
725 {
726 	struct mlx5_priv *priv = dev->data->dev_private;
727 	unsigned int i;
728 	unsigned int rxqs_n = priv->rxqs_n;
729 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
730 	unsigned int count = 0;
731 	struct rte_intr_handle *intr_handle = dev->intr_handle;
732 
733 	if (!dev->data->dev_conf.intr_conf.rxq)
734 		return 0;
735 	mlx5_rx_intr_vec_disable(dev);
736 	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
737 	if (intr_handle->intr_vec == NULL) {
738 		DRV_LOG(ERR,
739 			"port %u failed to allocate memory for interrupt"
740 			" vector, Rx interrupts will not be supported",
741 			dev->data->port_id);
742 		rte_errno = ENOMEM;
743 		return -rte_errno;
744 	}
745 	intr_handle->type = RTE_INTR_HANDLE_EXT;
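	/*
	 * intr_vec[] has one entry per Rx queue while efds[] is compacted:
	 * only queues owning a completion channel get an epoll fd, and their
	 * intr_vec entry records RTE_INTR_VEC_RXTX_OFFSET plus the position
	 * in efds[]; the others get an out-of-range vector to mark them
	 * disabled.
	 */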
746 	for (i = 0; i != n; ++i) {
747 		/* This rxq obj must not be released in this function. */
748 		struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
749 		int fd;
750 		int flags;
751 		int rc;
752 
753 		/* Skip queues that cannot request interrupts. */
754 		if (!rxq_obj || !rxq_obj->channel) {
755 			/* Use invalid intr_vec[] index to disable entry. */
756 			intr_handle->intr_vec[i] =
757 				RTE_INTR_VEC_RXTX_OFFSET +
758 				RTE_MAX_RXTX_INTR_VEC_ID;
759 			continue;
760 		}
761 		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
762 			DRV_LOG(ERR,
763 				"port %u too many Rx queues for interrupt"
764 				" vector size (%d), Rx interrupts cannot be"
765 				" enabled",
766 				dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
767 			mlx5_rx_intr_vec_disable(dev);
768 			rte_errno = ENOMEM;
769 			return -rte_errno;
770 		}
771 		fd = rxq_obj->channel->fd;
772 		flags = fcntl(fd, F_GETFL);
773 		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
774 		if (rc < 0) {
775 			rte_errno = errno;
776 			DRV_LOG(ERR,
777 				"port %u failed to make Rx interrupt file"
778 				" descriptor %d non-blocking for queue index"
779 				" %d",
780 				dev->data->port_id, fd, i);
781 			mlx5_rx_intr_vec_disable(dev);
782 			return -rte_errno;
783 		}
784 		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
785 		intr_handle->efds[count] = fd;
786 		count++;
787 	}
788 	if (!count)
789 		mlx5_rx_intr_vec_disable(dev);
790 	else
791 		intr_handle->nb_efd = count;
792 	return 0;
793 }
794 
795 /**
796  * Clean up Rx interrupts handler.
797  *
798  * @param dev
799  *   Pointer to Ethernet device.
800  */
801 void
802 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
803 {
804 	struct mlx5_priv *priv = dev->data->dev_private;
805 	struct rte_intr_handle *intr_handle = dev->intr_handle;
806 	unsigned int i;
807 	unsigned int rxqs_n = priv->rxqs_n;
808 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
809 
810 	if (!dev->data->dev_conf.intr_conf.rxq)
811 		return;
812 	if (!intr_handle->intr_vec)
813 		goto free;
814 	for (i = 0; i != n; ++i) {
815 		struct mlx5_rxq_ctrl *rxq_ctrl;
816 		struct mlx5_rxq_data *rxq_data;
817 
818 		if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
819 		    RTE_MAX_RXTX_INTR_VEC_ID)
820 			continue;
821 		/*
822 		 * Need to access the queue directly to release the reference
823 		 * kept in mlx5_rx_intr_vec_enable().
824 		 */
825 		rxq_data = (*priv->rxqs)[i];
826 		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
827 		if (rxq_ctrl->obj)
828 			mlx5_rxq_obj_release(rxq_ctrl->obj);
829 	}
830 free:
831 	rte_intr_free_epoll_fd(intr_handle);
832 	if (intr_handle->intr_vec)
833 		free(intr_handle->intr_vec);
834 	intr_handle->nb_efd = 0;
835 	intr_handle->intr_vec = NULL;
836 }
837 
838 /**
839  * MLX5 CQ notification.
840  *
841  * @param rxq
842  *   Pointer to receive queue structure.
843  * @param sq_n_rxq
844  *   Sequence number per receive queue.
845  */
846 static inline void
847 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
848 {
849 	int sq_n = 0;
850 	uint32_t doorbell_hi;
851 	uint64_t doorbell;
852 	void *cq_db_reg = (char *)rxq->cq_uar + MLX5_CQ_DOORBELL;
853 
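	/*
	 * Arm the CQ. Doorbell layout as composed below:
	 *   doorbell_hi = (sq_n << MLX5_CQ_SQN_OFFSET) | (cq_ci & MLX5_CI_MASK)
	 *   doorbell    = ((uint64_t)doorbell_hi << 32) | cqn
	 * The 32-bit value is stored in the CQ arm doorbell record and the
	 * 64-bit value is written to the CQ UAR register.
	 */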
854 	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
855 	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
856 	doorbell = (uint64_t)doorbell_hi << 32;
857 	doorbell |=  rxq->cqn;
858 	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
859 	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
860 			 cq_db_reg, rxq->uar_lock_cq);
861 }
862 
863 /**
864  * DPDK callback for Rx queue interrupt enable.
865  *
866  * @param dev
867  *   Pointer to Ethernet device structure.
868  * @param rx_queue_id
869  *   Rx queue number.
870  *
871  * @return
872  *   0 on success, a negative errno value otherwise and rte_errno is set.
873  */
874 int
875 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
876 {
877 	struct mlx5_priv *priv = dev->data->dev_private;
878 	struct mlx5_rxq_data *rxq_data;
879 	struct mlx5_rxq_ctrl *rxq_ctrl;
880 
881 	rxq_data = (*priv->rxqs)[rx_queue_id];
882 	if (!rxq_data) {
883 		rte_errno = EINVAL;
884 		return -rte_errno;
885 	}
886 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
887 	if (rxq_ctrl->irq) {
888 		struct mlx5_rxq_obj *rxq_obj;
889 
890 		rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
891 		if (!rxq_obj) {
892 			rte_errno = EINVAL;
893 			return -rte_errno;
894 		}
895 		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
896 		mlx5_rxq_obj_release(rxq_obj);
897 	}
898 	return 0;
899 }
900 
901 /**
902  * DPDK callback for Rx queue interrupt disable.
903  *
904  * @param dev
905  *   Pointer to Ethernet device structure.
906  * @param rx_queue_id
907  *   Rx queue number.
908  *
909  * @return
910  *   0 on success, a negative errno value otherwise and rte_errno is set.
911  */
912 int
913 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
914 {
915 	struct mlx5_priv *priv = dev->data->dev_private;
916 	struct mlx5_rxq_data *rxq_data;
917 	struct mlx5_rxq_ctrl *rxq_ctrl;
918 	struct mlx5_rxq_obj *rxq_obj = NULL;
919 	struct ibv_cq *ev_cq;
920 	void *ev_ctx;
921 	int ret;
922 
923 	rxq_data = (*priv->rxqs)[rx_queue_id];
924 	if (!rxq_data) {
925 		rte_errno = EINVAL;
926 		return -rte_errno;
927 	}
928 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
929 	if (!rxq_ctrl->irq)
930 		return 0;
931 	rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
932 	if (!rxq_obj) {
933 		rte_errno = EINVAL;
934 		return -rte_errno;
935 	}
936 	ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
937 	if (ret || ev_cq != rxq_obj->cq) {
938 		rte_errno = EINVAL;
939 		goto exit;
940 	}
941 	rxq_data->cq_arm_sn++;
942 	mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
943 	mlx5_rxq_obj_release(rxq_obj);
944 	return 0;
945 exit:
946 	ret = rte_errno; /* Save rte_errno before cleanup. */
947 	if (rxq_obj)
948 		mlx5_rxq_obj_release(rxq_obj);
949 	DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
950 		dev->data->port_id, rx_queue_id);
951 	rte_errno = ret; /* Restore rte_errno. */
952 	return -rte_errno;
953 }
954 
955 /**
956  * Create a CQ Verbs object.
957  *
958  * @param dev
959  *   Pointer to Ethernet device.
960  * @param priv
961  *   Pointer to device private data.
962  * @param rxq_data
963  *   Pointer to Rx queue data.
964  * @param cqe_n
965  *   Number of CQEs in CQ.
966  * @param rxq_obj
967  *   Pointer to Rx queue object data.
968  *
969  * @return
970  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
971  */
972 static struct ibv_cq *
973 mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
974 		struct mlx5_rxq_data *rxq_data,
975 		unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
976 {
977 	struct {
978 		struct ibv_cq_init_attr_ex ibv;
979 		struct mlx5dv_cq_init_attr mlx5;
980 	} cq_attr;
981 
982 	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
983 		.cqe = cqe_n,
984 		.channel = rxq_obj->channel,
985 		.comp_mask = 0,
986 	};
987 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
988 		.comp_mask = 0,
989 	};
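	/*
	 * Request CQE compression only when neither HW timestamping nor LRO
	 * is enabled on the queue; otherwise compression stays off and a
	 * debug message is logged below.
	 */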
990 	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
991 	    !rxq_data->lro) {
992 		cq_attr.mlx5.comp_mask |=
993 				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
994 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
995 		cq_attr.mlx5.cqe_comp_res_format =
996 				mlx5_rxq_mprq_enabled(rxq_data) ?
997 				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
998 				MLX5DV_CQE_RES_FORMAT_HASH;
999 #else
1000 		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
1001 #endif
1002 		/*
1003 		 * For vectorized Rx, it must not be doubled in order to
1004 		 * make cq_ci and rq_ci aligned.
1005 		 */
1006 		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
1007 			cq_attr.ibv.cqe *= 2;
1008 	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
1009 		DRV_LOG(DEBUG,
1010 			"port %u Rx CQE compression is disabled for HW"
1011 			" timestamp",
1012 			dev->data->port_id);
1013 	} else if (priv->config.cqe_comp && rxq_data->lro) {
1014 		DRV_LOG(DEBUG,
1015 			"port %u Rx CQE compression is disabled for LRO",
1016 			dev->data->port_id);
1017 	}
1018 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
1019 	if (priv->config.cqe_pad) {
1020 		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
1021 		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
1022 	}
1023 #endif
1024 	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
1025 							      &cq_attr.ibv,
1026 							      &cq_attr.mlx5));
1027 }
1028 
1029 /**
1030  * Create a WQ Verbs object.
1031  *
1032  * @param dev
1033  *   Pointer to Ethernet device.
1034  * @param priv
1035  *   Pointer to device private data.
1036  * @param rxq_data
1037  *   Pointer to Rx queue data.
1038  * @param idx
1039  *   Queue index in DPDK Rx queue array
1040  * @param wqe_n
1041  *   Number of WQEs in WQ.
1042  * @param rxq_obj
1043  *   Pointer to Rx queue object data.
1044  *
1045  * @return
1046  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
1047  */
1048 static struct ibv_wq *
1049 mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
1050 		struct mlx5_rxq_data *rxq_data, uint16_t idx,
1051 		unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
1052 {
1053 	struct {
1054 		struct ibv_wq_init_attr ibv;
1055 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1056 		struct mlx5dv_wq_init_attr mlx5;
1057 #endif
1058 	} wq_attr;
1059 
1060 	wq_attr.ibv = (struct ibv_wq_init_attr){
1061 		.wq_context = NULL, /* Could be useful in the future. */
1062 		.wq_type = IBV_WQT_RQ,
1063 		/* Max number of outstanding WRs. */
1064 		.max_wr = wqe_n >> rxq_data->sges_n,
1065 		/* Max number of scatter/gather elements in a WR. */
1066 		.max_sge = 1 << rxq_data->sges_n,
1067 		.pd = priv->sh->pd,
1068 		.cq = rxq_obj->cq,
1069 		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
1070 		.create_flags = (rxq_data->vlan_strip ?
1071 				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
1072 	};
1073 	/* By default, FCS (CRC) is stripped by hardware. */
1074 	if (rxq_data->crc_present) {
1075 		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
1076 		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1077 	}
1078 	if (priv->config.hw_padding) {
1079 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
1080 		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
1081 		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1082 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
1083 		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
1084 		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
1085 #endif
1086 	}
1087 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
1088 	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
1089 		.comp_mask = 0,
1090 	};
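	/*
	 * For Multi-Packet RQ, describe the striding layout: each WQE is
	 * split into 2^strd_num_n strides of 2^strd_sz_n bytes each, as
	 * chosen in mlx5_rxq_new().
	 */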
1091 	if (mlx5_rxq_mprq_enabled(rxq_data)) {
1092 		struct mlx5dv_striding_rq_init_attr *mprq_attr =
1093 						&wq_attr.mlx5.striding_rq_attrs;
1094 
1095 		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
1096 		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
1097 			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
1098 			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
1099 			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
1100 		};
1101 	}
1102 	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
1103 					      &wq_attr.mlx5);
1104 #else
1105 	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
1106 #endif
1107 	if (rxq_obj->wq) {
1108 		/*
1109 		 * Make sure the number of WRs*SGEs matches expectations since a
1110 		 * queue cannot allocate more than "desc" buffers.
1111 		 */
1112 		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
1113 		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
1114 			DRV_LOG(ERR,
1115 				"port %u Rx queue %u requested %u*%u but got"
1116 				" %u*%u WRs*SGEs",
1117 				dev->data->port_id, idx,
1118 				wqe_n >> rxq_data->sges_n,
1119 				(1 << rxq_data->sges_n),
1120 				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
1121 			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
1122 			rxq_obj->wq = NULL;
1123 			rte_errno = EINVAL;
1124 		}
1125 	}
1126 	return rxq_obj->wq;
1127 }
1128 
1129 /**
1130  * Fill common fields of create RQ attributes structure.
1131  *
1132  * @param rxq_data
1133  *   Pointer to Rx queue data.
1134  * @param cqn
1135  *   CQ number to use with this RQ.
1136  * @param rq_attr
1137  *   RQ attributes structure to fill.
1138  */
1139 static void
1140 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
1141 			      struct mlx5_devx_create_rq_attr *rq_attr)
1142 {
1143 	rq_attr->state = MLX5_RQC_STATE_RST;
1144 	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
1145 	rq_attr->cqn = cqn;
1146 	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
1147 }
1148 
1149 /**
1150  * Fill common fields of DevX WQ attributes structure.
1151  *
1152  * @param priv
1153  *   Pointer to device private data.
1154  * @param rxq_ctrl
1155  *   Pointer to Rx queue control structure.
1156  * @param wq_attr
1157  *   WQ attributes structure to fill.
1158  */
1159 static void
1160 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
1161 		       struct mlx5_devx_wq_attr *wq_attr)
1162 {
1163 	wq_attr->end_padding_mode = priv->config.cqe_pad ?
1164 					MLX5_WQ_END_PAD_MODE_ALIGN :
1165 					MLX5_WQ_END_PAD_MODE_NONE;
1166 	wq_attr->pd = priv->sh->pdn;
1167 	wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
1168 	wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
1169 	wq_attr->dbr_umem_valid = 1;
1170 	wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
1171 	wq_attr->wq_umem_valid = 1;
1172 }
1173 
1174 /**
1175  * Create a RQ object using DevX.
1176  *
1177  * @param dev
1178  *   Pointer to Ethernet device.
1179  * @param idx
1180  *   Queue index in DPDK Rx queue array
1181  * @param cqn
1182  *   CQ number to use with this RQ.
1183  *
1184  * @return
1185  *   The DevX object initialised, NULL otherwise and rte_errno is set.
1186  */
1187 static struct mlx5_devx_obj *
1188 mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
1189 {
1190 	struct mlx5_priv *priv = dev->data->dev_private;
1191 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1192 	struct mlx5_rxq_ctrl *rxq_ctrl =
1193 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1194 	struct mlx5_devx_create_rq_attr rq_attr;
1195 	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
1196 	uint32_t wq_size = 0;
1197 	uint32_t wqe_size = 0;
1198 	uint32_t log_wqe_size = 0;
1199 	void *buf = NULL;
1200 	struct mlx5_devx_obj *rq;
1201 
1202 	memset(&rq_attr, 0, sizeof(rq_attr));
1203 	/* Fill RQ attributes. */
1204 	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
1205 	rq_attr.flush_in_error_en = 1;
1206 	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
1207 	/* Fill WQ attributes for this RQ. */
1208 	if (mlx5_rxq_mprq_enabled(rxq_data)) {
1209 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
1210 		/*
1211 		 * Number of strides in each WQE:
1212 		 * 512*2^single_wqe_log_num_of_strides.
1213 		 */
1214 		rq_attr.wq_attr.single_wqe_log_num_of_strides =
1215 				rxq_data->strd_num_n -
1216 				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1217 		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
1218 		rq_attr.wq_attr.single_stride_log_num_of_bytes =
1219 				rxq_data->strd_sz_n -
1220 				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1221 		wqe_size = sizeof(struct mlx5_wqe_mprq);
1222 	} else {
1223 		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
1224 		wqe_size = sizeof(struct mlx5_wqe_data_seg);
1225 	}
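	/*
	 * Each descriptor uses 2^sges_n data segments, so the WQE stride
	 * grows by sges_n bits while the WQ depth in WQEs (log_wq_sz)
	 * shrinks by the same amount; the total of 2^elts_n elements is
	 * preserved, and wq_size below is wqe_n * wqe_size bytes.
	 */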
1226 	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
1227 	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
1228 	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
1229 	/* Calculate and allocate WQ memory space. */
1230 	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
1231 	wq_size = wqe_n * wqe_size;
1232 	buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
1233 				rxq_ctrl->socket);
1234 	if (!buf)
1235 		return NULL;
1236 	rxq_data->wqes = buf;
1237 	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
1238 						     buf, wq_size, 0);
1239 	if (!rxq_ctrl->wq_umem) {
1240 		rte_free(buf);
1241 		return NULL;
1242 	}
1243 	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
1244 	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
1245 	if (!rq)
1246 		rxq_release_rq_resources(rxq_ctrl);
1247 	return rq;
1248 }
1249 
1250 /**
1251  * Create the Rx hairpin queue object.
1252  *
1253  * @param dev
1254  *   Pointer to Ethernet device.
1255  * @param idx
1256  *   Queue index in DPDK Rx queue array
1257  *
1258  * @return
1259  *   The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
1260  */
1261 static struct mlx5_rxq_obj *
1262 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
1263 {
1264 	struct mlx5_priv *priv = dev->data->dev_private;
1265 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1266 	struct mlx5_rxq_ctrl *rxq_ctrl =
1267 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1268 	struct mlx5_devx_create_rq_attr attr = { 0 };
1269 	struct mlx5_rxq_obj *tmpl = NULL;
1270 	int ret = 0;
1271 	uint32_t max_wq_data;
1272 
1273 	MLX5_ASSERT(rxq_data);
1274 	MLX5_ASSERT(!rxq_ctrl->obj);
1275 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1276 				 rxq_ctrl->socket);
1277 	if (!tmpl) {
1278 		DRV_LOG(ERR,
1279 			"port %u Rx queue %u cannot allocate verbs resources",
1280 			dev->data->port_id, rxq_data->idx);
1281 		rte_errno = ENOMEM;
1282 		goto error;
1283 	}
1284 	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
1285 	tmpl->rxq_ctrl = rxq_ctrl;
1286 	attr.hairpin = 1;
1287 	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
1288 	/* Jumbo frames (> 9KB) should be supported, as well as multiple packets. */
1289 	attr.wq_attr.log_hairpin_data_sz =
1290 			(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
1291 			max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
1292 	/* Set the packets number to the maximum value for performance. */
1293 	attr.wq_attr.log_hairpin_num_packets =
1294 			attr.wq_attr.log_hairpin_data_sz -
1295 			MLX5_HAIRPIN_QUEUE_STRIDE;
1296 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
1297 					   rxq_ctrl->socket);
1298 	if (!tmpl->rq) {
1299 		DRV_LOG(ERR,
1300 			"port %u Rx hairpin queue %u can't create rq object",
1301 			dev->data->port_id, idx);
1302 		rte_errno = errno;
1303 		goto error;
1304 	}
1305 	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1306 		idx, (void *)tmpl);
1307 	rte_atomic32_inc(&tmpl->refcnt);
1308 	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1309 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1310 	return tmpl;
1311 error:
1312 	ret = rte_errno; /* Save rte_errno before cleanup. */
1313 	if (tmpl && tmpl->rq)
1314 		mlx5_devx_cmd_destroy(tmpl->rq);
1315 	rte_errno = ret; /* Restore rte_errno. */
1316 	return NULL;
1317 }
1318 
1319 /**
1320  * Create the Rx queue Verbs/DevX object.
1321  *
1322  * @param dev
1323  *   Pointer to Ethernet device.
1324  * @param idx
1325  *   Queue index in DPDK Rx queue array
1326  * @param type
1327  *   Type of Rx queue object to create.
1328  *
1329  * @return
1330  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
1331  */
1332 struct mlx5_rxq_obj *
1333 mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
1334 		 enum mlx5_rxq_obj_type type)
1335 {
1336 	struct mlx5_priv *priv = dev->data->dev_private;
1337 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
1338 	struct mlx5_rxq_ctrl *rxq_ctrl =
1339 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
1340 	struct ibv_wq_attr mod;
1341 	unsigned int cqe_n;
1342 	unsigned int wqe_n = 1 << rxq_data->elts_n;
1343 	struct mlx5_rxq_obj *tmpl = NULL;
1344 	struct mlx5dv_cq cq_info;
1345 	struct mlx5dv_rwq rwq;
1346 	int ret = 0;
1347 	struct mlx5dv_obj obj;
1348 
1349 	MLX5_ASSERT(rxq_data);
1350 	MLX5_ASSERT(!rxq_ctrl->obj);
1351 	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
1352 		return mlx5_rxq_obj_hairpin_new(dev, idx);
1353 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
1354 	priv->verbs_alloc_ctx.obj = rxq_ctrl;
1355 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
1356 				 rxq_ctrl->socket);
1357 	if (!tmpl) {
1358 		DRV_LOG(ERR,
1359 			"port %u Rx queue %u cannot allocate verbs resources",
1360 			dev->data->port_id, rxq_data->idx);
1361 		rte_errno = ENOMEM;
1362 		goto error;
1363 	}
1364 	tmpl->type = type;
1365 	tmpl->rxq_ctrl = rxq_ctrl;
1366 	if (rxq_ctrl->irq) {
1367 		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
1368 		if (!tmpl->channel) {
1369 			DRV_LOG(ERR, "port %u: comp channel creation failure",
1370 				dev->data->port_id);
1371 			rte_errno = ENOMEM;
1372 			goto error;
1373 		}
1374 	}
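	/*
	 * Size the CQ for one CQE per packet: with Multi-Packet RQ every WQE
	 * can receive up to 2^strd_num_n packets (one per stride), otherwise
	 * one packet per WQE.
	 */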
1375 	if (mlx5_rxq_mprq_enabled(rxq_data))
1376 		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
1377 	else
1378 		cqe_n = wqe_n  - 1;
1379 	tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
1380 	if (!tmpl->cq) {
1381 		DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
1382 			dev->data->port_id, idx);
1383 		rte_errno = ENOMEM;
1384 		goto error;
1385 	}
1386 	obj.cq.in = tmpl->cq;
1387 	obj.cq.out = &cq_info;
1388 	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
1389 	if (ret) {
1390 		rte_errno = ret;
1391 		goto error;
1392 	}
1393 	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
1394 		DRV_LOG(ERR,
1395 			"port %u wrong MLX5_CQE_SIZE environment variable"
1396 			" value: it should be set to %u",
1397 			dev->data->port_id, RTE_CACHE_LINE_SIZE);
1398 		rte_errno = EINVAL;
1399 		goto error;
1400 	}
1401 	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
1402 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
1403 	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
1404 		dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
1405 	/* Allocate door-bell for types created with DevX. */
1406 	if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
1407 		struct mlx5_devx_dbr_page *dbr_page;
1408 		int64_t dbr_offset;
1409 
1410 		dbr_offset = mlx5_get_dbr(dev, &dbr_page);
1411 		if (dbr_offset < 0)
1412 			goto error;
1413 		rxq_ctrl->dbr_offset = dbr_offset;
1414 		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
1415 		rxq_ctrl->dbr_umem_id_valid = 1;
1416 		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
1417 					       (uintptr_t)rxq_ctrl->dbr_offset);
1418 	}
1419 	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
1420 		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
1421 					   tmpl);
1422 		if (!tmpl->wq) {
1423 			DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
1424 				dev->data->port_id, idx);
1425 			rte_errno = ENOMEM;
1426 			goto error;
1427 		}
1428 		/* Change queue state to ready. */
1429 		mod = (struct ibv_wq_attr){
1430 			.attr_mask = IBV_WQ_ATTR_STATE,
1431 			.wq_state = IBV_WQS_RDY,
1432 		};
1433 		ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
1434 		if (ret) {
1435 			DRV_LOG(ERR,
1436 				"port %u Rx queue %u WQ state to IBV_WQS_RDY"
1437 				" failed", dev->data->port_id, idx);
1438 			rte_errno = ret;
1439 			goto error;
1440 		}
1441 		obj.rwq.in = tmpl->wq;
1442 		obj.rwq.out = &rwq;
1443 		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
1444 		if (ret) {
1445 			rte_errno = ret;
1446 			goto error;
1447 		}
1448 		rxq_data->wqes = rwq.buf;
1449 		rxq_data->rq_db = rwq.dbrec;
1450 	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
1451 		struct mlx5_devx_modify_rq_attr rq_attr;
1452 
1453 		memset(&rq_attr, 0, sizeof(rq_attr));
1454 		tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
1455 		if (!tmpl->rq) {
1456 			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
1457 				dev->data->port_id, idx);
1458 			rte_errno = ENOMEM;
1459 			goto error;
1460 		}
1461 		/* Change queue state to ready. */
1462 		rq_attr.rq_state = MLX5_RQC_STATE_RST;
1463 		rq_attr.state = MLX5_RQC_STATE_RDY;
1464 		ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
1465 		if (ret)
1466 			goto error;
1467 	}
1468 	/* Fill the rings. */
1469 	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
1470 	rxq_data->cq_db = cq_info.dbrec;
1471 	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
1472 	rxq_data->cq_uar = cq_info.cq_uar;
1473 	rxq_data->cqn = cq_info.cqn;
1474 	rxq_data->cq_arm_sn = 0;
1475 	mlx5_rxq_initialize(rxq_data);
1476 	rxq_data->cq_ci = 0;
1477 	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
1478 		idx, (void *)tmpl);
1479 	rte_atomic32_inc(&tmpl->refcnt);
1480 	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
1481 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1482 	return tmpl;
1483 error:
1484 	if (tmpl) {
1485 		ret = rte_errno; /* Save rte_errno before cleanup. */
1486 		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
1487 			claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
1488 		else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
1489 			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
1490 		if (tmpl->cq)
1491 			claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
1492 		if (tmpl->channel)
1493 			claim_zero(mlx5_glue->destroy_comp_channel
1494 							(tmpl->channel));
1495 		rte_free(tmpl);
1496 		rte_errno = ret; /* Restore rte_errno. */
1497 	}
1498 	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
1499 		rxq_release_rq_resources(rxq_ctrl);
1500 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
1501 	return NULL;
1502 }
1503 
1504 /**
1505  * Verify the Rx queue object list is empty.
1506  *
1507  * @param dev
1508  *   Pointer to Ethernet device.
1509  *
1510  * @return
1511  *   The number of objects not released.
1512  */
1513 int
1514 mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
1515 {
1516 	struct mlx5_priv *priv = dev->data->dev_private;
1517 	int ret = 0;
1518 	struct mlx5_rxq_obj *rxq_obj;
1519 
1520 	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
1521 		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
1522 			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
1523 		++ret;
1524 	}
1525 	return ret;
1526 }
1527 
1528 /**
1529  * Callback function to initialize mbufs for Multi-Packet RQ.
1530  */
1531 static inline void
1532 mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
1533 		    void *_m, unsigned int i __rte_unused)
1534 {
1535 	struct mlx5_mprq_buf *buf = _m;
1536 	struct rte_mbuf_ext_shared_info *shinfo;
1537 	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
1538 	unsigned int j;
1539 
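	/*
	 * Each stride gets its own rte_mbuf_ext_shared_info so that it can be
	 * attached to an mbuf as an external buffer independently; the free
	 * callback (mlx5_mprq_buf_free_cb) returns the whole buffer to the
	 * mempool once no stride is referenced anymore.
	 */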
1540 	memset(_m, 0, sizeof(*buf));
1541 	buf->mp = mp;
1542 	rte_atomic16_set(&buf->refcnt, 1);
1543 	for (j = 0; j != strd_n; ++j) {
1544 		shinfo = &buf->shinfos[j];
1545 		shinfo->free_cb = mlx5_mprq_buf_free_cb;
1546 		shinfo->fcb_opaque = buf;
1547 	}
1548 }
1549 
1550 /**
1551  * Free mempool of Multi-Packet RQ.
1552  *
1553  * @param dev
1554  *   Pointer to Ethernet device.
1555  *
1556  * @return
1557  *   0 on success, negative errno value on failure.
1558  */
1559 int
1560 mlx5_mprq_free_mp(struct rte_eth_dev *dev)
1561 {
1562 	struct mlx5_priv *priv = dev->data->dev_private;
1563 	struct rte_mempool *mp = priv->mprq_mp;
1564 	unsigned int i;
1565 
1566 	if (mp == NULL)
1567 		return 0;
1568 	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
1569 		dev->data->port_id, mp->name);
1570 	/*
1571 	 * If a buffer in the pool has been externally attached to a mbuf and it
1572 	 * is still in use by application, destroying the Rx queue can spoil
1573 	 * the packet. It is unlikely to happen but if application dynamically
1574 	 * creates and destroys with holding Rx packets, this can happen.
1575 	 *
1576 	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
1577 	 * RQ isn't provided by application but managed by PMD.
1578 	 */
1579 	if (!rte_mempool_full(mp)) {
1580 		DRV_LOG(ERR,
1581 			"port %u mempool for Multi-Packet RQ is still in use",
1582 			dev->data->port_id);
1583 		rte_errno = EBUSY;
1584 		return -rte_errno;
1585 	}
1586 	rte_mempool_free(mp);
1587 	/* Unset mempool for each Rx queue. */
1588 	for (i = 0; i != priv->rxqs_n; ++i) {
1589 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1590 
1591 		if (rxq == NULL)
1592 			continue;
1593 		rxq->mprq_mp = NULL;
1594 	}
1595 	priv->mprq_mp = NULL;
1596 	return 0;
1597 }
1598 
1599 /**
1600  * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
1601  * mempool. If already allocated, reuse it if there are enough elements.
1602  * Otherwise, resize it.
1603  *
1604  * @param dev
1605  *   Pointer to Ethernet device.
1606  *
1607  * @return
1608  *   0 on success, negative errno value on failure.
1609  */
1610 int
1611 mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
1612 {
1613 	struct mlx5_priv *priv = dev->data->dev_private;
1614 	struct rte_mempool *mp = priv->mprq_mp;
1615 	char name[RTE_MEMPOOL_NAMESIZE];
1616 	unsigned int desc = 0;
1617 	unsigned int buf_len;
1618 	unsigned int obj_num;
1619 	unsigned int obj_size;
1620 	unsigned int strd_num_n = 0;
1621 	unsigned int strd_sz_n = 0;
1622 	unsigned int i;
1623 	unsigned int n_ibv = 0;
1624 
1625 	if (!mlx5_mprq_enabled(dev))
1626 		return 0;
1627 	/* Count the total number of descriptors configured. */
1628 	for (i = 0; i != priv->rxqs_n; ++i) {
1629 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1630 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1631 			(rxq, struct mlx5_rxq_ctrl, rxq);
1632 
1633 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1634 			continue;
1635 		n_ibv++;
1636 		desc += 1 << rxq->elts_n;
1637 		/* Get the max number of strides. */
1638 		if (strd_num_n < rxq->strd_num_n)
1639 			strd_num_n = rxq->strd_num_n;
1640 		/* Get the max size of a stride. */
1641 		if (strd_sz_n < rxq->strd_sz_n)
1642 			strd_sz_n = rxq->strd_sz_n;
1643 	}
1644 	MLX5_ASSERT(strd_num_n && strd_sz_n);
1645 	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
1646 	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
1647 		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
1648 	/*
1649 	 * Received packets can be either memcpy'd or externally referenced.
1650 	 * If a packet is attached to an mbuf as an external buffer, it isn't
1651 	 * possible to predict how the buffers will be queued by the
1652 	 * application, so there is no way to pre-allocate exactly the needed
1653 	 * buffers in advance; enough buffers have to be prepared speculatively.
1654 	 *
1655 	 * In the data path, if this mempool is depleted, the PMD will try to
1656 	 * memcpy received packets into buffers provided by the application
1657 	 * (rxq->mp) until this mempool becomes available again.
1658 	 */
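	/*
	 * Heuristic over-allocation: provision four times the configured
	 * number of descriptors so that externally attached buffers held by
	 * the application are unlikely to starve the pool.
	 */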
1659 	desc *= 4;
1660 	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
1661 	/*
1662 	 * rte_mempool_create_empty() has a sanity check that refuses a cache
1663 	 * size which is too large compared to the number of elements.
1664 	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so the
1665 	 * constant 2 is used here instead.
1666 	 */
1667 	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
1668 	/* Check if a mempool is already allocated and if it can be reused. */
1669 	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
1670 		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
1671 			dev->data->port_id, mp->name);
1672 		/* Reuse. */
1673 		goto exit;
1674 	} else if (mp != NULL) {
1675 		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
1676 			dev->data->port_id, mp->name);
1677 		/*
1678 		 * If freeing fails, the mempool may still be in use and there
1679 		 * is no choice but to keep using the existing one. On buffer
1680 		 * underrun, packets will be memcpy'd instead of attached as
1681 		 * external buffers.
1682 		 */
1683 		if (mlx5_mprq_free_mp(dev)) {
1684 			if (mp->elt_size >= obj_size)
1685 				goto exit;
1686 			else
1687 				return -rte_errno;
1688 		}
1689 	}
1690 	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
1691 	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
1692 				0, NULL, NULL, mlx5_mprq_buf_init,
1693 				(void *)(uintptr_t)(1 << strd_num_n),
1694 				dev->device->numa_node, 0);
1695 	if (mp == NULL) {
1696 		DRV_LOG(ERR,
1697 			"port %u failed to allocate a mempool for"
1698 			" Multi-Packet RQ, count=%u, size=%u",
1699 			dev->data->port_id, obj_num, obj_size);
1700 		rte_errno = ENOMEM;
1701 		return -rte_errno;
1702 	}
1703 	priv->mprq_mp = mp;
1704 exit:
1705 	/* Set mempool for each Rx queue. */
1706 	for (i = 0; i != priv->rxqs_n; ++i) {
1707 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
1708 		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
1709 			(rxq, struct mlx5_rxq_ctrl, rxq);
1710 
1711 		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
1712 			continue;
1713 		rxq->mprq_mp = mp;
1714 	}
1715 	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
1716 		dev->data->port_id);
1717 	return 0;
1718 }
1719 
1720 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
1721 					sizeof(struct rte_vlan_hdr) * 2 + \
1722 					sizeof(struct rte_ipv6_hdr)))
1723 #define MAX_TCP_OPTION_SIZE 40u
1724 #define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
1725 				 sizeof(struct rte_tcp_hdr) + \
1726 				 MAX_TCP_OPTION_SIZE))
1727 
1728 /**
1729  * Adjust the maximum LRO message size.
1730  *
1731  * @param dev
1732  *   Pointer to Ethernet device.
1733  * @param idx
1734  *   RX queue index.
1735  * @param max_lro_size
1736  *   The maximum size for LRO packet.
1737  */
1738 static void
1739 mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
1740 			     uint32_t max_lro_size)
1741 {
1742 	struct mlx5_priv *priv = dev->data->dev_private;
1743 
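	/*
	 * priv->max_lro_msg_size is kept in units of MLX5_LRO_SEG_CHUNK_SIZE.
	 * When the HCA counts the LRO message from L4, the maximum L2/L3
	 * header length is subtracted first, and the port-wide value is the
	 * minimum across all configured Rx queues.
	 */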
1744 	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
1745 	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
1746 	    MLX5_MAX_TCP_HDR_OFFSET)
1747 		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
1748 	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
1749 	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
1750 	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
1751 	if (priv->max_lro_msg_size)
1752 		priv->max_lro_msg_size =
1753 			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
1754 	else
1755 		priv->max_lro_msg_size = max_lro_size;
1756 	DRV_LOG(DEBUG,
1757 		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
1758 		dev->data->port_id, idx,
1759 		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
1760 }
1761 
1762 /**
1763  * Create a DPDK Rx queue.
1764  *
1765  * @param dev
1766  *   Pointer to Ethernet device.
1767  * @param idx
1768  *   RX queue index.
1769  * @param desc
1770  *   Number of descriptors to configure in queue.
1771  * @param socket
1772  *   NUMA socket on which memory must be allocated.
1773  *
1774  * @return
1775  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
1776  */
1777 struct mlx5_rxq_ctrl *
1778 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1779 	     unsigned int socket, const struct rte_eth_rxconf *conf,
1780 	     struct rte_mempool *mp)
1781 {
1782 	struct mlx5_priv *priv = dev->data->dev_private;
1783 	struct mlx5_rxq_ctrl *tmpl;
1784 	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
1785 	unsigned int mprq_stride_size;
1786 	struct mlx5_dev_config *config = &priv->config;
1787 	unsigned int strd_headroom_en;
1788 	/*
1789 	 * Always allocate extra slots, even if vector Rx
1790 	 * ends up not being used.
1791 	 */
1792 	uint16_t desc_n =
1793 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
1794 	uint64_t offloads = conf->offloads |
1795 			   dev->data->dev_conf.rxmode.offloads;
1796 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
1797 	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
1798 	unsigned int max_rx_pkt_len = lro_on_queue ?
1799 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
1800 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
1801 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
1802 							RTE_PKTMBUF_HEADROOM;
1803 	unsigned int max_lro_size = 0;
1804 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
1805 
1806 	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
1807 						    DEV_RX_OFFLOAD_SCATTER)) {
1808 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
1809 			" configured and not enough mbuf space(%u) to contain "
1810 			"the maximum RX packet length(%u) with head-room(%u)",
1811 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
1812 			RTE_PKTMBUF_HEADROOM);
1813 		rte_errno = ENOSPC;
1814 		return NULL;
1815 	}
1816 	tmpl = rte_calloc_socket("RXQ", 1,
1817 				 sizeof(*tmpl) +
1818 				 desc_n * sizeof(struct rte_mbuf *),
1819 				 0, socket);
1820 	if (!tmpl) {
1821 		rte_errno = ENOMEM;
1822 		return NULL;
1823 	}
1824 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
1825 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
1826 			       MLX5_MR_BTREE_CACHE_N, socket)) {
1827 		/* rte_errno is already set. */
1828 		goto error;
1829 	}
1830 	tmpl->socket = socket;
1831 	if (dev->data->dev_conf.intr_conf.rxq)
1832 		tmpl->irq = 1;
1833 	/*
1834 	 * An LRO packet may consume all the stride memory, hence we cannot
1835 	 * guarantee head-room near the packet memory in the stride.
1836 	 * In this case scatter is certainly enabled and an empty mbuf may be
1837 	 * prepended to provide the head-room.
1838 	 */
1839 	if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
1840 	    non_scatter_min_mbuf_size > mb_len) {
1841 		strd_headroom_en = 0;
1842 		mprq_stride_size = RTE_MIN(max_rx_pkt_len,
1843 					1u << config->mprq.max_stride_size_n);
1844 	} else {
1845 		strd_headroom_en = 1;
1846 		mprq_stride_size = non_scatter_min_mbuf_size;
1847 	}
1848 	/*
1849 	 * This Rx queue can be configured as a Multi-Packet RQ if all of the
1850 	 * following conditions are met:
1851 	 *  - MPRQ is enabled.
1852 	 *  - The number of descs is more than the number of strides.
1853 	 *  - max_rx_pkt_len plus overhead is less than the max size of a
1854 	 *    stride.
1855 	 *  Otherwise, enable Rx scatter if necessary.
1856 	 */
1857 	if (mprq_en &&
1858 	    desc > (1U << config->mprq.stride_num_n) &&
1859 	    mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
1860 		/* TODO: Rx scatter isn't supported yet. */
1861 		tmpl->rxq.sges_n = 0;
1862 		/* Trim the number of descs needed. */
1863 		desc >>= config->mprq.stride_num_n;
1864 		tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
1865 		tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
1866 					      config->mprq.min_stride_size_n);
1867 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
1868 		tmpl->rxq.strd_headroom_en = strd_headroom_en;
1869 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
1870 				config->mprq.max_memcpy_len);
1871 		max_lro_size = RTE_MIN(max_rx_pkt_len,
1872 				       (1u << tmpl->rxq.strd_num_n) *
1873 				       (1u << tmpl->rxq.strd_sz_n));
1874 		DRV_LOG(DEBUG,
1875 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
1876 			" strd_num_n = %u, strd_sz_n = %u",
1877 			dev->data->port_id, idx,
1878 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
1879 	} else if (max_rx_pkt_len <= first_mb_free_size) {
1880 		tmpl->rxq.sges_n = 0;
1881 		max_lro_size = max_rx_pkt_len;
1882 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
1883 		unsigned int size = non_scatter_min_mbuf_size;
1884 		unsigned int sges_n;
1885 
1886 		if (lro_on_queue && first_mb_free_size <
1887 		    MLX5_MAX_LRO_HEADER_FIX) {
1888 			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
1889 				" to include the max header size(%u) for LRO",
1890 				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
1891 			rte_errno = ENOTSUP;
1892 			goto error;
1893 		}
1894 		/*
1895 		 * Determine the number of SGEs needed for a full packet
1896 		 * and round it to the next power of two.
1897 		 */
1898 		sges_n = log2above((size / mb_len) + !!(size % mb_len));
1899 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
1900 			DRV_LOG(ERR,
1901 				"port %u too many SGEs (%u) needed to handle"
1902 				" requested maximum packet size %u, the maximum"
1903 				" supported are %u", dev->data->port_id,
1904 				1 << sges_n, max_rx_pkt_len,
1905 				1u << MLX5_MAX_LOG_RQ_SEGS);
1906 			rte_errno = ENOTSUP;
1907 			goto error;
1908 		}
1909 		tmpl->rxq.sges_n = sges_n;
1910 		max_lro_size = max_rx_pkt_len;
1911 	}
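	/*
	 * Editor's note with illustrative numbers (example values, not the
	 * driver defaults): with stride_num_n = 6 each multi-packet WQE holds
	 * 64 strides, so a request for desc = 512 is trimmed above to
	 * 512 >> 6 = 8 WQEs. In the scattered branch, a packet that needs 5
	 * mbuf segments is rounded up to sges_n = log2above(5) = 3, i.e.
	 * 8 SGEs per packet.
	 */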
1912 	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
1913 		DRV_LOG(WARNING,
1914 			"port %u MPRQ is requested but cannot be enabled"
1915 			" (requested: desc = %u, stride_sz = %u,"
1916 			" supported: min_stride_num = %u, max_stride_sz = %u).",
1917 			dev->data->port_id, desc, mprq_stride_size,
1918 			(1 << config->mprq.stride_num_n),
1919 			(1 << config->mprq.max_stride_size_n));
1920 	DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
1921 		dev->data->port_id, 1 << tmpl->rxq.sges_n);
1922 	if (desc % (1 << tmpl->rxq.sges_n)) {
1923 		DRV_LOG(ERR,
1924 			"port %u number of Rx queue descriptors (%u) is not a"
1925 			" multiple of SGEs per packet (%u)",
1926 			dev->data->port_id,
1927 			desc,
1928 			1 << tmpl->rxq.sges_n);
1929 		rte_errno = EINVAL;
1930 		goto error;
1931 	}
1932 	mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
1933 	/* Toggle RX checksum offload if hardware supports it. */
1934 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
1935 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
1936 	/* Configure VLAN stripping. */
1937 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
1938 	/* By default, FCS (CRC) is stripped by hardware. */
1939 	tmpl->rxq.crc_present = 0;
1940 	tmpl->rxq.lro = lro_on_queue;
1941 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
1942 		if (config->hw_fcs_strip) {
1943 			/*
1944 			 * RQs used for LRO-enabled TIRs should not be
1945 			 * configured to scatter the FCS.
1946 			 */
1947 			if (lro_on_queue)
1948 				DRV_LOG(WARNING,
1949 					"port %u CRC stripping has been "
1950 					"disabled but will still be performed "
1951 					"by hardware, because LRO is enabled",
1952 					dev->data->port_id);
1953 			else
1954 				tmpl->rxq.crc_present = 1;
1955 		} else {
1956 			DRV_LOG(WARNING,
1957 				"port %u CRC stripping has been disabled but will"
1958 				" still be performed by hardware, make sure MLNX_OFED"
1959 				" and firmware are up to date",
1960 				dev->data->port_id);
1961 		}
1962 	}
1963 	DRV_LOG(DEBUG,
1964 		"port %u CRC stripping is %s, %u bytes will be subtracted from"
1965 		" incoming frames to hide it",
1966 		dev->data->port_id,
1967 		tmpl->rxq.crc_present ? "disabled" : "enabled",
1968 		tmpl->rxq.crc_present << 2);
1969 	/* Save port ID. */
1970 	tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
1971 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
1972 	tmpl->rxq.port_id = dev->data->port_id;
1973 	tmpl->priv = priv;
1974 	tmpl->rxq.mp = mp;
1975 	tmpl->rxq.elts_n = log2above(desc);
1976 	tmpl->rxq.rq_repl_thresh =
1977 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
1978 	tmpl->rxq.elts =
1979 		(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
1980 #ifndef RTE_ARCH_64
1981 	tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
1982 #endif
1983 	tmpl->rxq.idx = idx;
1984 	rte_atomic32_inc(&tmpl->refcnt);
1985 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
1986 	return tmpl;
1987 error:
1988 	rte_free(tmpl);
1989 	return NULL;
1990 }
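
/*
 * Editor's sketch of how a caller is expected to use mlx5_rxq_new(); the
 * surrounding variables are hypothetical and error handling is reduced to
 * the rte_errno contract documented above:
 *
 *	struct mlx5_rxq_ctrl *rxq_ctrl;
 *
 *	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
 *	if (rxq_ctrl == NULL)
 *		return -rte_errno;
 *	(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
 */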
1991 
1992 /**
1993  * Create a DPDK Rx hairpin queue.
1994  *
1995  * @param dev
1996  *   Pointer to Ethernet device.
1997  * @param idx
1998  *   RX queue index.
1999  * @param desc
2000  *   Number of descriptors to configure in queue.
2001  * @param hairpin_conf
2002  *   The hairpin binding configuration.
2003  *
2004  * @return
2005  *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
2006  */
2007 struct mlx5_rxq_ctrl *
2008 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2009 		     const struct rte_eth_hairpin_conf *hairpin_conf)
2010 {
2011 	struct mlx5_priv *priv = dev->data->dev_private;
2012 	struct mlx5_rxq_ctrl *tmpl;
2013 
2014 	tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
2015 	if (!tmpl) {
2016 		rte_errno = ENOMEM;
2017 		return NULL;
2018 	}
2019 	tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
2020 	tmpl->socket = SOCKET_ID_ANY;
2021 	tmpl->rxq.rss_hash = 0;
2022 	tmpl->rxq.port_id = dev->data->port_id;
2023 	tmpl->priv = priv;
2024 	tmpl->rxq.mp = NULL;
2025 	tmpl->rxq.elts_n = log2above(desc);
2026 	tmpl->rxq.elts = NULL;
2027 	tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
2028 	tmpl->hairpin_conf = *hairpin_conf;
2029 	tmpl->rxq.idx = idx;
2030 	rte_atomic32_inc(&tmpl->refcnt);
2031 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
2032 	return tmpl;
2033 }
2034 
2035 /**
2036  * Get a Rx queue.
2037  *
2038  * @param dev
2039  *   Pointer to Ethernet device.
2040  * @param idx
2041  *   RX queue index.
2042  *
2043  * @return
2044  *   A pointer to the queue if it exists, NULL otherwise.
2045  */
2046 struct mlx5_rxq_ctrl *
2047 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
2048 {
2049 	struct mlx5_priv *priv = dev->data->dev_private;
2050 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2051 
2052 	if ((*priv->rxqs)[idx]) {
2053 		rxq_ctrl = container_of((*priv->rxqs)[idx],
2054 					struct mlx5_rxq_ctrl,
2055 					rxq);
2056 		mlx5_rxq_obj_get(dev, idx);
2057 		rte_atomic32_inc(&rxq_ctrl->refcnt);
2058 	}
2059 	return rxq_ctrl;
2060 }
2061 
2062 /**
2063  * Release a Rx queue.
2064  *
2065  * @param dev
2066  *   Pointer to Ethernet device.
2067  * @param idx
2068  *   RX queue index.
2069  *
2070  * @return
2071  *   1 while a reference on it exists, 0 when freed.
2072  */
2073 int
2074 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
2075 {
2076 	struct mlx5_priv *priv = dev->data->dev_private;
2077 	struct mlx5_rxq_ctrl *rxq_ctrl;
2078 
2079 	if (!(*priv->rxqs)[idx])
2080 		return 0;
2081 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
2082 	MLX5_ASSERT(rxq_ctrl->priv);
2083 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
2084 		rxq_ctrl->obj = NULL;
2085 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
2086 		if (rxq_ctrl->dbr_umem_id_valid)
2087 			claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
2088 						    rxq_ctrl->dbr_offset));
2089 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2090 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2091 		LIST_REMOVE(rxq_ctrl, next);
2092 		rte_free(rxq_ctrl);
2093 		(*priv->rxqs)[idx] = NULL;
2094 		return 0;
2095 	}
2096 	return 1;
2097 }
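
/*
 * Editor's note: mlx5_rxq_get() and mlx5_rxq_release() form a reference
 * counted pair, so every successful get must eventually be matched by a
 * release. A minimal sketch with hypothetical caller code:
 *
 *	struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
 *
 *	if (ctrl != NULL) {
 *		... use ctrl ...
 *		mlx5_rxq_release(dev, idx);
 *	}
 */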
2098 
2099 /**
2100  * Verify that the Rx queue list is empty.
2101  *
2102  * @param dev
2103  *   Pointer to Ethernet device.
2104  *
2105  * @return
2106  *   The number of objects not released.
2107  */
2108 int
2109 mlx5_rxq_verify(struct rte_eth_dev *dev)
2110 {
2111 	struct mlx5_priv *priv = dev->data->dev_private;
2112 	struct mlx5_rxq_ctrl *rxq_ctrl;
2113 	int ret = 0;
2114 
2115 	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2116 		DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2117 			dev->data->port_id, rxq_ctrl->rxq.idx);
2118 		++ret;
2119 	}
2120 	return ret;
2121 }
2122 
2123 /**
2124  * Get a Rx queue type.
2125  *
2126  * @param dev
2127  *   Pointer to Ethernet device.
2128  * @param idx
2129  *   Rx queue index.
2130  *
2131  * @return
2132  *   The Rx queue type.
2133  */
2134 enum mlx5_rxq_type
2135 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2136 {
2137 	struct mlx5_priv *priv = dev->data->dev_private;
2138 	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
2139 
2140 	if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
2141 		rxq_ctrl = container_of((*priv->rxqs)[idx],
2142 					struct mlx5_rxq_ctrl,
2143 					rxq);
2144 		return rxq_ctrl->type;
2145 	}
2146 	return MLX5_RXQ_TYPE_UNDEFINED;
2147 }
2148 
2149 /**
2150  * Create an indirection table.
2151  *
2152  * @param dev
2153  *   Pointer to Ethernet device.
2154  * @param queues
2155  *   Queues to include in the indirection table.
2156  * @param queues_n
2157  *   Number of queues in the array.
2158  *
2159  * @return
2160  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2161  */
2162 static struct mlx5_ind_table_obj *
2163 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
2164 		       uint32_t queues_n, enum mlx5_ind_tbl_type type)
2165 {
2166 	struct mlx5_priv *priv = dev->data->dev_private;
2167 	struct mlx5_ind_table_obj *ind_tbl;
2168 	unsigned int i = 0, j = 0, k = 0;
2169 
2170 	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
2171 			     queues_n * sizeof(uint16_t), 0);
2172 	if (!ind_tbl) {
2173 		rte_errno = ENOMEM;
2174 		return NULL;
2175 	}
2176 	ind_tbl->type = type;
2177 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2178 		const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
2179 			log2above(queues_n) :
2180 			log2above(priv->config.ind_table_max_size);
2181 		struct ibv_wq *wq[1 << wq_n];
2182 
2183 		for (i = 0; i != queues_n; ++i) {
2184 			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2185 								 queues[i]);
2186 			if (!rxq)
2187 				goto error;
2188 			wq[i] = rxq->obj->wq;
2189 			ind_tbl->queues[i] = queues[i];
2190 		}
2191 		ind_tbl->queues_n = queues_n;
2192 		/* Finalise indirection table. */
2193 		k = i; /* Retain value of i for use in error case. */
2194 		for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
2195 			wq[k] = wq[j];
2196 		ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
2197 			(priv->sh->ctx,
2198 			 &(struct ibv_rwq_ind_table_init_attr){
2199 				.log_ind_tbl_size = wq_n,
2200 				.ind_tbl = wq,
2201 				.comp_mask = 0,
2202 			});
2203 		if (!ind_tbl->ind_table) {
2204 			rte_errno = errno;
2205 			goto error;
2206 		}
2207 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2208 		struct mlx5_devx_rqt_attr *rqt_attr = NULL;
2209 		const unsigned int rqt_n =
2210 			1 << (rte_is_power_of_2(queues_n) ?
2211 			      log2above(queues_n) :
2212 			      log2above(priv->config.ind_table_max_size));
2213 
2214 		rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
2215 				      rqt_n * sizeof(uint32_t), 0);
2216 		if (!rqt_attr) {
2217 			DRV_LOG(ERR, "port %u cannot allocate RQT resources",
2218 				dev->data->port_id);
2219 			rte_errno = ENOMEM;
2220 			goto error;
2221 		}
2222 		rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
2223 		rqt_attr->rqt_actual_size = rqt_n;
2224 		for (i = 0; i != queues_n; ++i) {
2225 			struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
2226 								 queues[i]);
2227 			if (!rxq)
2228 				goto error;
2229 			rqt_attr->rq_list[i] = rxq->obj->rq->id;
2230 			ind_tbl->queues[i] = queues[i];
2231 		}
2232 		k = i; /* Retain value of i for use in error case. */
2233 		for (j = 0; k != rqt_n; ++k, ++j)
2234 			rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
2235 		ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
2236 							rqt_attr);
2237 		rte_free(rqt_attr);
2238 		if (!ind_tbl->rqt) {
2239 			DRV_LOG(ERR, "port %u cannot create DevX RQT",
2240 				dev->data->port_id);
2241 			rte_errno = errno;
2242 			goto error;
2243 		}
2244 		ind_tbl->queues_n = queues_n;
2245 	}
2246 	rte_atomic32_inc(&ind_tbl->refcnt);
2247 	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
2248 	return ind_tbl;
2249 error:
2250 	for (j = 0; j < i; j++)
2251 		mlx5_rxq_release(dev, ind_tbl->queues[j]);
2252 	rte_free(ind_tbl);
2253 	DEBUG("port %u cannot create indirection table", dev->data->port_id);
2254 	return NULL;
2255 }
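
/*
 * Editor's note on the wrap-around fill above, with example numbers
 * (ind_table_max_size = 512 is an assumed device value): a power-of-two
 * set such as 4 queues gets a table of exactly 4 entries, while 3 queues
 * get a 512-entry table filled round-robin, i.e. 0, 1, 2, 0, 1, 2, ...
 * until it is full, so RSS spreads traffic almost evenly over the
 * configured queues.
 */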
2256 
2257 /**
2258  * Get an indirection table.
2259  *
2260  * @param dev
2261  *   Pointer to Ethernet device.
2262  * @param queues
2263  *   Queues to include in the indirection table.
2264  * @param queues_n
2265  *   Number of queues in the array.
2266  *
2267  * @return
2268  *   An indirection table if found.
2269  */
2270 static struct mlx5_ind_table_obj *
2271 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2272 		       uint32_t queues_n)
2273 {
2274 	struct mlx5_priv *priv = dev->data->dev_private;
2275 	struct mlx5_ind_table_obj *ind_tbl;
2276 
2277 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2278 		if ((ind_tbl->queues_n == queues_n) &&
2279 		    (memcmp(ind_tbl->queues, queues,
2280 			    ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2281 		     == 0))
2282 			break;
2283 	}
2284 	if (ind_tbl) {
2285 		unsigned int i;
2286 
2287 		rte_atomic32_inc(&ind_tbl->refcnt);
2288 		for (i = 0; i != ind_tbl->queues_n; ++i)
2289 			mlx5_rxq_get(dev, ind_tbl->queues[i]);
2290 	}
2291 	return ind_tbl;
2292 }
2293 
2294 /**
2295  * Release an indirection table.
2296  *
2297  * @param dev
2298  *   Pointer to Ethernet device.
2299  * @param ind_table
2300  *   Indirection table to release.
2301  *
2302  * @return
2303  *   1 while a reference on it exists, 0 when freed.
2304  */
2305 static int
2306 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2307 			   struct mlx5_ind_table_obj *ind_tbl)
2308 {
2309 	unsigned int i;
2310 
2311 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
2312 		if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
2313 			claim_zero(mlx5_glue->destroy_rwq_ind_table
2314 							(ind_tbl->ind_table));
2315 		else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
2316 			claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
2317 	}
2318 	for (i = 0; i != ind_tbl->queues_n; ++i)
2319 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
2320 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
2321 		LIST_REMOVE(ind_tbl, next);
2322 		rte_free(ind_tbl);
2323 		return 0;
2324 	}
2325 	return 1;
2326 }
2327 
2328 /**
2329  * Verify that the indirection table list is empty.
2330  *
2331  * @param dev
2332  *   Pointer to Ethernet device.
2333  *
2334  * @return
2335  *   The number of objects not released.
2336  */
2337 int
2338 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2339 {
2340 	struct mlx5_priv *priv = dev->data->dev_private;
2341 	struct mlx5_ind_table_obj *ind_tbl;
2342 	int ret = 0;
2343 
2344 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2345 		DRV_LOG(DEBUG,
2346 			"port %u indirection table obj %p still referenced",
2347 			dev->data->port_id, (void *)ind_tbl);
2348 		++ret;
2349 	}
2350 	return ret;
2351 }
2352 
2353 /**
2354  * Create an Rx Hash queue.
2355  *
2356  * @param dev
2357  *   Pointer to Ethernet device.
2358  * @param rss_key
2359  *   RSS key for the Rx hash queue.
2360  * @param rss_key_len
2361  *   RSS key length.
2362  * @param hash_fields
2363  *   Verbs protocol hash field to make the RSS on.
2364  * @param queues
2365  *   Queues to include in the hash queue. If hash_fields is empty, only
2366  *   the first queue index is used for the indirection table.
2367  * @param queues_n
2368  *   Number of queues.
2369  * @param tunnel
2370  *   Tunnel type.
2371  *
2372  * @return
2373  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2374  */
2375 struct mlx5_hrxq *
2376 mlx5_hrxq_new(struct rte_eth_dev *dev,
2377 	      const uint8_t *rss_key, uint32_t rss_key_len,
2378 	      uint64_t hash_fields,
2379 	      const uint16_t *queues, uint32_t queues_n,
2380 	      int tunnel __rte_unused)
2381 {
2382 	struct mlx5_priv *priv = dev->data->dev_private;
2383 	struct mlx5_hrxq *hrxq;
2384 	struct ibv_qp *qp = NULL;
2385 	struct mlx5_ind_table_obj *ind_tbl;
2386 	int err;
2387 	struct mlx5_devx_obj *tir = NULL;
2388 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
2389 	struct mlx5_rxq_ctrl *rxq_ctrl =
2390 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
2391 
2392 	queues_n = hash_fields ? queues_n : 1;
2393 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2394 	if (!ind_tbl) {
2395 		enum mlx5_ind_tbl_type type;
2396 
2397 		type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
2398 				MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
2399 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
2400 	}
2401 	if (!ind_tbl) {
2402 		rte_errno = ENOMEM;
2403 		return NULL;
2404 	}
2405 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2406 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2407 		struct mlx5dv_qp_init_attr qp_init_attr;
2408 
2409 		memset(&qp_init_attr, 0, sizeof(qp_init_attr));
2410 		if (tunnel) {
2411 			qp_init_attr.comp_mask =
2412 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2413 			qp_init_attr.create_flags =
2414 				MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
2415 		}
2416 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2417 		if (dev->data->dev_conf.lpbk_mode) {
2418 			/*
2419 			 * Allow packets sent from the NIC to loop back
2420 			 * without a source MAC check.
2421 			 */
2422 			qp_init_attr.comp_mask |=
2423 				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
2424 			qp_init_attr.create_flags |=
2425 				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
2426 		}
2427 #endif
2428 		qp = mlx5_glue->dv_create_qp
2429 			(priv->sh->ctx,
2430 			 &(struct ibv_qp_init_attr_ex){
2431 				.qp_type = IBV_QPT_RAW_PACKET,
2432 				.comp_mask =
2433 					IBV_QP_INIT_ATTR_PD |
2434 					IBV_QP_INIT_ATTR_IND_TABLE |
2435 					IBV_QP_INIT_ATTR_RX_HASH,
2436 				.rx_hash_conf = (struct ibv_rx_hash_conf){
2437 					.rx_hash_function =
2438 						IBV_RX_HASH_FUNC_TOEPLITZ,
2439 					.rx_hash_key_len = rss_key_len,
2440 					.rx_hash_key =
2441 						(void *)(uintptr_t)rss_key,
2442 					.rx_hash_fields_mask = hash_fields,
2443 				},
2444 				.rwq_ind_tbl = ind_tbl->ind_table,
2445 				.pd = priv->sh->pd,
2446 			  },
2447 			  &qp_init_attr);
2448 #else
2449 		qp = mlx5_glue->create_qp_ex
2450 			(priv->sh->ctx,
2451 			 &(struct ibv_qp_init_attr_ex){
2452 				.qp_type = IBV_QPT_RAW_PACKET,
2453 				.comp_mask =
2454 					IBV_QP_INIT_ATTR_PD |
2455 					IBV_QP_INIT_ATTR_IND_TABLE |
2456 					IBV_QP_INIT_ATTR_RX_HASH,
2457 				.rx_hash_conf = (struct ibv_rx_hash_conf){
2458 					.rx_hash_function =
2459 						IBV_RX_HASH_FUNC_TOEPLITZ,
2460 					.rx_hash_key_len = rss_key_len,
2461 					.rx_hash_key =
2462 						(void *)(uintptr_t)rss_key,
2463 					.rx_hash_fields_mask = hash_fields,
2464 				},
2465 				.rwq_ind_tbl = ind_tbl->ind_table,
2466 				.pd = priv->sh->pd,
2467 			 });
2468 #endif
2469 		if (!qp) {
2470 			rte_errno = errno;
2471 			goto error;
2472 		}
2473 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2474 		struct mlx5_devx_tir_attr tir_attr;
2475 		uint32_t i;
2476 		uint32_t lro = 1;
2477 
2478 		/* Enable TIR LRO only if all the queues were configured for it. */
2479 		for (i = 0; i < queues_n; ++i) {
2480 			if (!(*priv->rxqs)[queues[i]]->lro) {
2481 				lro = 0;
2482 				break;
2483 			}
2484 		}
2485 		memset(&tir_attr, 0, sizeof(tir_attr));
2486 		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
2487 		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
2488 		tir_attr.tunneled_offload_en = !!tunnel;
2489 		/* If needed, translate hash_fields bitmap to PRM format. */
2490 		if (hash_fields) {
2491 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
2492 			struct mlx5_rx_hash_field_select *rx_hash_field_select =
2493 					hash_fields & IBV_RX_HASH_INNER ?
2494 					&tir_attr.rx_hash_field_selector_inner :
2495 					&tir_attr.rx_hash_field_selector_outer;
2496 #else
2497 			struct mlx5_rx_hash_field_select *rx_hash_field_select =
2498 					&tir_attr.rx_hash_field_selector_outer;
2499 #endif
2500 
2501 			/* 1 bit: 0: IPv4, 1: IPv6. */
2502 			rx_hash_field_select->l3_prot_type =
2503 				!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
2504 			/* 1 bit: 0: TCP, 1: UDP. */
2505 			rx_hash_field_select->l4_prot_type =
2506 				!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
2507 			/* Bitmask which sets which fields to use in RX Hash. */
2508 			rx_hash_field_select->selected_fields =
2509 			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
2510 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
2511 			((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
2512 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
2513 			((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
2514 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
2515 			((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
2516 			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
2517 		}
2518 		if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
2519 			tir_attr.transport_domain = priv->sh->td->id;
2520 		else
2521 			tir_attr.transport_domain = priv->sh->tdn;
2522 		memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
2523 		tir_attr.indirect_table = ind_tbl->rqt->id;
2524 		if (dev->data->dev_conf.lpbk_mode)
2525 			tir_attr.self_lb_block =
2526 					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
2527 		if (lro) {
2528 			tir_attr.lro_timeout_period_usecs =
2529 					priv->config.lro.timeout;
2530 			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
2531 			tir_attr.lro_enable_mask =
2532 					MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2533 					MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
2534 		}
2535 		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
2536 		if (!tir) {
2537 			DRV_LOG(ERR, "port %u cannot create DevX TIR",
2538 				dev->data->port_id);
2539 			rte_errno = errno;
2540 			goto error;
2541 		}
2542 	}
2543 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
2544 	if (!hrxq)
2545 		goto error;
2546 	hrxq->ind_table = ind_tbl;
2547 	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
2548 		hrxq->qp = qp;
2549 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2550 		hrxq->action =
2551 			mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2552 		if (!hrxq->action) {
2553 			rte_errno = errno;
2554 			goto error;
2555 		}
2556 #endif
2557 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
2558 		hrxq->tir = tir;
2559 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2560 		hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
2561 							(hrxq->tir->obj);
2562 		if (!hrxq->action) {
2563 			rte_errno = errno;
2564 			goto error;
2565 		}
2566 #endif
2567 	}
2568 	hrxq->rss_key_len = rss_key_len;
2569 	hrxq->hash_fields = hash_fields;
2570 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
2571 	rte_atomic32_inc(&hrxq->refcnt);
2572 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
2573 	return hrxq;
2574 error:
2575 	err = rte_errno; /* Save rte_errno before cleanup. */
2576 	mlx5_ind_table_obj_release(dev, ind_tbl);
2577 	if (qp)
2578 		claim_zero(mlx5_glue->destroy_qp(qp));
2579 	else if (tir)
2580 		claim_zero(mlx5_devx_cmd_destroy(tir));
2581 	rte_errno = err; /* Restore rte_errno. */
2582 	return NULL;
2583 }
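
/*
 * Editor's sketch of the intended lookup-then-create pattern for hash Rx
 * queues, using the driver's default RSS key; the caller code below is
 * hypothetical:
 *
 *	struct mlx5_hrxq *hrxq;
 *
 *	hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
 *			     MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *			     queues, queues_n);
 *	if (hrxq == NULL)
 *		hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
 *				     MLX5_RSS_HASH_KEY_LEN, hash_fields,
 *				     queues, queues_n, tunnel);
 *	if (hrxq == NULL)
 *		return -rte_errno;
 */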
2584 
2585 /**
2586  * Get an Rx Hash queue.
2587  *
2588  * @param dev
2589  *   Pointer to Ethernet device.
2590  * @param rss_key
2591  *   RSS key for the Rx hash queue, rss_key_len bytes long.
2592  * @param queues
2593  *   Queues entering in hash queue. In case of empty hash_fields only the
2594  *   first queue index will be taken for the indirection table.
2595  * @param queues_n
2596  *   Number of queues.
2597  *
2598  * @return
2599  *   A matching hash Rx queue on success, NULL if not found.
2600  */
2601 struct mlx5_hrxq *
2602 mlx5_hrxq_get(struct rte_eth_dev *dev,
2603 	      const uint8_t *rss_key, uint32_t rss_key_len,
2604 	      uint64_t hash_fields,
2605 	      const uint16_t *queues, uint32_t queues_n)
2606 {
2607 	struct mlx5_priv *priv = dev->data->dev_private;
2608 	struct mlx5_hrxq *hrxq;
2609 
2610 	queues_n = hash_fields ? queues_n : 1;
2611 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2612 		struct mlx5_ind_table_obj *ind_tbl;
2613 
2614 		if (hrxq->rss_key_len != rss_key_len)
2615 			continue;
2616 		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
2617 			continue;
2618 		if (hrxq->hash_fields != hash_fields)
2619 			continue;
2620 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
2621 		if (!ind_tbl)
2622 			continue;
2623 		if (ind_tbl != hrxq->ind_table) {
2624 			mlx5_ind_table_obj_release(dev, ind_tbl);
2625 			continue;
2626 		}
2627 		rte_atomic32_inc(&hrxq->refcnt);
2628 		return hrxq;
2629 	}
2630 	return NULL;
2631 }
2632 
2633 /**
2634  * Release the hash Rx queue.
2635  *
2636  * @param dev
2637  *   Pointer to Ethernet device.
2638  * @param hrxq
2639  *   Pointer to Hash Rx queue to release.
2640  *
2641  * @return
2642  *   1 while a reference on it exists, 0 when freed.
2643  */
2644 int
2645 mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
2646 {
2647 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2648 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2649 		mlx5_glue->destroy_flow_action(hrxq->action);
2650 #endif
2651 		if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
2652 			claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2653 		else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
2654 			claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
2655 		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
2656 		LIST_REMOVE(hrxq, next);
2657 		rte_free(hrxq);
2658 		return 0;
2659 	}
2660 	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
2661 	return 1;
2662 }
2663 
2664 /**
2665  * Verify that the hash Rx queue list is empty.
2666  *
2667  * @param dev
2668  *   Pointer to Ethernet device.
2669  *
2670  * @return
2671  *   The number of objects not released.
2672  */
2673 int
2674 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2675 {
2676 	struct mlx5_priv *priv = dev->data->dev_private;
2677 	struct mlx5_hrxq *hrxq;
2678 	int ret = 0;
2679 
2680 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
2681 		DRV_LOG(DEBUG,
2682 			"port %u hash Rx queue %p still referenced",
2683 			dev->data->port_id, (void *)hrxq);
2684 		++ret;
2685 	}
2686 	return ret;
2687 }
2688 
2689 /**
2690  * Create a drop Rx queue Verbs/DevX object.
2691  *
2692  * @param dev
2693  *   Pointer to Ethernet device.
2694  *
2695  * @return
2696  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2697  */
2698 static struct mlx5_rxq_obj *
2699 mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
2700 {
2701 	struct mlx5_priv *priv = dev->data->dev_private;
2702 	struct ibv_context *ctx = priv->sh->ctx;
2703 	struct ibv_cq *cq;
2704 	struct ibv_wq *wq = NULL;
2705 	struct mlx5_rxq_obj *rxq;
2706 
2707 	if (priv->drop_queue.rxq)
2708 		return priv->drop_queue.rxq;
2709 	cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
2710 	if (!cq) {
2711 		DEBUG("port %u cannot allocate CQ for drop queue",
2712 		      dev->data->port_id);
2713 		rte_errno = errno;
2714 		goto error;
2715 	}
2716 	wq = mlx5_glue->create_wq(ctx,
2717 		 &(struct ibv_wq_init_attr){
2718 			.wq_type = IBV_WQT_RQ,
2719 			.max_wr = 1,
2720 			.max_sge = 1,
2721 			.pd = priv->sh->pd,
2722 			.cq = cq,
2723 		 });
2724 	if (!wq) {
2725 		DEBUG("port %u cannot allocate WQ for drop queue",
2726 		      dev->data->port_id);
2727 		rte_errno = errno;
2728 		goto error;
2729 	}
2730 	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
2731 	if (!rxq) {
2732 		DEBUG("port %u cannot allocate drop Rx queue memory",
2733 		      dev->data->port_id);
2734 		rte_errno = ENOMEM;
2735 		goto error;
2736 	}
2737 	rxq->cq = cq;
2738 	rxq->wq = wq;
2739 	priv->drop_queue.rxq = rxq;
2740 	return rxq;
2741 error:
2742 	if (wq)
2743 		claim_zero(mlx5_glue->destroy_wq(wq));
2744 	if (cq)
2745 		claim_zero(mlx5_glue->destroy_cq(cq));
2746 	return NULL;
2747 }
2748 
2749 /**
2750  * Release a drop Rx queue Verbs/DevX object.
2751  *
2752  * @param dev
2753  *   Pointer to Ethernet device.
2757  */
2758 static void
2759 mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
2760 {
2761 	struct mlx5_priv *priv = dev->data->dev_private;
2762 	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
2763 
2764 	if (rxq->wq)
2765 		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
2766 	if (rxq->cq)
2767 		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
2768 	rte_free(rxq);
2769 	priv->drop_queue.rxq = NULL;
2770 }
2771 
2772 /**
2773  * Create a drop indirection table.
2774  *
2775  * @param dev
2776  *   Pointer to Ethernet device.
2777  *
2778  * @return
2779  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2780  */
2781 static struct mlx5_ind_table_obj *
2782 mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
2783 {
2784 	struct mlx5_priv *priv = dev->data->dev_private;
2785 	struct mlx5_ind_table_obj *ind_tbl;
2786 	struct mlx5_rxq_obj *rxq;
2787 	struct mlx5_ind_table_obj tmpl;
2788 
2789 	rxq = mlx5_rxq_obj_drop_new(dev);
2790 	if (!rxq)
2791 		return NULL;
2792 	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
2793 		(priv->sh->ctx,
2794 		 &(struct ibv_rwq_ind_table_init_attr){
2795 			.log_ind_tbl_size = 0,
2796 			.ind_tbl = &rxq->wq,
2797 			.comp_mask = 0,
2798 		 });
2799 	if (!tmpl.ind_table) {
2800 		DEBUG("port %u cannot allocate indirection table for drop"
2801 		      " queue",
2802 		      dev->data->port_id);
2803 		rte_errno = errno;
2804 		goto error;
2805 	}
2806 	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
2807 	if (!ind_tbl) {
2808 		rte_errno = ENOMEM;
2809 		goto error;
2810 	}
2811 	ind_tbl->ind_table = tmpl.ind_table;
2812 	return ind_tbl;
2813 error:
2814 	mlx5_rxq_obj_drop_release(dev);
2815 	return NULL;
2816 }
2817 
2818 /**
2819  * Release a drop indirection table.
2820  *
2821  * @param dev
2822  *   Pointer to Ethernet device.
2823  */
2824 static void
2825 mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
2826 {
2827 	struct mlx5_priv *priv = dev->data->dev_private;
2828 	struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
2829 
2830 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
2831 	mlx5_rxq_obj_drop_release(dev);
2832 	rte_free(ind_tbl);
2833 	priv->drop_queue.hrxq->ind_table = NULL;
2834 }
2835 
2836 /**
2837  * Create a drop Rx Hash queue.
2838  *
2839  * @param dev
2840  *   Pointer to Ethernet device.
2841  *
2842  * @return
2843  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
2844  */
2845 struct mlx5_hrxq *
2846 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
2847 {
2848 	struct mlx5_priv *priv = dev->data->dev_private;
2849 	struct mlx5_ind_table_obj *ind_tbl = NULL;
2850 	struct ibv_qp *qp = NULL;
2851 	struct mlx5_hrxq *hrxq = NULL;
2852 
2853 	if (priv->drop_queue.hrxq) {
2854 		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
2855 		return priv->drop_queue.hrxq;
2856 	}
2857 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
2858 	if (!hrxq) {
2859 		DRV_LOG(WARNING,
2860 			"port %u cannot allocate memory for drop queue",
2861 			dev->data->port_id);
2862 		rte_errno = ENOMEM;
2863 		goto error;
2864 	}
2865 	priv->drop_queue.hrxq = hrxq;
2866 	ind_tbl = mlx5_ind_table_obj_drop_new(dev);
2867 	if (!ind_tbl)
2868 		goto error;
2869 	hrxq->ind_table = ind_tbl;
2870 	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
2871 		 &(struct ibv_qp_init_attr_ex){
2872 			.qp_type = IBV_QPT_RAW_PACKET,
2873 			.comp_mask =
2874 				IBV_QP_INIT_ATTR_PD |
2875 				IBV_QP_INIT_ATTR_IND_TABLE |
2876 				IBV_QP_INIT_ATTR_RX_HASH,
2877 			.rx_hash_conf = (struct ibv_rx_hash_conf){
2878 				.rx_hash_function =
2879 					IBV_RX_HASH_FUNC_TOEPLITZ,
2880 				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
2881 				.rx_hash_key = rss_hash_default_key,
2882 				.rx_hash_fields_mask = 0,
2883 				},
2884 			.rwq_ind_tbl = ind_tbl->ind_table,
2885 			.pd = priv->sh->pd
2886 		 });
2887 	if (!qp) {
2888 		DEBUG("port %u cannot allocate QP for drop queue",
2889 		      dev->data->port_id);
2890 		rte_errno = errno;
2891 		goto error;
2892 	}
2893 	hrxq->qp = qp;
2894 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2895 	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
2896 	if (!hrxq->action) {
2897 		rte_errno = errno;
2898 		goto error;
2899 	}
2900 #endif
2901 	rte_atomic32_set(&hrxq->refcnt, 1);
2902 	return hrxq;
2903 error:
2904 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2905 	if (hrxq && hrxq->action)
2906 		mlx5_glue->destroy_flow_action(hrxq->action);
2907 #endif
2908 	if (qp)
2909 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2910 	if (ind_tbl)
2911 		mlx5_ind_table_obj_drop_release(dev);
2912 	if (hrxq) {
2913 		priv->drop_queue.hrxq = NULL;
2914 		rte_free(hrxq);
2915 	}
2916 	return NULL;
2917 }
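
/*
 * Editor's note: the drop hash Rx queue is a per-port singleton kept in
 * priv->drop_queue and reference counted, so the expected pairing is
 * (hypothetical caller code):
 *
 *	struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
 *
 *	if (drop == NULL)
 *		return -rte_errno;
 *	... point a flow rule at drop->action (or drop->qp) ...
 *	mlx5_hrxq_drop_release(dev);
 */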
2918 
2919 /**
2920  * Release a drop hash Rx queue.
2921  *
2922  * @param dev
2923  *   Pointer to Ethernet device.
2924  */
2925 void
2926 mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
2927 {
2928 	struct mlx5_priv *priv = dev->data->dev_private;
2929 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2930 
2931 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
2932 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2933 		mlx5_glue->destroy_flow_action(hrxq->action);
2934 #endif
2935 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
2936 		mlx5_ind_table_obj_drop_release(dev);
2937 		rte_free(hrxq);
2938 		priv->drop_queue.hrxq = NULL;
2939 	}
2940 }
2941