/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq
 *   Rx queue.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-zero otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
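	/* VSD stands for "VLAN stripping disabled", hence the inverted flag. */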
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq
 *   DevX Rx queue.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		if (rxq->lwm) {
			rq_attr.modify_bitmask |=
				MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM;
			rq_attr.lwm = rxq->lwm;
		}
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RDY2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		rq_attr.modify_bitmask |= MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM;
		rq_attr.lwm = rxq->lwm;
		break;
	default:
		break;
	}
	if (rxq->ctrl->is_hairpin)
		return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept so the prototype matches the Verbs implementation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET:"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY:"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is only meaningful for the Verbs
	 * implementation. The same function pointer may refer to either this
	 * function or its Verbs counterpart, so both must share the same
	 * prototype.
	 */
	(void)dev_port;
	return 0;
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq
 *   DevX Rx queue.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;

	if (rxq_obj == NULL)
		return;
	if (rxq_obj->rxq_ctrl->is_hairpin) {
		if (rxq_obj->rq == NULL)
			return;
		mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		if (rxq->devx_rq.rq == NULL)
			return;
		mlx5_devx_rq_destroy(&rxq->devx_rq);
		if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
			return;
		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
		if (rxq_obj->devx_channel) {
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
			rxq_obj->devx_channel = NULL;
		}
	}
	rxq->ctrl->started = false;
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
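	/*
	 * The event cookie must match the CQ pointer that was registered
	 * when subscribing in mlx5_rxq_create_devx_cq_resources().
	 */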
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Get LWM event for shared context, return the correct port/rxq for this event.
 *
 * @param priv
 *   Pointer to the mlx5_priv object.
 * @param[out] rxq_idx
 *   Which rxq gets this event.
 * @param[out] port_id
 *   Which port gets this event.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event_lwm(struct mlx5_priv *priv, int *rxq_idx, int *port_id)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret;

	memset(&out, 0, sizeof(out));
	ret = mlx5_glue->devx_get_event(priv->sh->devx_channel_lwm,
					&out.event_resp,
					sizeof(out.buf));
	if (ret < 0) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: failed to get LWM event.", __func__);
		return -rte_errno;
	}
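	/*
	 * The LWM event cookie packs both identifiers:
	 * cookie = (port_id << LWM_COOKIE_PORTID_OFFSET) |
	 *          (rxq_idx << LWM_COOKIE_RXQID_OFFSET).
	 */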
	*port_id = (((uint32_t)out.event_resp.cookie) >>
		    LWM_COOKIE_PORTID_OFFSET) & LWM_COOKIE_PORTID_MASK;
	*rxq_idx = (((uint32_t)out.event_resp.cookie) >>
		    LWM_COOKIE_RXQID_OFFSET) & LWM_COOKIE_RXQID_MASK;
	return 0;
#else
	(void)priv;
	(void)rxq_idx;
	(void)port_id;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Create an RQ object using DevX.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	rq_attr.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->log_strd_num -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->log_strd_sz -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
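	/*
	 * Each WQE carries 2^sges_n scatter entries, hence the WQE stride is
	 * the single descriptor size scaled by the SGE count.
	 */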
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
						MLX5_WQ_END_PAD_MODE_ALIGN :
						MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = cdev->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	rq_attr.delay_drop_en = rxq_data->delay_drop;
	rq_attr.user_index = rte_cpu_to_be_16(priv->dev_data->port_id);
	if (rxq_data->shared) /* Create RMP based RQ. */
		rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
				   log_desc_n, &rq_attr, rxq_ctrl->socket);
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_devx_cq *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	uint16_t port_id = priv->dev_data->port_id;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (rxq_ctrl->started)
		return 0;
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			port_id, priv->config.cqe_comp_fmt);
		/*
		 * Double the CQE count only for the non-vectorized path;
		 * for vectorized Rx it must not be doubled, so that cq_ci
		 * and rq_ci stay aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW timestamp.",
			port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->rx_uar.obj);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
				  log_cqe_n, &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->uar_data = sh->rx_uar.cq_db;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	rxq_data->cq_ci = 0;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
					      (rxq_ctrl->obj->devx_channel,
					       cq_obj->cq->obj,
					       sizeof(event_nums),
					       event_nums,
					       (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
{
	uint16_t idx = rxq->idx;
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_hca_attr *hca_attr __rte_unused = &priv->sh->cdev->config.hca_attr;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_devx_create_rq_attr unlocked_attr = { 0 };
	struct mlx5_devx_create_rq_attr locked_attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
	tmpl->rxq_ctrl = rxq_ctrl;
	unlocked_attr.hairpin = 1;
	max_wq_data =
		priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		unlocked_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		unlocked_attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the number of packets to the maximum value for performance. */
	unlocked_attr.wq_attr.log_hairpin_num_packets =
			unlocked_attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	unlocked_attr.counter_set_id = priv->counter_set_id;
	rxq_ctrl->rxq.delay_drop = priv->config.hp_delay_drop;
	unlocked_attr.delay_drop_en = priv->config.hp_delay_drop;
	unlocked_attr.hairpin_data_buffer_type =
			MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_UNLOCKED_INTERNAL_BUFFER;
	if (rxq->hairpin_conf.use_locked_device_memory) {
		/*
		 * It is assumed that configuration is verified against capabilities
		 * during queue setup.
		 */
		MLX5_ASSERT(hca_attr->hairpin_data_buffer_locked);
		rte_memcpy(&locked_attr, &unlocked_attr, sizeof(locked_attr));
		locked_attr.hairpin_data_buffer_type =
				MLX5_RQC_HAIRPIN_DATA_BUFFER_TYPE_LOCKED_INTERNAL_BUFFER;
		tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &locked_attr,
						   rxq_ctrl->socket);
		if (!tmpl->rq && rxq->hairpin_conf.force_memory) {
			DRV_LOG(ERR, "Port %u Rx hairpin queue %u can't create RQ object"
				     " with locked memory buffer",
				     priv->dev_data->port_id, idx);
			return -rte_errno;
		} else if (!tmpl->rq && !rxq->hairpin_conf.force_memory) {
			DRV_LOG(WARNING, "Port %u Rx hairpin queue %u can't create RQ object"
					 " with locked memory buffer. Falling back to unlocked"
					 " device memory.",
					 priv->dev_data->port_id, idx);
			rte_errno = 0;
			goto create_rq_unlocked;
		}
		goto create_rq_set_state;
	}

create_rq_unlocked:
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &unlocked_attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			priv->dev_data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
create_rq_set_state:
	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
{
	struct mlx5_priv *priv = rxq->priv;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->is_hairpin)
		return mlx5_rxq_obj_hairpin_new(rxq);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq && !rxq_ctrl->started) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
							(priv->sh->cdev->ctx,
							 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	rxq_data->delay_drop = priv->config.std_delay_drop;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			priv->dev_data->port_id, rxq->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	if (!rxq_data->shared) {
		rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
		rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
	} else if (!rxq_ctrl->started) {
		rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
		rxq_data->rq_db =
				(uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
	}
	if (!rxq_ctrl->started) {
		mlx5_rxq_initialize(rxq_data);
		rxq_ctrl->wqn = rxq->devx_rq.rq->id;
	}
	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices or NULL, in which case
 *   the attribute will be filled with the drop queue ID.
 * @param queues_n
 *   Size of @p queues array or 0 if it is NULL.
 *
 * @return
 *   The initialized RQT attributes object, or NULL otherwise (with rte_errno
 *   set).
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				     const unsigned int log_n,
				     const uint16_t *queues,
				     const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->sh->dev_cap.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	if (queues == NULL) {
		for (i = 0; i < rqt_n; i++)
			rqt_attr->rq_list[i] =
					priv->drop_queue.rxq->devx_rq.rq->id;
		return rqt_attr;
	}
	for (i = 0; i != queues_n; ++i) {
		if (mlx5_is_external_rxq(dev, queues[i])) {
			struct mlx5_external_rxq *ext_rxq =
					mlx5_ext_rxq_get(dev, queues[i]);

			rqt_attr->rq_list[i] = ext_rxq->hw_id;
		} else {
			struct mlx5_rxq_priv *rxq =
					mlx5_rxq_get(dev, queues[i]);

			MLX5_ASSERT(rxq != NULL);
			if (rxq->ctrl->is_hairpin)
				rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
			else
				rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
		}
	}
	MLX5_ASSERT(i > 0);
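	/*
	 * Wrap around the configured queues to fill the whole table, e.g.
	 * with two queues and rqt_n == 8 the list becomes
	 * q0, q1, q0, q1, q0, q1, q0, q1.
	 */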
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
							  NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
							queues,
							queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR. If table queues array is NULL,
 *   a TIR for drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	bool is_hairpin;
	bool lro = false;
	uint32_t i;

	/* NULL queues designate drop queue. */
	if (ind_tbl->queues == NULL) {
		is_hairpin = priv->drop_queue.rxq->ctrl->is_hairpin;
	} else if (mlx5_is_external_rxq(dev, ind_tbl->queues[0])) {
		/* External RxQ supports neither Hairpin nor LRO. */
		is_hairpin = false;
	} else {
		is_hairpin = mlx5_rxq_is_hairpin(dev, ind_tbl->queues[0]);
		lro = true;
		/* Enable TIR LRO only if all the queues were configured for it. */
		for (i = 0; i < ind_tbl->queues_n; ++i) {
			struct mlx5_rxq_data *rxq_i =
				mlx5_rxq_data_get(dev, ind_tbl->queues[i]);

			if (rxq_i != NULL && !rxq_i->lro) {
				lro = false;
				break;
			}
		}
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
					!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
			((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
			((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT) |
			((!!(hash_fields & IBV_RX_HASH_IPSEC_SPI)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI);
	}
	if (is_hairpin)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		MLX5_ASSERT(priv->sh->config.lro_allowed);
		tir_attr->lro_timeout_period_usecs = priv->config.lro_timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (hrxq->hws_flags) {
		hrxq->action = mlx5dr_action_create_dest_tir
			(priv->dr_ctx,
			 (struct mlx5dr_devx_obj *)hrxq->tir, hrxq->hws_flags);
		if (!hrxq->action)
			goto error;
		return 0;
	}
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		       const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Untested modification fields:
	 * - rx_hash_symmetric is not set in hrxq_new(),
	 * - rx_hash_fn is hard-coded in hrxq_new(),
	 * - lro_xxx are not set after Rx queue setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
			memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int socket_id = dev->device->numa_node;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
	struct mlx5_rxq_obj *rxq_obj = NULL;
	int ret;

	/*
	 * Initialize dummy control structures.
	 * They are required to hold pointers for cleanup
	 * and are only accessible via drop queue DevX objects.
	 */
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
	if (rxq == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue private",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
			       0, socket_id);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue control",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
	if (rxq_obj == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Set the CPU socket ID where the rxq_ctrl was allocated. */
	rxq_ctrl->socket = socket_id;
	rxq_obj->rxq_ctrl = rxq_ctrl;
	rxq_ctrl->is_hairpin = false;
	rxq_ctrl->sh = priv->sh;
	rxq_ctrl->obj = rxq_obj;
	rxq->ctrl = rxq_ctrl;
	rxq->priv = priv;
	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
			dev->data->port_id);
		goto error;
	}
	rxq_ctrl->rxq.delay_drop = 0;
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(rxq);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret != 0)
		goto error;
	/* Initialize drop queue. */
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq != NULL && rxq->devx_rq.rq != NULL)
		mlx5_devx_rq_destroy(&rxq->devx_rq);
	if (rxq_obj != NULL) {
		if (rxq_obj->cq_obj.cq != NULL)
			mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_free(rxq_obj);
	}
	if (rxq_ctrl != NULL)
		mlx5_free(rxq_ctrl);
	if (rxq != NULL)
		mlx5_free(rxq);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release drop Rx queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;

	mlx5_rxq_devx_obj_release(rxq);
	mlx5_free(rxq_ctrl->obj);
	mlx5_free(rxq_ctrl);
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (hrxq->tir != NULL)
		mlx5_devx_tir_destroy(hrxq);
	if (hrxq->ind_table->ind_table != NULL)
		mlx5_devx_ind_table_destroy(hrxq->ind_table);
	if (priv->drop_queue.rxq->devx_rq.rq != NULL)
		mlx5_rxq_devx_obj_drop_release(dev);
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	int ret;

	ret = mlx5_rxq_devx_obj_drop_create(dev);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop RX queue");
		return ret;
	}
	if (priv->sh->config.dv_flow_en == 2)
		return 0;
	/* hrxq->ind_table queues are NULL, the drop RX queue ID will be used. */
	ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
		goto error;
	}
	ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue");
		goto error;
	}
	return 0;
error:
	mlx5_devx_drop_action_destroy(dev);
	return ret;
}

/**
 * Select the TIS number for a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   The TIS number to use for the queue.
 */
static uint32_t
mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int tis_idx;

	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
			MLX5_LAG_MODE_TIS) {
		tis_idx = (priv->lag_affinity_idx + queue_idx) %
			priv->sh->bond.n_port;
		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
			dev->data->port_id, queue_idx, tis_idx + 1,
			priv->sh->lag.tx_remap_affinity[tis_idx]);
	} else {
		tis_idx = 0;
	}
	MLX5_ASSERT(priv->sh->tis[tis_idx]);
	return priv->sh->tis[tis_idx]->id;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr dev_mem_attr = { 0 };
	struct mlx5_devx_create_sq_attr host_mem_attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	void *umem_buf = NULL;
	void *umem_obj = NULL;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	dev_mem_attr.hairpin = 1;
	dev_mem_attr.tis_lst_sz = 1;
	dev_mem_attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
	max_wq_data =
		priv->sh->cdev->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		dev_mem_attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		dev_mem_attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the number of packets to the maximum value for performance. */
	dev_mem_attr.wq_attr.log_hairpin_num_packets =
			dev_mem_attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	dev_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_INTERNAL_BUFFER;
	if (txq_ctrl->hairpin_conf.use_rte_memory) {
		uint32_t umem_size;
		uint32_t umem_dbrec;
		size_t alignment = MLX5_WQE_BUF_ALIGNMENT;

		if (alignment == (size_t)-1) {
			DRV_LOG(ERR, "Failed to get WQE buf alignment.");
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		/*
		 * It is assumed that configuration is verified against capabilities
		 * during queue setup.
		 */
		MLX5_ASSERT(hca_attr->hairpin_sq_wq_in_host_mem);
		MLX5_ASSERT(hca_attr->hairpin_sq_wqe_bb_size > 0);
		rte_memcpy(&host_mem_attr, &dev_mem_attr, sizeof(host_mem_attr));
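		/*
		 * UMEM layout: the WQE ring comes first, then the doorbell
		 * record at the next MLX5_DBR_SIZE-aligned offset.
		 */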
		umem_size = MLX5_WQE_SIZE *
			RTE_BIT32(host_mem_attr.wq_attr.log_hairpin_num_packets);
		umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
		umem_size += MLX5_DBR_SIZE;
		umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
				       alignment, priv->sh->numa_node);
		if (umem_buf == NULL && txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(ERR, "Failed to allocate memory for hairpin TX queue");
			rte_errno = ENOMEM;
			return -rte_errno;
		} else if (umem_buf == NULL && !txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(WARNING, "Failed to allocate memory for hairpin TX queue."
					 " Falling back to TX queue located on the device.");
			goto create_sq_on_device;
		}
		umem_obj = mlx5_os_umem_reg(priv->sh->cdev->ctx,
					    (void *)(uintptr_t)umem_buf,
					    umem_size,
					    IBV_ACCESS_LOCAL_WRITE);
		if (umem_obj == NULL && txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(ERR, "Failed to register UMEM for hairpin TX queue");
			mlx5_free(umem_buf);
			return -rte_errno;
		} else if (umem_obj == NULL && !txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(WARNING, "Failed to register UMEM for hairpin TX queue."
					 " Falling back to TX queue located on the device.");
			rte_errno = 0;
			mlx5_free(umem_buf);
			goto create_sq_on_device;
		}
		host_mem_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		host_mem_attr.wq_attr.wq_umem_valid = 1;
		host_mem_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
		host_mem_attr.wq_attr.wq_umem_offset = 0;
		host_mem_attr.wq_attr.dbr_umem_valid = 1;
		host_mem_attr.wq_attr.dbr_umem_id = host_mem_attr.wq_attr.wq_umem_id;
		host_mem_attr.wq_attr.dbr_addr = umem_dbrec;
		host_mem_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
		host_mem_attr.wq_attr.log_wq_sz =
				host_mem_attr.wq_attr.log_hairpin_num_packets *
				hca_attr->hairpin_sq_wqe_bb_size;
		host_mem_attr.wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
		host_mem_attr.hairpin_wq_buffer_type = MLX5_SQC_HAIRPIN_WQ_BUFFER_TYPE_HOST_MEMORY;
		tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &host_mem_attr);
		if (!tmpl->sq && txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(ERR,
				"Port %u tx hairpin queue %u can't create SQ object.",
				dev->data->port_id, idx);
			claim_zero(mlx5_os_umem_dereg(umem_obj));
			mlx5_free(umem_buf);
			return -rte_errno;
		} else if (!tmpl->sq && !txq_ctrl->hairpin_conf.force_memory) {
			DRV_LOG(WARNING,
				"Port %u tx hairpin queue %u failed to allocate SQ object"
				" using host memory. Falling back to TX queue located"
				" on the device",
				dev->data->port_id, idx);
			rte_errno = 0;
			claim_zero(mlx5_os_umem_dereg(umem_obj));
			mlx5_free(umem_buf);
			goto create_sq_on_device;
		}
		tmpl->umem_buf_wq_buffer = umem_buf;
		tmpl->umem_obj_wq_buffer = umem_obj;
		return 0;
	}

create_sq_on_device:
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &dev_mem_attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
				  uint16_t log_desc_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_uar *uar = &priv->sh->tx_uar;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.flush_in_error_en = 1,
		.allow_multi_pkt_send_wqe = !!priv->config.mps,
		.min_wqe_inline_mode = cdev->config.hca_attr.vport_inline_mode,
		.allow_swp = !!priv->sh->dev_cap.swp,
		.cqn = txq_obj->cq_obj.cq->id,
		.tis_lst_sz = 1,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = cdev->pdn,
			.uar_page = mlx5_os_get_devx_uar_page_id(uar->obj),
		},
		.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
		.tis_num = mlx5_get_txq_tis_num(dev, idx),
	};

	/* Create Send Queue object with DevX. */
	return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
				   log_desc_n, &sq_attr, priv->sh->numa_node);
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->is_hairpin)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		     dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_proc_priv *ppriv = MLX5_PROC_PRIV(PORT_ID(priv));
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar.obj),
	};
	uint32_t cqe_n, log_desc_n;
	uint32_t wqe_n, wqe_size;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	MLX5_ASSERT(ppriv);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
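	/*
	 * Roughly one CQE is requested per MLX5_TX_COMP_THRESH descriptors,
	 * plus extra slots (MLX5_TX_COMP_THRESH_INLINE_DIV) as headroom.
	 */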
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	log_desc_n = log2above(cqe_n);
	cqe_n = 1UL << log_desc_n;
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Create completion queue object with DevX. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
				  &cq_attr, priv->sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	txq_data->cqe_n = log_desc_n;
	txq_data->cqe_s = cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = txq_obj->cq_obj.cqes;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = txq_obj->cq_obj.db_rec;
	*txq_data->cq_db = 0;
	/*
	 * Adjust the number of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle the specified
	 * number of packets. When a queue is created with Verbs, rdma-core
	 * adjusts the queue size internally in mlx5_calc_sq_size(); do the
	 * same here for a queue created with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
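	/* Convert the WQE size from bytes to WQEBB (MLX5_WQE_SIZE) units. */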
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
	/* Create Send Queue object with DevX. */
	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
			(uint32_t)priv->sh->dev_cap.max_qp_wr);
	log_desc_n = log2above(wqe_n);
	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log_desc_n;
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
	*txq_data->qp_db = 0;
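	/* Cache the SQ number pre-shifted by 8 bits for WQE control segments. */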
	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
	txq_data->db_heu = sh->cdev->config.dbnc == MLX5_SQ_DB_HEURISTIC;
	txq_data->db_nc = sh->tx_uar.dbnc;
	txq_data->wait_on_time = !!(!sh->config.tx_pp &&
				    sh->cdev->config.hca_attr.wait_on_time);
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * When using DevX, the TIS transport domain value must be queried and
	 * stored. This is done once per port and the value is later used on
	 * Rx when creating a matching TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	txq_ctrl->uar_mmap_offset =
			mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar.obj);
	ppriv->uar_table[txq_data->idx] = sh->tx_uar.bf_db;
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->is_hairpin) {
		if (txq_obj->sq) {
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq));
			txq_obj->sq = NULL;
		}
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
		if (txq_obj->umem_obj_wq_buffer) {
			claim_zero(mlx5_os_umem_dereg(txq_obj->umem_obj_wq_buffer));
			txq_obj->umem_obj_wq_buffer = NULL;
		}
		if (txq_obj->umem_buf_wq_buffer) {
			mlx5_free(txq_obj->umem_buf_wq_buffer);
			txq_obj->umem_buf_wq_buffer = NULL;
		}
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.rxq_event_get_lwm = mlx5_rx_devx_get_event_lwm,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_txq_devx_modify,
	.txq_obj_release = mlx5_txq_devx_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};
1619