/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"


/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
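
/*
 * Usage sketch (illustrative only): "vsd" is the PRM VLAN Strip Disable
 * bit, hence the inversion above. A caller enabling stripping at runtime
 * would issue:
 *
 *     mlx5_rxq_obj_modify_rq_vlan_strip(rxq_obj, 1);
 *
 * and pass 0 to disable it again; the RQ stays in the RDY state for both
 * directions of the change.
 */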

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
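
/*
 * Usage sketch (illustrative only): the cases above follow the PRM
 * RST -> RDY -> ERR RQ state machine, so a start/stop sequence maps to:
 *
 *     mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RST2RDY);
 *     ...
 *     mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
 *
 * An unlisted type falls through the switch with a zeroed attribute
 * structure, so the FW command is still issued; callers are expected to
 * pass only the enumerated transitions.
 */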

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept only to match the prototype of the Verbs counterpart.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		    uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is relevant only for the Verbs API; a
	 * function pointer may refer either to this function or to its Verbs
	 * counterpart, so both must share the same prototype.
	 */
	(void)dev_port;
	return 0;
}
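
/*
 * Usage sketch (illustrative only): MLX5_TXQ_MOD_ERR2RDY recovers an SQ
 * in two steps, ERR -> RST and then RST -> RDY, e.g. from an error
 * handler:
 *
 *     if (mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_ERR2RDY, 0))
 *         DRV_LOG(ERR, "SQ recovery failed: %s", strerror(rte_errno));
 *
 * The last argument is the ignored dev_port placeholder.
 */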

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}
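
/*
 * Worked example (illustrative only, assuming a 16B
 * struct mlx5_wqe_data_seg): with elts_n = 10 and sges_n = 1 the function
 * gets wqe_n = 1 << (10 - 1) = 512 WQEs, log_wqe_size = 4 + 1 = 5, so
 * wqe_size rounds up to 32B and wq_size = 512 * 32 = 16KB is allocated
 * and registered as umem for the RQ.
 */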

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX :
					MLX5_CQE_RESP_FORMAT_HASH;
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialize CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}
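
/*
 * Sizing note (illustrative only): when CQE compression is enabled and
 * vectorized Rx is not supported (mlx5_rxq_check_vec_support() < 0),
 * cqe_n is doubled before rounding, e.g. 512 becomes 1024, giving
 * log_cqe_n = 10 and cq_size = 1024 * sizeof(struct mlx5_cqe). The final
 * memset to 0xFF marks every CQE as HW-owned before the CQ is used.
 */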

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create RQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Create RQT using DevX API as a field of the indirection table object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
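
/*
 * Worked example (illustrative only) of the wrap-around fill above: with
 * queues_n = 3 and log_n = 2 (rqt_n = 4), rq_list becomes
 * { rq0, rq1, rq2, rq0 }: the second loop replays the list from its
 * beginning until the RQT reaches its power-of-two actual size.
 */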

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	bool lro = true;
	uint32_t i;
	int err;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
					 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							       (hrxq->tir->obj);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
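
/*
 * Worked example (illustrative only) of the PRM translation above: for
 * hash_fields selecting TCP over IPv4 on both source and destination,
 * l3_prot_type = 0 (IPv4), l4_prot_type = 0 (TCP), and selected_fields
 * sets the SRC_IP, DST_IP, L4_SPORT and L4_DPORT bits, i.e. the Toeplitz
 * hash covers the classic 4-tuple.
 */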

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its TIR.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
/**
 * Release DevX SQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->sq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
	if (txq_obj->sq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
	if (txq_obj->sq_buf)
		mlx5_free(txq_obj->sq_buf);
	if (txq_obj->sq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						 (txq_obj->sq_dbrec_page->umem),
					    txq_obj->sq_dbrec_offset));
}

/**
 * Release DevX Tx CQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->cq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
	if (txq_obj->cq_umem)
		claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
	if (txq_obj->cq_buf)
		mlx5_free(txq_obj->cq_buf);
	if (txq_obj->cq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						 (txq_obj->cq_dbrec_page->umem),
					    txq_obj->cq_dbrec_offset));
}

/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_txq_release_devx_cq_resources(txq_obj);
	mlx5_txq_release_devx_sq_resources(txq_obj);
}

/**
 * Create a DevX CQ object and its resources for a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of CQEs in CQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_cqe *cqe;
	size_t page_size;
	size_t alignment;
	uint32_t cqe_n;
	uint32_t i;
	int ret;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return 0;
	}
	/* Allocate memory buffer for CQEs. */
	alignment = MLX5_CQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get CQE buf alignment.");
		rte_errno = ENOMEM;
		return 0;
	}
	/* Create the Completion Queue. */
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	cqe_n = 1UL << log2above(cqe_n);
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return 0;
	}
	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      cqe_n * sizeof(struct mlx5_cqe),
				      alignment,
				      priv->sh->numa_node);
	if (!txq_obj->cq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (CQ).",
			dev->data->port_id, txq_data->idx);
		rte_errno = ENOMEM;
		return 0;
	}
	/* Register allocated buffer in user space with DevX. */
	txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						(void *)txq_obj->cq_buf,
						cqe_n * sizeof(struct mlx5_cqe),
						IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (CQ).",
			dev->data->port_id, txq_data->idx);
		goto error;
	}
	/* Allocate doorbell record for completion queue. */
	txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->cq_dbrec_page);
	if (txq_obj->cq_dbrec_offset < 0) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
	cq_attr.log_cq_size = rte_log2_u32(cqe_n);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!txq_obj->cq_devx) {
		rte_errno = errno;
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	/* Initially fill CQ buffer with invalid CQE opcode. */
	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
	for (i = 0; i < cqe_n; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
	return cqe_n;
error:
	ret = rte_errno;
	mlx5_txq_release_devx_cq_resources(txq_obj);
	rte_errno = ret;
	return 0;
}
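
/*
 * Worked example (illustrative only, assuming MLX5_TX_COMP_THRESH = 32
 * and MLX5_TX_COMP_THRESH_INLINE_DIV = 8): with elts_n = 10, cqe_n =
 * 1024 / 32 + 1 + 8 = 41, rounded up by log2above() to 64 CQEs; anything
 * above UINT16_MAX is rejected before any allocation takes place.
 */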

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of WQEs in SQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	size_t page_size;
	uint32_t wqe_n;
	int ret;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return 0;
	}
	wqe_n = RTE_MIN(1UL << txq_data->elts_n,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      wqe_n * sizeof(struct mlx5_wqe),
				      page_size, priv->sh->numa_node);
	if (!txq_obj->sq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (SQ).",
			dev->data->port_id, txq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	txq_obj->sq_umem = mlx5_glue->devx_umem_reg
					(priv->sh->ctx,
					 (void *)txq_obj->sq_buf,
					 wqe_n * sizeof(struct mlx5_wqe),
					 IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (SQ).",
			dev->data->port_id, txq_data->idx);
		goto error;
	}
	/* Allocate doorbell record for send queue. */
	txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->sq_dbrec_page);
	if (txq_obj->sq_dbrec_offset < 0) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate SQ door-bell.");
		goto error;
	}
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = priv->sh->tis->id;
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = txq_obj->cq_devx->id;
	sq_attr.flush_in_error_en = 1;
	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
	sq_attr.allow_swp = !!priv->config.swp;
	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
	sq_attr.wq_attr.uar_page =
				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = priv->sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
	sq_attr.wq_attr.dbr_umem_id =
			mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
	/* Create Send Queue object with DevX. */
	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
	if (!txq_obj->sq_devx) {
		rte_errno = errno;
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	return wqe_n;
error:
	ret = rte_errno;
	mlx5_txq_release_devx_sq_resources(txq_obj);
	rte_errno = ret;
	return 0;
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		     dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	void *reg_addr;
	uint32_t cqe_n;
	uint32_t wqe_n;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
	cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx);
	if (!cqe_n) {
		rte_errno = errno;
		goto error;
	}
	txq_data->cqe_n = log2above(cqe_n);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
						txq_obj->cq_dbrec_offset);
	*txq_data->cq_db = 0;
	/* Create Send Queue object with DevX. */
	wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
	if (!wqe_n) {
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log2above(wqe_n);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = (volatile uint32_t *)
					(txq_obj->sq_dbrec_page->dbrs +
					 txq_obj->sq_dbrec_offset +
					 MLX5_SND_DBR * sizeof(uint32_t));
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * When using DevX, the TIS transport domain value must be queried and
	 * stored once per port; it is reused on Rx when creating a matching
	 * TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
				mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_devx_modify_sq,
	.txq_obj_release = mlx5_txq_devx_obj_release,
};