xref: /dpdk/drivers/net/mlx5/mlx5_devx.c (revision 6deb19e1b2d24ff95413d30147678d898283c67e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"

/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
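	/* VSD is "VLAN strip disable", hence the inverted "on" value. */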
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
}

/**
 * Release Rx hairpin related resources.
 *
 * @param rxq_obj
 *   Hairpin Rx queue object.
 */
static void
mlx5_rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

	MLX5_ASSERT(rxq_obj);
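	/* Move the RQ from ready back to reset state before destroying it. */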
	rq_attr.state = MLX5_RQC_STATE_RST;
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_priv *priv = rxq_obj->rxq_ctrl->priv;

	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_rxq_obj_hairpin_release(rxq_obj);
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		rxq_free_elts(rxq_obj->rxq_ctrl);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_obj->rxq_ctrl->rq_dbr_umem_id,
					    rxq_obj->rxq_ctrl->rq_dbr_offset));
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_obj->rxq_ctrl->cq_dbr_umem_id,
					    rxq_obj->rxq_ctrl->cq_dbr_offset));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
	LIST_REMOVE(rxq_obj, next);
	mlx5_free(rxq_obj);
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param cqn
 *   CQ number to use with this RQ.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
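	/* A WQE holds 2^sges_n scatter segments, widening its stride. */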
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to the next power of two. */
	wq_size = wqe_n * wqe_size;
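	/*
	 * MLX5_WQE_BUF_ALIGNMENT resolves to rte_mem_page_size(), which
	 * returns (size_t)-1 on failure, hence the check below.
	 */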
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf) {
		rte_errno = ENOMEM;
		return NULL;
	}
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem) {
		rte_errno = errno;
		mlx5_free(buf);
		rxq_data->wqes = NULL;
		return NULL;
	}
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		rxq_release_devx_rq_resources(rxq_ctrl);
	return rq;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param cqe_n
 *   Number of CQEs in CQ.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
		 struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_obj *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		rte_errno = errno;
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR,
				"Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The hairpin DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = NULL;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB and as many packets as possible should fit. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR,
				"Requested hairpin data size (2^%u) exceeds "
				"the device limit (2^%u).",
				priv->config.log_hp_size, max_wq_data);
			mlx5_free(tmpl);
			rte_errno = ERANGE;
			return NULL;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
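	/* MLX5_HAIRPIN_QUEUE_STRIDE is the log2 of the packet stride. */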
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u cannot create RQ object.",
			dev->data->port_id, idx);
		mlx5_free(tmpl);
		rte_errno = errno;
		return NULL;
	}
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)tmpl);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return tmpl;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct mlx5_rxq_obj *tmpl = NULL;
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
	struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
	int64_t dbr_offset;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel: "
				"errno %d.", rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
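	/*
	 * In MPRQ mode every stride can carry a packet and may complete
	 * individually, so size the CQ for all strides; otherwise one
	 * CQE per WQE is enough.
	 */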
	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
	else
		cqe_n = wqe_n - 1;
	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &cq_dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbr_umem_id = mlx5_os_get_umem_id(cq_dbr_page->umem);
	rxq_data->cq_db = (uint32_t *)((uintptr_t)cq_dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &rq_dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		rte_errno = ENOMEM;
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbr_umem_id = mlx5_os_get_umem_id(rq_dbr_page->umem);
	rxq_data->rq_db = (uint32_t *)((uintptr_t)rq_dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	rq_attr.rq_state = MLX5_RQC_STATE_RST;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)tmpl);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return tmpl;
error:
	if (tmpl) {
		ret = rte_errno; /* Save rte_errno before cleanup. */
		if (tmpl->rq)
			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
		if (tmpl->devx_cq)
			claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
		if (tmpl->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(tmpl->devx_channel);
		mlx5_free(tmpl);
		rte_errno = ret; /* Restore rte_errno. */
	}
	if (rq_dbr_page)
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_ctrl->rq_dbr_umem_id,
					    rxq_ctrl->rq_dbr_offset));
	if (cq_dbr_page)
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_ctrl->cq_dbr_umem_id,
					    rxq_ctrl->cq_dbr_offset));
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

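/*
 * DevX callback table plugged into the generic mlx5 queue code. A minimal
 * usage sketch, assuming the spawn-time selection logic that lives outside
 * this file (the "sh->devx" flag and the assignment site are illustrative,
 * not definitions made here):
 *
 *	if (sh->devx)
 *		priv->obj_ops = devx_obj_ops;
 *
 * Generic code can then call priv->obj_ops.rxq_obj_new() and friends
 * without knowing whether Verbs or DevX backs the Rx queue objects.
 */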
struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
};