/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"


/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable vlan stripping offload.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
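	/*
	 * Keep the RQ in ready state; only the VSD (vlan strip disable) flag
	 * is changed, as selected by the modify bitmask below.
	 */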
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param is_start
 *   True to move the RQ from reset to ready state, false to move it back
 *   from ready to reset.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
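	/* Start: move RQ from RST to RDY; stop: move it back to RST. */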
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
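	/*
	 * A hairpin queue owns only the RQ object. A standard DevX queue also
	 * owns a CQ, an optional event channel and the umem/door-bell
	 * resources, which must be released as well.
	 */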
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
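	/*
	 * Read the event into a buffer large enough for the header plus
	 * payload. Only the cookie is used: it must match the CQ pointer
	 * registered when the CQ was subscribed to the event channel.
	 */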
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
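	/* The Rx CQ object must already exist; its id becomes the RQ's cqn. */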
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
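	/* Each WQE holds 2^sges_n data segments, so scale the WQ stride. */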
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to power of two. */
	wq_size = wqe_n * wqe_size;
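	/*
	 * MLX5_WQE_BUF_ALIGNMENT is expected to resolve to the system page
	 * size; (size_t)-1 means the page size query failed.
	 */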
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * The CQE count is doubled only for the non-vectorized Rx
		 * path. For vectorized Rx it must not be doubled, so that
		 * cq_ci and rq_ci stay aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
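		/*
		 * Register the CQ object pointer as the event cookie;
		 * mlx5_rx_devx_get_event() matches it when events are read
		 * back from the channel.
		 */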
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/*
	 * Jumbo frames larger than 9 KB should be supported, and as many
	 * packets as possible.
	 */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
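	/*
	 * Each packet is assumed to occupy one hairpin stride, so the packet
	 * count is the data size divided by the stride size (in log2 terms).
	 */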
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

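		/*
		 * The event channel is needed only when Rx interrupts are
		 * enabled. OMIT_EV_DATA requests cookie-only event delivery,
		 * which is all mlx5_rx_devx_get_event() uses.
		 */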
		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

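/* Rx queue object operations implemented on top of the DevX API. */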
struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
};