/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"


/**
 * Calculate the number of CQEs in CQ for the Rx queue.
 *
 * @param rxq_data
 *   Pointer to receive queue structure.
 *
 * @return
 *   Number of CQEs in CQ.
 */
static unsigned int
mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
{
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;

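	/*
	 * With Multi-Packet RQ each WQE carries 2^strd_num_n strides and
	 * every stride may complete with its own CQE, so the CQ is sized
	 * for all strides rather than for the WQEs alone.
	 */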
	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
	else
		cqe_n = wqe_n - 1;
	return cqe_n;
}

/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
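	/* VSD stands for "VLAN stripping disable", hence the inverted value. */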
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release the resources of an Rx hairpin queue object.
 *
 * @param rxq_obj
 *   Hairpin Rx queue object.
 */
static void
mlx5_rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

	MLX5_ASSERT(rxq_obj);
	rq_attr.state = MLX5_RQC_STATE_RST;
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_rxq_obj_hairpin_release(rxq_obj);
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
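	/*
	 * The subscription in rxq_create_devx_cq_resources() used the CQ
	 * object pointer as the event cookie, so any other value means the
	 * event does not belong to this Rx queue.
	 */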
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of the create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
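	/* VSD disables VLAN stripping; scatter_fcs keeps the FCS in the buffer. */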
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of the DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
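	/*
	 * The WQ buffer and the doorbell record were registered as umems by
	 * the caller; only their ids and offsets are passed to the firmware.
	 */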
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
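	/*
	 * Each WQE scatters into 2^sges_n data segments, so the WQ stride is
	 * the segment size shifted by sges_n and the WQ depth is counted in
	 * WQEs rather than in individual segments.
	 */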
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
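	/* Doorbell records share a page; point rq_db at this queue's slot. */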
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * The CQ is doubled only for the non-vectorized Rx path:
		 * with vectorized Rx it must not be doubled in order to
		 * keep cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
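	/*
	 * When Rx interrupts are enabled, subscribe the CQ to the event
	 * channel; the CQ pointer is the cookie later checked by
	 * mlx5_rx_devx_get_event().
	 */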
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
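	/*
	 * Hairpin traffic is looped back by the hardware, so only the RQ
	 * object is created here: no CQ, WQ buffer or doorbell is needed.
	 */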
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, as should multiple packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
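	/* An event channel is needed only when Rx interrupts are requested. */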
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	rq_attr.rq_state = MLX5_RQC_STATE_RST;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

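/* Rx queue object callbacks implemented on top of DevX. */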
struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
};