xref: /dpdk/drivers/net/mlx5/mlx5_devx.c (revision db4e81351fb85ff623bd0438d1b5a8fb55fe9fee)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"


/**
 * Modify RQ VLAN stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

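/*
 * Illustrative usage sketch, not part of the driver: a caller holding a
 * started Rx queue object could toggle stripping through the ops table
 * defined at the bottom of this file; "rxq_obj" here is assumed to come
 * from a configured queue.
 *
 *	if (devx_obj_ops.rxq_obj_modify_vlan_strip(rxq_obj, 1))
 *		DRV_LOG(WARNING, "Cannot enable VLAN stripping.");
 *
 * Note the VSD ("VLAN strip disable") field is inverted: vsd = 0 means
 * stripping is on.
 */
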
/**
 * Modify RQ state using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param is_start
 *   True to move the RQ from RST to RDY state, false for the opposite.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	if (is_start) {
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
	} else {
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

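/*
 * Sketch of the RQ state machine this helper drives (rq_state is the
 * expected current state, state is the target): queue start moves
 * RST -> RDY, queue stop moves RDY -> RST. A hypothetical caller:
 *
 *	if (mlx5_devx_modify_rq(rxq_obj, true))	// RST -> RDY
 *		return -rte_errno;
 */
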
/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   Rx queue control structure.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, false);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

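/*
 * Sketch of how the event path fits together (illustrative, not a
 * definitive call chain): the Rx interrupt handler wakes up on the fd
 * obtained from the event channel, then calls this op to drain and
 * validate the completion event before re-arming the CQ.
 *
 *	if (priv->obj_ops.rxq_event_get(rxq_obj) == 0)
 *		; // valid CQ event, proceed to ack/arm
 *
 * The cookie comparison guards against an event belonging to an
 * unexpected DevX object on the same channel.
 */
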
/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

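/*
 * Doorbell addressing sketch (illustrative): the doorbell record lives
 * inside a shared umem page, so the device is given the umem id plus a
 * byte offset (dbr_umem_id/dbr_addr above), while the CPU writes through
 * the mapped address derived from the same offset, e.g. a hypothetical
 * producer index update:
 *
 *	*rxq_data->rq_db = rte_cpu_to_be_32(rq_ci);
 */
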
/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf) {
		rte_errno = ENOMEM;
		return NULL;
	}
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}

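/*
 * Worked sizing example for the non-MPRQ branch above (illustrative
 * numbers): with elts_n = 10 (1024 elements) and sges_n = 2 (4 segments
 * per WQE), wqe_n = 1 << (10 - 2) = 256 WQEs. Assuming
 * sizeof(struct mlx5_wqe_data_seg) == 16, log_wqe_size = 4 + 2 = 6
 * (a 64 B WQE holding the 4 data segments), so wq_size = 256 * 64 =
 * 16 KB of umem backing the work queue.
 */
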
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * Double the CQ size for non-vectorized Rx since with CQE
		 * compression more completions fit into it. For vectorized
		 * Rx it must not be doubled in order to keep cq_ci and
		 * rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

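/*
 * CQ sizing sketch (illustrative): sizeof(struct mlx5_cqe) is 64 B, so
 * with cqe_n = 512 (after the possible doubling above), log_cqe_n = 9
 * and cq_size = 64 * 512 = 32 KB. The final 0xFF memset sets every
 * op_own byte, i.e. all CQEs start under hardware ownership until the
 * device writes them back.
 */
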
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9 KB should be supported, and more packets per queue. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

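/*
 * Hairpin sizing sketch with hypothetical values: if max_wq_data were 16
 * and MLX5_HAIRPIN_JUMBO_LOG_SIZE were 15, the default branch would pick
 * log_hairpin_data_sz = 15 (32 KB of hairpin data), and with
 * MLX5_HAIRPIN_QUEUE_STRIDE = 6 the packet count would become
 * log_hairpin_num_packets = 15 - 6 = 9, i.e. 512 packets. The constants
 * are illustrative; only the arithmetic mirrors the code above.
 */
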
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, true);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

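/*
 * Ordering note: the CQ must exist before the RQ because the RQ create
 * attributes reference its CQ number (cqn), and the event channel must
 * exist before the CQ so the CQ can subscribe to it. The error path
 * above therefore tears the objects down in the reverse order of
 * creation.
 */
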
/**
 * Create an RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			      rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

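/*
 * Wrap-around fill example (illustrative): with log_n = 2 the RQT holds
 * rqt_n = 4 entries; for three configured queues whose RQ ids are
 * {7, 9, 12}, the loops above produce rq_list = {7, 9, 12, 7}, repeating
 * the list from its start until the power-of-two table is full.
 */
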
/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_tir_attr tir_attr;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	bool lro = true;
	uint32_t i;
	int err;

	/* Enable TIR LRO only if all the queues were configured for it. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(&tir_attr, 0, sizeof(tir_attr));
	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr.tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
				       &tir_attr.rx_hash_field_selector_inner :
				       &tir_attr.rx_hash_field_selector_outer;
#else
		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
					!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
					 !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr.transport_domain = priv->sh->td->id;
	else
		tir_attr.transport_domain = priv->sh->tdn;
	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr.indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
							       (hrxq->tir->obj);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

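/*
 * selected_fields sketch (illustrative): for a flow hashing on outer
 * IPv4 source/destination addresses plus TCP ports, hash_fields carries
 * the four corresponding IBV_RX_HASH_* bits, so the bitmap above
 * resolves to SRC_IP | DST_IP | L4_SPORT | L4_DPORT, with
 * l3_prot_type = 0 (IPv4) and l4_prot_type = 0 (TCP).
 */
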
/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue whose TIR is to be released.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
};

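/*
 * Wiring note (sketch, not a definitive description): during probe the
 * PMD selects this table when DevX queue objects are in use, along the
 * lines of
 *
 *	priv->obj_ops = devx_obj_ops;
 *
 * falling back to the Verbs-based ops table otherwise; the exact
 * selection logic lives in the OS-specific probe code, not in this file.
 */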