xref: /dpdk/drivers/common/mlx5/mlx5_common_devx.c (revision 25245d5dc9ecfa8bc9964c69a756beca6ee1ca72)
19dab4d62SMichael Baum /* SPDX-License-Identifier: BSD-3-Clause
29dab4d62SMichael Baum  * Copyright 2020 Mellanox Technologies, Ltd
39dab4d62SMichael Baum  */
49dab4d62SMichael Baum #include <stdint.h>
59dab4d62SMichael Baum 
69dab4d62SMichael Baum #include <rte_errno.h>
79dab4d62SMichael Baum #include <rte_common.h>
89dab4d62SMichael Baum #include <rte_eal_paging.h>
99dab4d62SMichael Baum 
109dab4d62SMichael Baum #include <mlx5_glue.h>
119dab4d62SMichael Baum #include <mlx5_common_os.h>
129dab4d62SMichael Baum 
139dab4d62SMichael Baum #include "mlx5_prm.h"
149dab4d62SMichael Baum #include "mlx5_devx_cmds.h"
15*25245d5dSShiri Kuzin #include "mlx5_common_log.h"
169dab4d62SMichael Baum #include "mlx5_malloc.h"
179dab4d62SMichael Baum #include "mlx5_common.h"
189dab4d62SMichael Baum #include "mlx5_common_devx.h"
199dab4d62SMichael Baum 
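/*
 * Overview: each create helper below follows the same pattern - allocate one
 * host buffer holding the queue ring followed by its doorbell record,
 * register that buffer as a single DevX umem, fill the umem and doorbell
 * attribute fields, and create the queue object through the DevX command
 * layer. The matching destroy helpers release the DevX object, the umem
 * registration and the buffer, in that order.
 */
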
209dab4d62SMichael Baum /**
219dab4d62SMichael Baum  * Destroy DevX Completion Queue.
229dab4d62SMichael Baum  *
239dab4d62SMichael Baum  * @param[in] cq
249dab4d62SMichael Baum  *   DevX CQ to destroy.
259dab4d62SMichael Baum  */
269dab4d62SMichael Baum void
279dab4d62SMichael Baum mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
289dab4d62SMichael Baum {
299dab4d62SMichael Baum 	if (cq->cq)
309dab4d62SMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
319dab4d62SMichael Baum 	if (cq->umem_obj)
329dab4d62SMichael Baum 		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
339dab4d62SMichael Baum 	if (cq->umem_buf)
349dab4d62SMichael Baum 		mlx5_free((void *)(uintptr_t)cq->umem_buf);
359dab4d62SMichael Baum }
369dab4d62SMichael Baum 
379dab4d62SMichael Baum /* Mark all CQEs initially as invalid. */
389dab4d62SMichael Baum static void
399dab4d62SMichael Baum mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
409dab4d62SMichael Baum {
419dab4d62SMichael Baum 	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
429dab4d62SMichael Baum 	uint16_t i;
439dab4d62SMichael Baum 
449dab4d62SMichael Baum 	for (i = 0; i < cq_size; i++, cqe++)
459dab4d62SMichael Baum 		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
469dab4d62SMichael Baum }
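
/*
 * Note on mlx5_cq_init(): in a CQE the op_own byte carries the opcode in its
 * high nibble and the hardware ownership bit in bit 0, so writing
 * (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK makes every entry read as
 * invalid until hardware overwrites it with a real completion.
 */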
479dab4d62SMichael Baum 
489dab4d62SMichael Baum /**
499dab4d62SMichael Baum  * Create Completion Queue using DevX API.
509dab4d62SMichael Baum  *
519dab4d62SMichael Baum  * Takes a pointer to a partially initialized attributes structure and updates
529dab4d62SMichael Baum  * the following fields:
539dab4d62SMichael Baum  *   q_umem_valid
549dab4d62SMichael Baum  *   q_umem_id
559dab4d62SMichael Baum  *   q_umem_offset
569dab4d62SMichael Baum  *   db_umem_valid
579dab4d62SMichael Baum  *   db_umem_id
589dab4d62SMichael Baum  *   db_umem_offset
599dab4d62SMichael Baum  *   eqn
609dab4d62SMichael Baum  *   log_cq_size
619dab4d62SMichael Baum  *   log_page_size
629dab4d62SMichael Baum  * All other fields are set by the caller; see the usage sketch below.
639dab4d62SMichael Baum  *
649dab4d62SMichael Baum  * @param[in] ctx
659dab4d62SMichael Baum  *   Context returned from mlx5 open_device() glue function.
669dab4d62SMichael Baum  * @param[in/out] cq_obj
679dab4d62SMichael Baum  *   Pointer to CQ to create.
689dab4d62SMichael Baum  * @param[in] log_desc_n
699dab4d62SMichael Baum  *   Log of number of descriptors in queue.
709dab4d62SMichael Baum  * @param[in] attr
719dab4d62SMichael Baum  *   Pointer to CQ attributes structure.
729dab4d62SMichael Baum  * @param[in] socket
739dab4d62SMichael Baum  *   Socket to use for allocation.
749dab4d62SMichael Baum  *
759dab4d62SMichael Baum  * @return
769dab4d62SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
779dab4d62SMichael Baum  */
789dab4d62SMichael Baum int
799dab4d62SMichael Baum mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
809dab4d62SMichael Baum 		    struct mlx5_devx_cq_attr *attr, int socket)
819dab4d62SMichael Baum {
829dab4d62SMichael Baum 	struct mlx5_devx_obj *cq = NULL;
839dab4d62SMichael Baum 	struct mlx5dv_devx_umem *umem_obj = NULL;
849dab4d62SMichael Baum 	void *umem_buf = NULL;
859dab4d62SMichael Baum 	size_t page_size = rte_mem_page_size();
869dab4d62SMichael Baum 	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
879dab4d62SMichael Baum 	uint32_t umem_size, umem_dbrec;
889dab4d62SMichael Baum 	uint32_t eqn;
899dab4d62SMichael Baum 	uint16_t cq_size = 1 << log_desc_n;
909dab4d62SMichael Baum 	int ret;
919dab4d62SMichael Baum 
929dab4d62SMichael Baum 	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
939dab4d62SMichael Baum 		DRV_LOG(ERR, "Failed to get page_size.");
949dab4d62SMichael Baum 		rte_errno = ENOMEM;
959dab4d62SMichael Baum 		return -rte_errno;
969dab4d62SMichael Baum 	}
979dab4d62SMichael Baum 	/* Query first EQN. */
989dab4d62SMichael Baum 	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
999dab4d62SMichael Baum 	if (ret) {
1009dab4d62SMichael Baum 		rte_errno = errno;
1019dab4d62SMichael Baum 		DRV_LOG(ERR, "Failed to query event queue number.");
1029dab4d62SMichael Baum 		return -rte_errno;
1039dab4d62SMichael Baum 	}
1049dab4d62SMichael Baum 	/* Allocate memory buffer for CQEs and doorbell record. */
1059dab4d62SMichael Baum 	umem_size = sizeof(struct mlx5_cqe) * cq_size;
1069dab4d62SMichael Baum 	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
1079dab4d62SMichael Baum 	umem_size += MLX5_DBR_SIZE;
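	/*
	 * Resulting layout of the single allocation registered as one umem:
	 *   [0, cq_size * sizeof(struct mlx5_cqe))   - CQE ring.
	 *   [umem_dbrec, umem_dbrec + MLX5_DBR_SIZE) - doorbell record.
	 * umem_dbrec is the CQE ring size rounded up to MLX5_DBR_SIZE.
	 * The SQ and RQ helpers below use the same layout for their WQE rings.
	 */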
1089dab4d62SMichael Baum 	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
1099dab4d62SMichael Baum 			       alignment, socket);
1109dab4d62SMichael Baum 	if (!umem_buf) {
1119dab4d62SMichael Baum 		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
1129dab4d62SMichael Baum 		rte_errno = ENOMEM;
1139dab4d62SMichael Baum 		return -rte_errno;
1149dab4d62SMichael Baum 	}
1159dab4d62SMichael Baum 	/* Register allocated buffer in user space with DevX. */
1169dab4d62SMichael Baum 	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
1179dab4d62SMichael Baum 				    IBV_ACCESS_LOCAL_WRITE);
1189dab4d62SMichael Baum 	if (!umem_obj) {
1199dab4d62SMichael Baum 		DRV_LOG(ERR, "Failed to register umem for CQ.");
1209dab4d62SMichael Baum 		rte_errno = errno;
1219dab4d62SMichael Baum 		goto error;
1229dab4d62SMichael Baum 	}
1239dab4d62SMichael Baum 	/* Fill attributes for CQ object creation. */
1249dab4d62SMichael Baum 	attr->q_umem_valid = 1;
1259dab4d62SMichael Baum 	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
1269dab4d62SMichael Baum 	attr->q_umem_offset = 0;
1279dab4d62SMichael Baum 	attr->db_umem_valid = 1;
1289dab4d62SMichael Baum 	attr->db_umem_id = attr->q_umem_id;
1299dab4d62SMichael Baum 	attr->db_umem_offset = umem_dbrec;
1309dab4d62SMichael Baum 	attr->eqn = eqn;
1319dab4d62SMichael Baum 	attr->log_cq_size = log_desc_n;
1329dab4d62SMichael Baum 	attr->log_page_size = rte_log2_u32(page_size);
1339dab4d62SMichael Baum 	/* Create completion queue object with DevX. */
1349dab4d62SMichael Baum 	cq = mlx5_devx_cmd_create_cq(ctx, attr);
1359dab4d62SMichael Baum 	if (!cq) {
1369dab4d62SMichael Baum 		DRV_LOG(ERR, "Can't create DevX CQ object.");
1379dab4d62SMichael Baum 		rte_errno = ENOMEM;
1389dab4d62SMichael Baum 		goto error;
1399dab4d62SMichael Baum 	}
1409dab4d62SMichael Baum 	cq_obj->umem_buf = umem_buf;
1419dab4d62SMichael Baum 	cq_obj->umem_obj = umem_obj;
1429dab4d62SMichael Baum 	cq_obj->cq = cq;
1439dab4d62SMichael Baum 	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
1449dab4d62SMichael Baum 	/* Mark all CQEs initially as invalid. */
1459dab4d62SMichael Baum 	mlx5_cq_init(cq_obj, cq_size);
1469dab4d62SMichael Baum 	return 0;
1479dab4d62SMichael Baum error:
1489dab4d62SMichael Baum 	ret = rte_errno;
1499dab4d62SMichael Baum 	if (umem_obj)
1509dab4d62SMichael Baum 		claim_zero(mlx5_os_umem_dereg(umem_obj));
1519dab4d62SMichael Baum 	if (umem_buf)
1529dab4d62SMichael Baum 		mlx5_free((void *)(uintptr_t)umem_buf);
1539dab4d62SMichael Baum 	rte_errno = ret;
1549dab4d62SMichael Baum 	return -rte_errno;
1559dab4d62SMichael Baum }
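
/*
 * Usage sketch (illustrative, kept under a compile guard that is never
 * defined): shows the expected calling pattern for the CQ helpers above.
 * The "uar_page_id" attribute field and the way the caller obtains it are
 * assumptions about a typical caller, not something this file defines.
 */
#ifdef MLX5_COMMON_DEVX_USAGE_SKETCH
static int
devx_cq_usage_sketch(void *ctx, uint16_t log_desc_n, uint32_t uar_page_id,
		     int socket)
{
	struct mlx5_devx_cq cq;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = uar_page_id, /* Assumed caller-owned UAR page. */
	};

	if (mlx5_devx_cq_create(ctx, &cq, log_desc_n, &cq_attr, socket))
		return -rte_errno; /* rte_errno was set by the helper. */
	/* cq.cqes is the CQE ring, cq.db_rec the CQ doorbell record. */
	mlx5_devx_cq_destroy(&cq);
	return 0;
}
#endif /* MLX5_COMMON_DEVX_USAGE_SKETCH */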
15638f53763SMichael Baum 
15738f53763SMichael Baum /**
15838f53763SMichael Baum  * Destroy DevX Send Queue.
15938f53763SMichael Baum  *
16038f53763SMichael Baum  * @param[in] sq
16138f53763SMichael Baum  *   DevX SQ to destroy.
16238f53763SMichael Baum  */
16338f53763SMichael Baum void
16438f53763SMichael Baum mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
16538f53763SMichael Baum {
16638f53763SMichael Baum 	if (sq->sq)
16738f53763SMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
16838f53763SMichael Baum 	if (sq->umem_obj)
16938f53763SMichael Baum 		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
17038f53763SMichael Baum 	if (sq->umem_buf)
17138f53763SMichael Baum 		mlx5_free((void *)(uintptr_t)sq->umem_buf);
17238f53763SMichael Baum }
17338f53763SMichael Baum 
17438f53763SMichael Baum /**
17538f53763SMichael Baum  * Create Send Queue using DevX API.
17638f53763SMichael Baum  *
17738f53763SMichael Baum  * Takes a pointer to a partially initialized attributes structure and updates
17838f53763SMichael Baum  * the following fields:
17938f53763SMichael Baum  *   wq_type
18038f53763SMichael Baum  *   wq_umem_valid
18138f53763SMichael Baum  *   wq_umem_id
18238f53763SMichael Baum  *   wq_umem_offset
18338f53763SMichael Baum  *   dbr_umem_valid
18438f53763SMichael Baum  *   dbr_umem_id
18538f53763SMichael Baum  *   dbr_addr
18638f53763SMichael Baum  *   log_wq_stride
18738f53763SMichael Baum  *   log_wq_sz
18838f53763SMichael Baum  *   log_wq_pg_sz
18938f53763SMichael Baum  * All other fields are set by the caller; see the usage sketch below.
19038f53763SMichael Baum  *
19138f53763SMichael Baum  * @param[in] ctx
19238f53763SMichael Baum  *   Context returned from mlx5 open_device() glue function.
19338f53763SMichael Baum  * @param[in/out] sq_obj
19438f53763SMichael Baum  *   Pointer to SQ to create.
19538f53763SMichael Baum  * @param[in] log_wqbb_n
19638f53763SMichael Baum  *   Log of number of WQBBs in queue.
19738f53763SMichael Baum  * @param[in] attr
19838f53763SMichael Baum  *   Pointer to SQ attributes structure.
19938f53763SMichael Baum  * @param[in] socket
20038f53763SMichael Baum  *   Socket to use for allocation.
20138f53763SMichael Baum  *
20238f53763SMichael Baum  * @return
20338f53763SMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
20438f53763SMichael Baum  */
20538f53763SMichael Baum int
20638f53763SMichael Baum mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
20738f53763SMichael Baum 		    struct mlx5_devx_create_sq_attr *attr, int socket)
20838f53763SMichael Baum {
20938f53763SMichael Baum 	struct mlx5_devx_obj *sq = NULL;
21038f53763SMichael Baum 	struct mlx5dv_devx_umem *umem_obj = NULL;
21138f53763SMichael Baum 	void *umem_buf = NULL;
21238f53763SMichael Baum 	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
21338f53763SMichael Baum 	uint32_t umem_size, umem_dbrec;
21438f53763SMichael Baum 	uint16_t sq_size = 1 << log_wqbb_n;
21538f53763SMichael Baum 	int ret;
21638f53763SMichael Baum 
21738f53763SMichael Baum 	if (alignment == (size_t)-1) {
21838f53763SMichael Baum 		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
21938f53763SMichael Baum 		rte_errno = ENOMEM;
22038f53763SMichael Baum 		return -rte_errno;
22138f53763SMichael Baum 	}
22238f53763SMichael Baum 	/* Allocate memory buffer for WQEs and doorbell record. */
22338f53763SMichael Baum 	umem_size = MLX5_WQE_SIZE * sq_size;
22438f53763SMichael Baum 	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
22538f53763SMichael Baum 	umem_size += MLX5_DBR_SIZE;
22638f53763SMichael Baum 	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
22738f53763SMichael Baum 			       alignment, socket);
22838f53763SMichael Baum 	if (!umem_buf) {
22938f53763SMichael Baum 		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
23038f53763SMichael Baum 		rte_errno = ENOMEM;
23138f53763SMichael Baum 		return -rte_errno;
23238f53763SMichael Baum 	}
23338f53763SMichael Baum 	/* Register allocated buffer in user space with DevX. */
23438f53763SMichael Baum 	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
23538f53763SMichael Baum 				    IBV_ACCESS_LOCAL_WRITE);
23638f53763SMichael Baum 	if (!umem_obj) {
23738f53763SMichael Baum 		DRV_LOG(ERR, "Failed to register umem for SQ.");
23838f53763SMichael Baum 		rte_errno = errno;
23938f53763SMichael Baum 		goto error;
24038f53763SMichael Baum 	}
24138f53763SMichael Baum 	/* Fill attributes for SQ object creation. */
24238f53763SMichael Baum 	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
24338f53763SMichael Baum 	attr->wq_attr.wq_umem_valid = 1;
24438f53763SMichael Baum 	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
24538f53763SMichael Baum 	attr->wq_attr.wq_umem_offset = 0;
24638f53763SMichael Baum 	attr->wq_attr.dbr_umem_valid = 1;
24738f53763SMichael Baum 	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
24838f53763SMichael Baum 	attr->wq_attr.dbr_addr = umem_dbrec;
24938f53763SMichael Baum 	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
25038f53763SMichael Baum 	attr->wq_attr.log_wq_sz = log_wqbb_n;
25138f53763SMichael Baum 	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
25238f53763SMichael Baum 	/* Create send queue object with DevX. */
25338f53763SMichael Baum 	sq = mlx5_devx_cmd_create_sq(ctx, attr);
25438f53763SMichael Baum 	if (!sq) {
25538f53763SMichael Baum 		DRV_LOG(ERR, "Can't create DevX SQ object.");
25638f53763SMichael Baum 		rte_errno = ENOMEM;
25738f53763SMichael Baum 		goto error;
25838f53763SMichael Baum 	}
25938f53763SMichael Baum 	sq_obj->umem_buf = umem_buf;
26038f53763SMichael Baum 	sq_obj->umem_obj = umem_obj;
26138f53763SMichael Baum 	sq_obj->sq = sq;
26238f53763SMichael Baum 	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
26338f53763SMichael Baum 	return 0;
26438f53763SMichael Baum error:
26538f53763SMichael Baum 	ret = rte_errno;
26638f53763SMichael Baum 	if (umem_obj)
26738f53763SMichael Baum 		claim_zero(mlx5_os_umem_dereg(umem_obj));
26838f53763SMichael Baum 	if (umem_buf)
26938f53763SMichael Baum 		mlx5_free((void *)(uintptr_t)umem_buf);
27038f53763SMichael Baum 	rte_errno = ret;
27138f53763SMichael Baum 	return -rte_errno;
27238f53763SMichael Baum }
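
/*
 * Usage sketch (illustrative, same compile guard as above): a caller supplies
 * the SQ attributes this helper leaves untouched, most notably the CQ and TIS
 * numbers, and usually moves the SQ to ready state afterwards with a separate
 * modify command. The "cqn" and "tis_num" field names and the modify step are
 * assumptions based on common PMD usage, not requirements stated here.
 */
#ifdef MLX5_COMMON_DEVX_USAGE_SKETCH
static int
devx_sq_usage_sketch(void *ctx, uint32_t cqn, uint32_t tis_num,
		     uint16_t log_wqbb_n, int socket)
{
	struct mlx5_devx_sq sq;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.cqn = cqn,         /* Assumed field: completion CQ number. */
		.tis_num = tis_num, /* Assumed field: caller-owned TIS. */
	};

	if (mlx5_devx_sq_create(ctx, &sq, log_wqbb_n, &sq_attr, socket))
		return -rte_errno;
	/* WQEs live in sq.umem_buf, the doorbell record at sq.db_rec. */
	mlx5_devx_sq_destroy(&sq);
	return 0;
}
#endif /* MLX5_COMMON_DEVX_USAGE_SKETCH */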
27338f53763SMichael Baum 
274edb704daSMichael Baum /**
275edb704daSMichael Baum  * Destroy DevX Receive Queue.
276edb704daSMichael Baum  *
277edb704daSMichael Baum  * @param[in] rq
278edb704daSMichael Baum  *   DevX RQ to destroy.
279edb704daSMichael Baum  */
280edb704daSMichael Baum void
281edb704daSMichael Baum mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
282edb704daSMichael Baum {
283edb704daSMichael Baum 	if (rq->rq)
284edb704daSMichael Baum 		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
285edb704daSMichael Baum 	if (rq->umem_obj)
286edb704daSMichael Baum 		claim_zero(mlx5_os_umem_dereg(rq->umem_obj));
287edb704daSMichael Baum 	if (rq->umem_buf)
288edb704daSMichael Baum 		mlx5_free((void *)(uintptr_t)rq->umem_buf);
289edb704daSMichael Baum }
290edb704daSMichael Baum 
291edb704daSMichael Baum /**
292edb704daSMichael Baum  * Create Receive Queue using DevX API.
293edb704daSMichael Baum  *
294edb704daSMichael Baum  * Takes a pointer to a partially initialized attributes structure and updates
295edb704daSMichael Baum  * the following fields:
296edb704daSMichael Baum  *   wq_umem_valid
297edb704daSMichael Baum  *   wq_umem_id
298edb704daSMichael Baum  *   wq_umem_offset
299edb704daSMichael Baum  *   dbr_umem_valid
300edb704daSMichael Baum  *   dbr_umem_id
301edb704daSMichael Baum  *   dbr_addr
302edb704daSMichael Baum  *   log_wq_pg_sz
303edb704daSMichael Baum  * All other fields are set by the caller; see the usage sketch below.
304edb704daSMichael Baum  *
305edb704daSMichael Baum  * @param[in] ctx
306edb704daSMichael Baum  *   Context returned from mlx5 open_device() glue function.
307edb704daSMichael Baum  * @param[in/out] rq_obj
308edb704daSMichael Baum  *   Pointer to RQ to create.
309edb704daSMichael Baum  * @param[in] wqe_size
310edb704daSMichael Baum  *   Size of WQE structure.
311edb704daSMichael Baum  * @param[in] log_wqbb_n
312edb704daSMichael Baum  *   Log of number of WQBBs in queue.
313edb704daSMichael Baum  * @param[in] attr
314edb704daSMichael Baum  *   Pointer to RQ attributes structure.
315edb704daSMichael Baum  * @param[in] socket
316edb704daSMichael Baum  *   Socket to use for allocation.
317edb704daSMichael Baum  *
318edb704daSMichael Baum  * @return
319edb704daSMichael Baum  *   0 on success, a negative errno value otherwise and rte_errno is set.
320edb704daSMichael Baum  */
321edb704daSMichael Baum int
322edb704daSMichael Baum mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,
323edb704daSMichael Baum 		    uint16_t log_wqbb_n,
324edb704daSMichael Baum 		    struct mlx5_devx_create_rq_attr *attr, int socket)
325edb704daSMichael Baum {
326edb704daSMichael Baum 	struct mlx5_devx_obj *rq = NULL;
327edb704daSMichael Baum 	struct mlx5dv_devx_umem *umem_obj = NULL;
328edb704daSMichael Baum 	void *umem_buf = NULL;
329edb704daSMichael Baum 	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
330edb704daSMichael Baum 	uint32_t umem_size, umem_dbrec;
331edb704daSMichael Baum 	uint16_t rq_size = 1 << log_wqbb_n;
332edb704daSMichael Baum 	int ret;
333edb704daSMichael Baum 
334edb704daSMichael Baum 	if (alignment == (size_t)-1) {
335edb704daSMichael Baum 		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
336edb704daSMichael Baum 		rte_errno = ENOMEM;
337edb704daSMichael Baum 		return -rte_errno;
338edb704daSMichael Baum 	}
339edb704daSMichael Baum 	/* Allocate memory buffer for WQEs and doorbell record. */
340edb704daSMichael Baum 	umem_size = wqe_size * rq_size;
341edb704daSMichael Baum 	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
342edb704daSMichael Baum 	umem_size += MLX5_DBR_SIZE;
343edb704daSMichael Baum 	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
344edb704daSMichael Baum 			       alignment, socket);
345edb704daSMichael Baum 	if (!umem_buf) {
346edb704daSMichael Baum 		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
347edb704daSMichael Baum 		rte_errno = ENOMEM;
348edb704daSMichael Baum 		return -rte_errno;
349edb704daSMichael Baum 	}
350edb704daSMichael Baum 	/* Register allocated buffer in user space with DevX. */
351edb704daSMichael Baum 	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
352edb704daSMichael Baum 				    umem_size, 0);
353edb704daSMichael Baum 	if (!umem_obj) {
354edb704daSMichael Baum 		DRV_LOG(ERR, "Failed to register umem for RQ.");
355edb704daSMichael Baum 		rte_errno = errno;
356edb704daSMichael Baum 		goto error;
357edb704daSMichael Baum 	}
358edb704daSMichael Baum 	/* Fill attributes for RQ object creation. */
359edb704daSMichael Baum 	attr->wq_attr.wq_umem_valid = 1;
360edb704daSMichael Baum 	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
361edb704daSMichael Baum 	attr->wq_attr.wq_umem_offset = 0;
362edb704daSMichael Baum 	attr->wq_attr.dbr_umem_valid = 1;
363edb704daSMichael Baum 	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
364edb704daSMichael Baum 	attr->wq_attr.dbr_addr = umem_dbrec;
365edb704daSMichael Baum 	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
366edb704daSMichael Baum 	/* Create receive queue object with DevX. */
367edb704daSMichael Baum 	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
368edb704daSMichael Baum 	if (!rq) {
369edb704daSMichael Baum 		DRV_LOG(ERR, "Can't create DevX RQ object.");
370edb704daSMichael Baum 		rte_errno = ENOMEM;
371edb704daSMichael Baum 		goto error;
372edb704daSMichael Baum 	}
373edb704daSMichael Baum 	rq_obj->umem_buf = umem_buf;
374edb704daSMichael Baum 	rq_obj->umem_obj = umem_obj;
375edb704daSMichael Baum 	rq_obj->rq = rq;
376edb704daSMichael Baum 	rq_obj->db_rec = RTE_PTR_ADD(rq_obj->umem_buf, umem_dbrec);
377edb704daSMichael Baum 	return 0;
378edb704daSMichael Baum error:
379edb704daSMichael Baum 	ret = rte_errno;
380edb704daSMichael Baum 	if (umem_obj)
381edb704daSMichael Baum 		claim_zero(mlx5_os_umem_dereg(umem_obj));
382edb704daSMichael Baum 	if (umem_buf)
383edb704daSMichael Baum 		mlx5_free((void *)(uintptr_t)umem_buf);
384edb704daSMichael Baum 	rte_errno = ret;
385edb704daSMichael Baum 	return -rte_errno;
386edb704daSMichael Baum }
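
/*
 * Usage sketch (illustrative, same compile guard as above): unlike the SQ
 * helper, the RQ helper does not set wq_type, log_wq_stride or log_wq_sz, so
 * the caller provides them together with the WQE size. The "cqn" field name
 * is an assumption based on common PMD usage.
 */
#ifdef MLX5_COMMON_DEVX_USAGE_SKETCH
static int
devx_rq_usage_sketch(void *ctx, uint32_t cqn, uint32_t wqe_size,
		     uint16_t log_wqbb_n, int socket)
{
	struct mlx5_devx_rq rq;
	struct mlx5_devx_create_rq_attr rq_attr = {
		.cqn = cqn, /* Assumed field: completion CQ number. */
		.wq_attr = {
			.wq_type = MLX5_WQ_TYPE_CYCLIC,
			.log_wq_stride = rte_log2_u32(wqe_size),
			.log_wq_sz = log_wqbb_n,
		},
	};

	if (mlx5_devx_rq_create(ctx, &rq, wqe_size, log_wqbb_n, &rq_attr,
				socket))
		return -rte_errno;
	/* WQEs live in rq.umem_buf, the doorbell record at rq.db_rec. */
	mlx5_devx_rq_destroy(&rq);
	return 0;
}
#endif /* MLX5_COMMON_DEVX_USAGE_SKETCH */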
38738f53763SMichael Baum 