/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stdint.h>

#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common_os.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"

/**
 * Destroy DevX Completion Queue.
 *
 * @param[in] cq
 *   DevX CQ to destroy.
 */
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
}

/* Mark all CQEs initially as invalid. */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
	uint16_t i;

	for (i = 0; i < cq_size; i++, cqe++)
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
}

/**
 * Create Completion Queue using DevX API.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   q_umem_valid
 *   q_umem_id
 *   q_umem_offset
 *   db_umem_valid
 *   db_umem_id
 *   db_umem_offset
 *   eqn
 *   log_cq_size
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq_obj
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] attr
 *   Pointer to CQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint32_t num_of_cqes = RTE_BIT32(log_desc_n);
	int ret;

	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * num_of_cqes;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for CQ object creation. */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, num_of_cqes);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
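
/*
 * Usage sketch (illustrative only, not compiled): one plausible caller-side
 * sequence around mlx5_devx_cq_create()/mlx5_devx_cq_destroy(). Only the
 * fields listed in the function comment are filled by the helper; anything
 * else shown here, such as the UAR page id and its field name, is an
 * assumption about the caller's setup rather than something this file
 * mandates.
 *
 *	struct mlx5_devx_cq cq_obj = { 0 };
 *	struct mlx5_devx_cq_attr cq_attr = { 0 };
 *	uint16_t log_desc_n = 10; // 1024 CQEs
 *
 *	cq_attr.uar_page_id = uar_page_id; // assumed caller-provided UAR
 *	if (mlx5_devx_cq_create(ctx, &cq_obj, log_desc_n, &cq_attr,
 *				SOCKET_ID_ANY) < 0)
 *		return -rte_errno;
 *	// cq_obj.cqes and cq_obj.db_rec are ready for the data path here.
 *	mlx5_devx_cq_destroy(&cq_obj);
 */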

/**
 * Destroy DevX Send Queue.
 *
 * @param[in] sq
 *   DevX SQ to destroy.
 */
void
mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
{
	if (sq->sq)
		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
	if (sq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
	if (sq->umem_buf)
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
}

/**
 * Create Send Queue using DevX API.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   wq_type
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_stride
 *   log_wq_sz
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq_obj
 *   Pointer to SQ to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to SQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_sq_attr *attr, int socket)
{
	struct mlx5_devx_obj *sq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t num_of_wqbbs = RTE_BIT32(log_wqbb_n);
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * num_of_wqbbs;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for SQ object creation. */
	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	attr->wq_attr.log_wq_sz = log_wqbb_n;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create send queue object with DevX. */
	sq = mlx5_devx_cmd_create_sq(ctx, attr);
	if (!sq) {
		DRV_LOG(ERR, "Can't create DevX SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq_obj->umem_buf = umem_buf;
	sq_obj->umem_obj = umem_obj;
	sq_obj->sq = sq;
	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
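
/*
 * Usage sketch (illustrative only, not compiled): a plausible way to pair a
 * DevX SQ with a previously created CQ. Everything beyond the wq_attr fields
 * set by mlx5_devx_sq_create() is caller configuration; the cqn and tis_num
 * field names are assumptions about the SQ attributes structure, not values
 * this helper touches.
 *
 *	struct mlx5_devx_sq sq_obj = { 0 };
 *	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
 *	uint16_t log_wqbb_n = 8; // 256 WQBBs
 *
 *	sq_attr.cqn = cq_obj.cq->id; // CQ created in the sketch above
 *	sq_attr.tis_num = tis_id;    // assumed caller-created TIS
 *	if (mlx5_devx_sq_create(ctx, &sq_obj, log_wqbb_n, &sq_attr,
 *				SOCKET_ID_ANY) < 0)
 *		goto error;
 *	// sq_obj.wqes and sq_obj.db_rec back the Tx data path here.
 *	mlx5_devx_sq_destroy(&sq_obj);
 */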

/**
 * Destroy DevX Receive Queue resources.
 *
 * @param[in] rq_res
 *   DevX RQ resource to destroy.
 */
static void
mlx5_devx_wq_res_destroy(struct mlx5_devx_wq_res *rq_res)
{
	if (rq_res->umem_obj)
		claim_zero(mlx5_os_umem_dereg(rq_res->umem_obj));
	if (rq_res->umem_buf)
		mlx5_free((void *)(uintptr_t)rq_res->umem_buf);
	memset(rq_res, 0, sizeof(*rq_res));
}

/**
 * Destroy DevX Receive Memory Pool.
 *
 * @param[in] rmp
 *   DevX RMP to destroy.
 */
static void
mlx5_devx_rmp_destroy(struct mlx5_devx_rmp *rmp)
{
	MLX5_ASSERT(rmp->ref_cnt == 0);
	if (rmp->rmp) {
		claim_zero(mlx5_devx_cmd_destroy(rmp->rmp));
		rmp->rmp = NULL;
	}
	mlx5_devx_wq_res_destroy(&rmp->wq);
}

/**
 * Destroy DevX Queue Pair.
 *
 * @param[in] qp
 *   DevX QP to destroy.
 */
void
mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
{
	if (qp->qp)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
	if (qp->umem_obj)
		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
	if (qp->umem_buf)
		mlx5_free((void *)(uintptr_t)qp->umem_buf);
}

/**
 * Create Queue Pair using DevX API.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_address
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] qp_obj
 *   Pointer to QP to create.
 * @param[in] queue_size
 *   Size of queue to create.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint32_t queue_size,
		    struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_obj *qp = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = queue_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for QP.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for QP.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for QP object creation. */
	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_umem_offset = 0;
	attr->dbr_umem_valid = 1;
	attr->dbr_umem_id = attr->wq_umem_id;
	attr->dbr_address = umem_dbrec;
	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
	/* Create queue pair object with DevX. */
	qp = mlx5_devx_cmd_create_qp(ctx, attr);
	if (!qp) {
		DRV_LOG(ERR, "Can't create DevX QP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp_obj->umem_buf = umem_buf;
	qp_obj->umem_obj = umem_obj;
	qp_obj->qp = qp;
	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
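
/*
 * Usage sketch (illustrative only, not compiled): creating a DevX QP and
 * driving it to the RTS state with mlx5_devx_qp2rts(), here as a loopback QP
 * connected to itself. The pd/uar fields and the buffer sizing are caller
 * configuration, and their names are assumptions about the QP attributes
 * structure; only the umem and doorbell fields are filled by this helper.
 *
 *	struct mlx5_devx_qp qp_obj = { 0 };
 *	struct mlx5_devx_qp_attr qp_attr = { 0 };
 *	uint32_t queue_size = 64 * MLX5_WQE_SIZE; // total WQ buffer in bytes
 *
 *	qp_attr.pd = pdn;           // assumed caller-owned protection domain
 *	qp_attr.uar_index = uar_id; // assumed caller-owned UAR
 *	if (mlx5_devx_qp_create(ctx, &qp_obj, queue_size, &qp_attr,
 *				SOCKET_ID_ANY) < 0)
 *		return -rte_errno;
 *	if (mlx5_devx_qp2rts(&qp_obj, qp_obj.qp->id) < 0) { // self as remote
 *		mlx5_devx_qp_destroy(&qp_obj);
 *		return -rte_errno;
 *	}
 */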

/**
 * Destroy DevX Receive Queue.
 *
 * @param[in] rq
 *   DevX RQ to destroy.
 */
void
mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
{
	if (rq->rq) {
		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
		rq->rq = NULL;
		if (rq->rmp)
			rq->rmp->ref_cnt--;
	}
	if (rq->rmp == NULL) {
		mlx5_devx_wq_res_destroy(&rq->wq);
	} else {
		if (rq->rmp->ref_cnt == 0)
			mlx5_devx_rmp_destroy(rq->rmp);
	}
}

/**
 * Create WQ resources using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[out] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[out] wq_res
 *   Pointer to WQ resource to create.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_wq_init(void *ctx, uint32_t wqe_size, uint16_t log_wqbb_n, int socket,
		  struct mlx5_devx_wq_attr *wq_attr,
		  struct mlx5_devx_wq_res *wq_res)
{
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = wqe_size * (1 << log_wqbb_n);
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
				    umem_size, 0);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for RQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill WQ attributes for RQ/RMP object creation. */
	wq_attr->wq_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	wq_attr->wq_umem_offset = 0;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = umem_dbrec;
	wq_attr->log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Fill WQ resources to return to the caller. */
	wq_res->umem_buf = umem_buf;
	wq_res->umem_obj = umem_obj;
	wq_res->db_rec = RTE_PTR_ADD(umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create standalone Receive Queue using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_std_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			uint32_t wqe_size, uint16_t log_wqbb_n,
			struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&attr->wq_attr, &rq_obj->wq);
	if (ret != 0)
		return ret;
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rq_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Memory Pool using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rmp_obj
 *   Pointer to RMP to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rmp_create(void *ctx, struct mlx5_devx_rmp *rmp_obj,
		     uint32_t wqe_size, uint16_t log_wqbb_n,
		     struct mlx5_devx_wq_attr *wq_attr, int socket)
{
	struct mlx5_devx_create_rmp_attr rmp_attr = { 0 };
	int ret;

	if (rmp_obj->rmp != NULL)
		return 0;
	rmp_attr.wq_attr = *wq_attr;
	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&rmp_attr.wq_attr, &rmp_obj->wq);
	if (ret != 0)
		return ret;
	rmp_attr.state = MLX5_RMPC_STATE_RDY;
	rmp_attr.basic_cyclic_rcv_wqe =
		wq_attr->wq_type != MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
	/* Create receive memory pool object with DevX. */
	rmp_obj->rmp = mlx5_devx_cmd_create_rmp(ctx, &rmp_attr, socket);
	if (rmp_obj->rmp == NULL) {
		DRV_LOG(ERR, "Can't create DevX RMP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rmp_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Shared Receive Queue based on RMP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_shared_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			   uint32_t wqe_size, uint16_t log_wqbb_n,
			   struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_rmp_create(ctx, rq_obj->rmp, wqe_size, log_wqbb_n,
				   &attr->wq_attr, socket);
	if (ret != 0)
		return ret;
	attr->mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP;
	attr->rmpn = rq_obj->rmp->rmp->id;
	attr->flush_in_error_en = 0;
	memset(&attr->wq_attr, 0, sizeof(attr->wq_attr));
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RMP RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	rq_obj->rmp->ref_cnt++;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_rq_destroy(rq_obj);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Queue using DevX API. A shared RQ is created only if the
 * rmp field of rq_obj is set.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
		    uint32_t wqe_size, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_rq_attr *attr, int socket)
{
	if (rq_obj->rmp == NULL)
		return mlx5_devx_rq_std_create(ctx, rq_obj, wqe_size,
					       log_wqbb_n, attr, socket);
	return mlx5_devx_rq_shared_create(ctx, rq_obj, wqe_size,
					  log_wqbb_n, attr, socket);
}
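
/*
 * Usage sketch (illustrative only, not compiled): the rmp pointer in the RQ
 * object selects between a standalone RQ and a shared (RMP-backed) RQ.
 * "shared_rmp" stands for a struct mlx5_devx_rmp owned by the caller and
 * reused across queues; RQ attributes beyond the wq_attr fields filled by
 * this helper remain the caller's responsibility.
 *
 *	struct mlx5_devx_rq rq_obj = { 0 };
 *	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 *
 *	rq_obj.rmp = use_shared ? &shared_rmp : NULL;
 *	if (mlx5_devx_rq_create(ctx, &rq_obj, wqe_size, log_wqbb_n,
 *				&rq_attr, SOCKET_ID_ANY) < 0)
 *		return -rte_errno;
 *	// ... use rq_obj.rq and the rq_obj.wq or shared_rmp.wq buffers ...
 *	mlx5_devx_rq_destroy(&rq_obj);
 */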

/**
 * Change QP state to RTS.
 *
 * @param[in] qp
 *   DevX QP to change.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
{
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}