/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stdint.h>

#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common_os.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"

/**
 * Destroy DevX Completion Queue.
 *
 * @param[in] cq
 *   DevX CQ to destroy.
 */
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
}

/* Mark all CQEs initially as invalid. */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
	uint16_t i;

	for (i = 0; i < cq_size; i++, cqe++)
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
}

/**
 * Create Completion Queue using DevX API.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   q_umem_valid
 *   q_umem_id
 *   q_umem_offset
 *   db_umem_valid
 *   db_umem_id
 *   db_umem_offset
 *   eqn
 *   log_cq_size
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq_obj
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] attr
 *   Pointer to CQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
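 *
 * A minimal usage sketch (illustrative only; uar_page_id stands for a value
 * the caller obtains from its UAR object, it is not set by this helper):
 *
 *	struct mlx5_devx_cq cq;
 *	struct mlx5_devx_cq_attr cq_attr = { .uar_page_id = uar_page_id };
 *
 *	if (mlx5_devx_cq_create(ctx, &cq, 10, &cq_attr, SOCKET_ID_ANY))
 *		return -rte_errno;
 *	...
 *	mlx5_devx_cq_destroy(&cq);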
 */
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * cq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for CQ object creation. */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
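	/*
	 * Note: the firmware CQ will keep referencing the registered umem
	 * through the q_umem_id/db_umem_id filled above, so the buffer and
	 * its registration must stay valid until mlx5_devx_cq_destroy().
	 */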
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, cq_size);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Destroy DevX Send Queue.
 *
 * @param[in] sq
 *   DevX SQ to destroy.
 */
void
mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
{
	if (sq->sq)
		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
	if (sq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
	if (sq->umem_buf)
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
}

/**
 * Create Send Queue using DevX API.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   wq_type
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_stride
 *   log_wq_sz
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq_obj
 *   Pointer to SQ to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to SQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
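 *
 * A minimal usage sketch (illustrative only; the CQ number and the remaining
 * SQ attributes are assumed to come from the caller, e.g. from a CQ created
 * with mlx5_devx_cq_create()):
 *
 *	struct mlx5_devx_sq sq;
 *	struct mlx5_devx_create_sq_attr sq_attr = { .cqn = cq.cq->id };
 *
 *	if (mlx5_devx_sq_create(ctx, &sq, log_wqbb_n, &sq_attr, SOCKET_ID_ANY))
 *		return -rte_errno;
 *	...
 *	mlx5_devx_sq_destroy(&sq);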
 */
int
mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_sq_attr *attr, int socket)
{
	struct mlx5_devx_obj *sq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint16_t sq_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for SQ object creation. */
	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	attr->wq_attr.log_wq_sz = log_wqbb_n;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create send queue object with DevX. */
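	/*
	 * Note: dbr_umem_id equals wq_umem_id above because the doorbell
	 * record shares the WQ umem and sits right after the WQE array, at
	 * offset umem_dbrec.
	 */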
	sq = mlx5_devx_cmd_create_sq(ctx, attr);
	if (!sq) {
		DRV_LOG(ERR, "Can't create DevX SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq_obj->umem_buf = umem_buf;
	sq_obj->umem_obj = umem_obj;
	sq_obj->sq = sq;
	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Destroy DevX Receive Queue resources.
 *
 * @param[in] rq_res
 *   DevX RQ resource to destroy.
 */
static void
mlx5_devx_wq_res_destroy(struct mlx5_devx_wq_res *rq_res)
{
	if (rq_res->umem_obj)
		claim_zero(mlx5_os_umem_dereg(rq_res->umem_obj));
	if (rq_res->umem_buf)
		mlx5_free((void *)(uintptr_t)rq_res->umem_buf);
	memset(rq_res, 0, sizeof(*rq_res));
}

/**
 * Destroy DevX Receive Memory Pool.
 *
 * @param[in] rmp
 *   DevX RMP to destroy.
 */
static void
mlx5_devx_rmp_destroy(struct mlx5_devx_rmp *rmp)
{
	MLX5_ASSERT(rmp->ref_cnt == 0);
	if (rmp->rmp) {
		claim_zero(mlx5_devx_cmd_destroy(rmp->rmp));
		rmp->rmp = NULL;
	}
	mlx5_devx_wq_res_destroy(&rmp->wq);
}

/**
 * Destroy DevX Queue Pair.
 *
 * @param[in] qp
 *   DevX QP to destroy.
 */
void
mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
{
	if (qp->qp)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
	if (qp->umem_obj)
		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
	if (qp->umem_buf)
		mlx5_free((void *)(uintptr_t)qp->umem_buf);
}

/**
 * Create Queue Pair using DevX API.
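 *
 * The QP is created in the RESET state; mlx5_devx_qp2rts() below can be used
 * to bring it through INIT and RTR up to RTS.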
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_address
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] qp_obj
 *   Pointer to QP to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_obj *qp = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint16_t qp_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * qp_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for QP.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for QP.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for QP object creation. */
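	/*
	 * On any failure from here on, the error path below releases both the
	 * umem registration and the buffer; on success their ownership moves
	 * to qp_obj and is released by mlx5_devx_qp_destroy().
	 */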
	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_umem_offset = 0;
	attr->dbr_umem_valid = 1;
	attr->dbr_umem_id = attr->wq_umem_id;
	attr->dbr_address = umem_dbrec;
	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
	/* Create queue pair object with DevX. */
	qp = mlx5_devx_cmd_create_qp(ctx, attr);
	if (!qp) {
		DRV_LOG(ERR, "Can't create DevX QP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp_obj->umem_buf = umem_buf;
	qp_obj->umem_obj = umem_obj;
	qp_obj->qp = qp;
	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Destroy DevX Receive Queue.
 *
 * @param[in] rq
 *   DevX RQ to destroy.
 */
void
mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
{
	if (rq->rq) {
		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
		rq->rq = NULL;
		if (rq->rmp)
			rq->rmp->ref_cnt--;
	}
	if (rq->rmp == NULL) {
		mlx5_devx_wq_res_destroy(&rq->wq);
	} else {
		if (rq->rmp->ref_cnt == 0)
			mlx5_devx_rmp_destroy(rq->rmp);
	}
}

/**
 * Create WQ resources using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[out] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[out] wq_res
 *   Pointer to WQ resource to create.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
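 *
 * The helper allocates one buffer holding the WQE array followed by the
 * doorbell record (aligned to MLX5_DBR_SIZE), registers it as a single umem
 * and fills the umem and doorbell fields of wq_attr accordingly.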
 */
static int
mlx5_devx_wq_init(void *ctx, uint32_t wqe_size, uint16_t log_wqbb_n, int socket,
		  struct mlx5_devx_wq_attr *wq_attr,
		  struct mlx5_devx_wq_res *wq_res)
{
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = wqe_size * (1 << log_wqbb_n);
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
				    umem_size, 0);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for RQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill WQ attributes for RQ/RMP object creation. */
	wq_attr->wq_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	wq_attr->wq_umem_offset = 0;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = umem_dbrec;
	wq_attr->log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Fill WQ resources to return to the caller. */
	wq_res->umem_buf = umem_buf;
	wq_res->umem_obj = umem_obj;
	wq_res->db_rec = RTE_PTR_ADD(umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create standalone Receive Queue using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_std_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			uint32_t wqe_size, uint16_t log_wqbb_n,
			struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&attr->wq_attr, &rq_obj->wq);
	if (ret != 0)
		return ret;
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rq_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Memory Pool using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rmp_obj
 *   Pointer to RMP to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
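 *
 * The RMP is created only once: if rmp_obj->rmp already exists the call is a
 * no-op. Its lifetime is reference-counted by the shared RQ create/destroy
 * paths.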
 */
static int
mlx5_devx_rmp_create(void *ctx, struct mlx5_devx_rmp *rmp_obj,
		     uint32_t wqe_size, uint16_t log_wqbb_n,
		     struct mlx5_devx_wq_attr *wq_attr, int socket)
{
	struct mlx5_devx_create_rmp_attr rmp_attr = { 0 };
	int ret;

	if (rmp_obj->rmp != NULL)
		return 0;
	rmp_attr.wq_attr = *wq_attr;
	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&rmp_attr.wq_attr, &rmp_obj->wq);
	if (ret != 0)
		return ret;
	rmp_attr.state = MLX5_RMPC_STATE_RDY;
	rmp_attr.basic_cyclic_rcv_wqe =
		wq_attr->wq_type != MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
	/* Create receive memory pool object with DevX. */
	rmp_obj->rmp = mlx5_devx_cmd_create_rmp(ctx, &rmp_attr, socket);
	if (rmp_obj->rmp == NULL) {
		DRV_LOG(ERR, "Can't create DevX RMP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rmp_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Shared Receive Queue based on RMP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
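 *
 * The RQ is attached to the RMP pointed to by rq_obj->rmp (created on first
 * use). The per-RQ WQ attributes are cleared because the WQ memory belongs to
 * the RMP, and the RMP reference count is incremented on success.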
 */
static int
mlx5_devx_rq_shared_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			   uint32_t wqe_size, uint16_t log_wqbb_n,
			   struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_rmp_create(ctx, rq_obj->rmp, wqe_size, log_wqbb_n,
				   &attr->wq_attr, socket);
	if (ret != 0)
		return ret;
	attr->mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP;
	attr->rmpn = rq_obj->rmp->rmp->id;
	attr->flush_in_error_en = 0;
	memset(&attr->wq_attr, 0, sizeof(attr->wq_attr));
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RMP RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	rq_obj->rmp->ref_cnt++;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_rq_destroy(rq_obj);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Queue using DevX API. A shared RQ is created only if
 * rq_obj->rmp is set.
 *
 * Gets a pointer to a partially initialized attributes structure and updates
 * the following fields:
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
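 *
 * A minimal usage sketch (illustrative only; the WQE size and the remaining
 * RQ attributes such as cqn are assumed to come from the caller):
 *
 *	struct mlx5_devx_rq rq = { .rmp = NULL };          // standalone RQ
 *	struct mlx5_devx_rq srq = { .rmp = &shared_rmp };  // RMP-based shared RQ
 *
 *	if (mlx5_devx_rq_create(ctx, &rq, wqe_size, log_wqbb_n, &rq_attr,
 *				SOCKET_ID_ANY))
 *		return -rte_errno;
 *	...
 *	mlx5_devx_rq_destroy(&rq);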
 */
int
mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
		    uint32_t wqe_size, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_rq_attr *attr, int socket)
{
	if (rq_obj->rmp == NULL)
		return mlx5_devx_rq_std_create(ctx, rq_obj, wqe_size,
					       log_wqbb_n, attr, socket);
	return mlx5_devx_rq_shared_create(ctx, rq_obj, wqe_size,
					  log_wqbb_n, attr, socket);
}

/**
 * Change QP state to RTS.
 *
 * @param[in] qp
 *   DevX QP to change.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
{
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
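
/*
 * Example bring-up of a DevX QP with the helpers above (a sketch only; the
 * attribute names pd, uar_index and cqn and the self-connection through the
 * QP's own ID are assumptions that depend on the caller's context):
 *
 *	struct mlx5_devx_qp qp = { 0 };
 *	struct mlx5_devx_qp_attr qp_attr = {
 *		.pd = pdn,
 *		.uar_index = uar_page_id,
 *		.cqn = cq.cq->id,
 *	};
 *
 *	if (mlx5_devx_qp_create(ctx, &qp, log_wqbb_n, &qp_attr, SOCKET_ID_ANY))
 *		return -rte_errno;
 *	if (mlx5_devx_qp2rts(&qp, qp.qp->id))
 *		goto error;
 *	...
 *	mlx5_devx_qp_destroy(&qp);
 */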