/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stdint.h>

#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common_os.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"

/**
 * Destroy DevX Completion Queue.
 *
 * @param[in] cq
 *   DevX CQ to destroy.
 */
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
}

/* Mark all CQEs initially as invalid. */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
	uint16_t i;

	for (i = 0; i < cq_size; i++, cqe++)
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
}

/**
 * Create Completion Queue using DevX API.
 *
 * Get a pointer to a partially initialized attributes structure, and update
 * the following fields:
 *   q_umem_valid
 *   q_umem_id
 *   q_umem_offset
 *   db_umem_valid
 *   db_umem_id
 *   db_umem_offset
 *   eqn
 *   log_cq_size
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq_obj
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] attr
 *   Pointer to CQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * cq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for CQ object creation. */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, cq_size);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
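
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * create a 256-entry CQ on socket 0 and release it afterwards. The DevX
 * context "ctx" and the UAR page id "uar_page_id" are assumptions here and
 * must be obtained elsewhere (e.g. from the mlx5 open_device() glue call
 * and a registered UAR).
 *
 *	struct mlx5_devx_cq cq_obj = { 0 };
 *	struct mlx5_devx_cq_attr cq_attr = {
 *		.uar_page_id = uar_page_id,
 *	};
 *	int ret;
 *
 *	ret = mlx5_devx_cq_create(ctx, &cq_obj, 8, &cq_attr, 0);
 *	if (ret)
 *		return ret;	// rte_errno is already set.
 *	// ... poll cq_obj.cqes and update cq_obj.db_rec ...
 *	mlx5_devx_cq_destroy(&cq_obj);
 */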

/**
 * Destroy DevX Send Queue.
 *
 * @param[in] sq
 *   DevX SQ to destroy.
 */
void
mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
{
	if (sq->sq)
		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
	if (sq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
	if (sq->umem_buf)
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
}

/**
 * Create Send Queue using DevX API.
 *
 * Get a pointer to a partially initialized attributes structure, and update
 * the following fields:
 *   wq_type
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_stride
 *   log_wq_sz
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq_obj
 *   Pointer to SQ to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to SQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_sq_attr *attr, int socket)
{
	struct mlx5_devx_obj *sq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint16_t sq_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for SQ object creation. */
	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	attr->wq_attr.log_wq_sz = log_wqbb_n;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create send queue object with DevX. */
	sq = mlx5_devx_cmd_create_sq(ctx, attr);
	if (!sq) {
		DRV_LOG(ERR, "Can't create DevX SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq_obj->umem_buf = umem_buf;
	sq_obj->umem_obj = umem_obj;
	sq_obj->sq = sq;
	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
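
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * create a 64-WQBB SQ bound to an existing CQ. The DevX context "ctx" and
 * the identifiers "cqn", "tisn", "pdn" and "uar_page" are assumptions and
 * must come from previously created DevX/UAR objects.
 *
 *	struct mlx5_devx_sq sq_obj = { 0 };
 *	struct mlx5_devx_create_sq_attr sq_attr = {
 *		.tis_lst_sz = 1,
 *		.tis_num = tisn,
 *		.cqn = cqn,
 *		.wq_attr = {
 *			.uar_page = uar_page,
 *			.pd = pdn,
 *		},
 *	};
 *
 *	if (mlx5_devx_sq_create(ctx, &sq_obj, 6, &sq_attr, 0))
 *		return -rte_errno;
 *	// ... post WQEs to sq_obj.umem_buf, ring sq_obj.db_rec ...
 *	mlx5_devx_sq_destroy(&sq_obj);
 */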

/**
 * Destroy DevX Receive Queue resources.
 *
 * @param[in] rq_res
 *   DevX RQ resource to destroy.
 */
static void
mlx5_devx_wq_res_destroy(struct mlx5_devx_wq_res *rq_res)
{
	if (rq_res->umem_obj)
		claim_zero(mlx5_os_umem_dereg(rq_res->umem_obj));
	if (rq_res->umem_buf)
		mlx5_free((void *)(uintptr_t)rq_res->umem_buf);
	memset(rq_res, 0, sizeof(*rq_res));
}

/**
 * Destroy DevX Receive Memory Pool.
 *
 * @param[in] rmp
 *   DevX RMP to destroy.
 */
static void
mlx5_devx_rmp_destroy(struct mlx5_devx_rmp *rmp)
{
	MLX5_ASSERT(rmp->ref_cnt == 0);
	if (rmp->rmp) {
		claim_zero(mlx5_devx_cmd_destroy(rmp->rmp));
		rmp->rmp = NULL;
	}
	mlx5_devx_wq_res_destroy(&rmp->wq);
}

/**
 * Destroy DevX Queue Pair.
 *
 * @param[in] qp
 *   DevX QP to destroy.
 */
void
mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
{
	if (qp->qp)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
	if (qp->umem_obj)
		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
	if (qp->umem_buf)
		mlx5_free((void *)(uintptr_t)qp->umem_buf);
}

/**
 * Create Queue Pair using DevX API.
 *
 * Get a pointer to a partially initialized attributes structure, and update
 * the following fields:
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_address
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] qp_obj
 *   Pointer to QP to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_obj *qp = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint16_t qp_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * qp_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for QP.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for QP.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for QP object creation. */
	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_umem_offset = 0;
	attr->dbr_umem_valid = 1;
	attr->dbr_umem_id = attr->wq_umem_id;
	attr->dbr_address = umem_dbrec;
	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
	/* Create queue pair object with DevX. */
	qp = mlx5_devx_cmd_create_qp(ctx, attr);
	if (!qp) {
		DRV_LOG(ERR, "Can't create DevX QP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp_obj->umem_buf = umem_buf;
	qp_obj->umem_obj = umem_obj;
	qp_obj->qp = qp;
	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
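
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * create a QP whose work queue holds 64 WQBBs. "ctx", "pdn", "cqn" and
 * "uar_index" are assumptions and must come from objects created earlier;
 * the queue size related attributes are left to the caller, as described
 * in the function comment above.
 *
 *	struct mlx5_devx_qp qp_obj = { 0 };
 *	struct mlx5_devx_qp_attr qp_attr = {
 *		.pd = pdn,
 *		.uar_index = uar_index,
 *		.cqn = cqn,
 *	};
 *
 *	if (mlx5_devx_qp_create(ctx, &qp_obj, 6, &qp_attr, 0))
 *		return -rte_errno;
 *	// ... move the QP to RTS with mlx5_devx_qp2rts() before use ...
 *	mlx5_devx_qp_destroy(&qp_obj);
 */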

/**
 * Destroy DevX Receive Queue.
 *
 * @param[in] rq
 *   DevX RQ to destroy.
 */
void
mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
{
	if (rq->rq) {
		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
		rq->rq = NULL;
		if (rq->rmp)
			rq->rmp->ref_cnt--;
	}
	if (rq->rmp == NULL) {
		mlx5_devx_wq_res_destroy(&rq->wq);
	} else {
		if (rq->rmp->ref_cnt == 0)
			mlx5_devx_rmp_destroy(rq->rmp);
	}
}

/**
 * Create WQ resources using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[out] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[out] wq_res
 *   Pointer to WQ resource to create.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_wq_init(void *ctx, uint32_t wqe_size, uint16_t log_wqbb_n, int socket,
		  struct mlx5_devx_wq_attr *wq_attr,
		  struct mlx5_devx_wq_res *wq_res)
{
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = wqe_size * (1 << log_wqbb_n);
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
				    umem_size, 0);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for RQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill WQ attributes for RQ/RMP object creation. */
	wq_attr->wq_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	wq_attr->wq_umem_offset = 0;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = umem_dbrec;
	wq_attr->log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Fill the output WQ resources. */
	wq_res->umem_buf = umem_buf;
	wq_res->umem_obj = umem_obj;
	wq_res->db_rec = RTE_PTR_ADD(umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
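
/*
 * Layout of the umem registered by mlx5_devx_wq_init(), as a worked example
 * (assuming wqe_size = MLX5_WQE_SIZE = 64 bytes and log_wqbb_n = 9,
 * i.e. 512 WQBBs):
 *
 *	umem_size  = 64 * 512 = 32768                    WQE area
 *	umem_dbrec = RTE_ALIGN(32768, MLX5_DBR_SIZE) = 32768   doorbell offset
 *	umem_size += MLX5_DBR_SIZE                       total registered size
 *
 * The doorbell record always follows the WQE array inside the same umem,
 * which is why dbr_umem_id is set equal to wq_umem_id above.
 */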

/**
 * Create standalone Receive Queue using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_std_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			uint32_t wqe_size, uint16_t log_wqbb_n,
			struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&attr->wq_attr, &rq_obj->wq);
	if (ret != 0)
		return ret;
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rq_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Memory Pool using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rmp_obj
 *   Pointer to RMP to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rmp_create(void *ctx, struct mlx5_devx_rmp *rmp_obj,
		     uint32_t wqe_size, uint16_t log_wqbb_n,
		     struct mlx5_devx_wq_attr *wq_attr, int socket)
{
	struct mlx5_devx_create_rmp_attr rmp_attr = { 0 };
	int ret;

	if (rmp_obj->rmp != NULL)
		return 0;
	rmp_attr.wq_attr = *wq_attr;
	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&rmp_attr.wq_attr, &rmp_obj->wq);
	if (ret != 0)
		return ret;
	rmp_attr.state = MLX5_RMPC_STATE_RDY;
	rmp_attr.basic_cyclic_rcv_wqe =
		wq_attr->wq_type != MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
	/* Create receive memory pool object with DevX. */
	rmp_obj->rmp = mlx5_devx_cmd_create_rmp(ctx, &rmp_attr, socket);
	if (rmp_obj->rmp == NULL) {
		DRV_LOG(ERR, "Can't create DevX RMP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rmp_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Shared Receive Queue based on RMP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_shared_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			   uint32_t wqe_size, uint16_t log_wqbb_n,
			   struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_rmp_create(ctx, rq_obj->rmp, wqe_size, log_wqbb_n,
				   &attr->wq_attr, socket);
	if (ret != 0)
		return ret;
	attr->mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP;
	attr->rmpn = rq_obj->rmp->rmp->id;
	attr->flush_in_error_en = 0;
	memset(&attr->wq_attr, 0, sizeof(attr->wq_attr));
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RMP RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	rq_obj->rmp->ref_cnt++;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_rq_destroy(rq_obj);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Queue using DevX API. A shared RQ is created only if rmp
 * is set.
 *
 * Get a pointer to a partially initialized attributes structure, and update
 * the following fields:
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
		    uint32_t wqe_size, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_rq_attr *attr, int socket)
{
	if (rq_obj->rmp == NULL)
		return mlx5_devx_rq_std_create(ctx, rq_obj, wqe_size,
					       log_wqbb_n, attr, socket);
	return mlx5_devx_rq_shared_create(ctx, rq_obj, wqe_size,
					  log_wqbb_n, attr, socket);
}
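
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * create a standalone RQ; for a shared RQ the caller points rq_obj.rmp to a
 * struct mlx5_devx_rmp shared by all participating queues before calling
 * mlx5_devx_rq_create(). "ctx", "cqn" and "pdn" are assumptions here.
 *
 *	struct mlx5_devx_rq rq_obj = { 0 };	// .rmp == NULL -> standalone RQ
 *	struct mlx5_devx_create_rq_attr rq_attr = {
 *		.cqn = cqn,
 *		.wq_attr = {
 *			.wq_type = MLX5_WQ_TYPE_CYCLIC,
 *			.pd = pdn,
 *		},
 *	};
 *
 *	if (mlx5_devx_rq_create(ctx, &rq_obj, MLX5_WQE_SIZE, 8, &rq_attr, 0))
 *		return -rte_errno;
 *	// ... post receive WQEs, then on teardown:
 *	mlx5_devx_rq_destroy(&rq_obj);
 */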

/**
 * Change QP state to RTS.
 *
 * @param[in] qp
 *   DevX QP to change.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
{
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
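
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * a loopback-style bring-up where the QP is connected to itself, assuming
 * "qp_obj" was previously created with mlx5_devx_qp_create(). The QP walks
 * RESET -> INIT -> RTR -> RTS, and failures are logged by mlx5_devx_qp2rts().
 *
 *	if (mlx5_devx_qp2rts(&qp_obj, qp_obj.qp->id))
 *		return -rte_errno;
 */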