/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stdint.h>

#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common_os.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"

/**
 * Destroy DevX Completion Queue.
 *
 * @param[in] cq
 *   DevX CQ to destroy.
 */
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
}

/* Mark all CQEs initially as invalid. */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
	uint16_t i;

	for (i = 0; i < cq_size; i++, cqe++)
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
}

/**
 * Create Completion Queue using DevX API.
 *
 * Takes a pointer to a partially initialized attribute structure and updates
 * the following fields:
 *   q_umem_valid
 *   q_umem_id
 *   q_umem_offset
 *   db_umem_valid
 *   db_umem_id
 *   db_umem_offset
 *   eqn
 *   log_cq_size
 *   log_page_size
 * All other fields are set by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq_obj
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] attr
 *   Pointer to CQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint32_t num_of_cqes = RTE_BIT32(log_desc_n);
	int ret;

	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * num_of_cqes;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for CQ object creation. */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, num_of_cqes);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
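/*
 * Illustrative usage sketch (not part of the driver): how a caller might pair
 * mlx5_devx_cq_create() with mlx5_devx_cq_destroy(). It assumes a valid DevX
 * context from the mlx5 open_device() glue call and that the caller-owned CQ
 * attribute fields (e.g. the UAR page) are filled before the call, per the
 * contract documented above. The function below is hypothetical and is not
 * referenced anywhere in the driver.
 */
static __rte_unused int
mlx5_devx_cq_usage_sketch(void *ctx, int socket)
{
	struct mlx5_devx_cq cq_obj = { 0 };
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	uint16_t log_desc_n = 8; /* 256 CQEs. */
	int ret;

	/* Caller-owned attribute fields would be set here. */
	ret = mlx5_devx_cq_create(ctx, &cq_obj, log_desc_n, &cq_attr, socket);
	if (ret != 0)
		return ret; /* rte_errno is already set. */
	/*
	 * cq_obj.cq, cq_obj.umem_buf and cq_obj.db_rec are now valid and all
	 * CQEs are marked invalid; the CQ can be armed and polled by the
	 * caller.
	 */
	mlx5_devx_cq_destroy(&cq_obj);
	return 0;
}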
/**
 * Destroy DevX Send Queue.
 *
 * @param[in] sq
 *   DevX SQ to destroy.
 */
void
mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
{
	if (sq->sq)
		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
	if (sq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
	if (sq->umem_buf)
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
}

/**
 * Create Send Queue using DevX API.
 *
 * Takes a pointer to a partially initialized attribute structure and updates
 * the following fields:
 *   wq_type
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_stride
 *   log_wq_sz
 *   log_wq_pg_sz
 * All other fields are set by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq_obj
 *   Pointer to SQ to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to SQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_sq_attr *attr, int socket)
{
	struct mlx5_devx_obj *sq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t num_of_wqbbs = RTE_BIT32(log_wqbb_n);
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * num_of_wqbbs;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for SQ object creation. */
	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	attr->wq_attr.log_wq_sz = log_wqbb_n;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create send queue object with DevX. */
	sq = mlx5_devx_cmd_create_sq(ctx, attr);
	if (!sq) {
		DRV_LOG(ERR, "Can't create DevX SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq_obj->umem_buf = umem_buf;
	sq_obj->umem_obj = umem_obj;
	sq_obj->sq = sq;
	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
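/*
 * Illustrative sketch (not part of the driver) of the single-umem layout the
 * create helpers above produce: the WQE (or CQE) ring is laid out first, then
 * the doorbell record is appended at the next MLX5_DBR_SIZE-aligned offset.
 * The helper below is hypothetical; it only repeats the arithmetic used by
 * mlx5_devx_sq_create() so the resulting offsets can be inspected.
 */
static __rte_unused void
mlx5_devx_sq_layout_sketch(uint16_t log_wqbb_n)
{
	uint32_t num_of_wqbbs = RTE_BIT32(log_wqbb_n);
	uint32_t ring_size = MLX5_WQE_SIZE * num_of_wqbbs;
	uint32_t dbrec_offset = RTE_ALIGN(ring_size, MLX5_DBR_SIZE);
	uint32_t total_size = dbrec_offset + MLX5_DBR_SIZE;

	/*
	 * E.g. with log_wqbb_n = 10 and 64-byte WQEBBs the ring occupies
	 * 64 KB, the doorbell record starts right after it, and db_rec in
	 * struct mlx5_devx_sq points at umem_buf + dbrec_offset.
	 */
	DRV_LOG(DEBUG, "SQ umem: ring %u bytes, dbrec at %u, total %u bytes.",
		ring_size, dbrec_offset, total_size);
}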
/**
 * Destroy DevX Receive Queue resources.
 *
 * @param[in] rq_res
 *   DevX RQ resource to destroy.
 */
static void
mlx5_devx_wq_res_destroy(struct mlx5_devx_wq_res *rq_res)
{
	if (rq_res->umem_obj)
		claim_zero(mlx5_os_umem_dereg(rq_res->umem_obj));
	if (rq_res->umem_buf)
		mlx5_free((void *)(uintptr_t)rq_res->umem_buf);
	memset(rq_res, 0, sizeof(*rq_res));
}

/**
 * Destroy DevX Receive Memory Pool.
 *
 * @param[in] rmp
 *   DevX RMP to destroy.
 */
static void
mlx5_devx_rmp_destroy(struct mlx5_devx_rmp *rmp)
{
	MLX5_ASSERT(rmp->ref_cnt == 0);
	if (rmp->rmp) {
		claim_zero(mlx5_devx_cmd_destroy(rmp->rmp));
		rmp->rmp = NULL;
	}
	mlx5_devx_wq_res_destroy(&rmp->wq);
}

/**
 * Destroy DevX Queue Pair.
 *
 * @param[in] qp
 *   DevX QP to destroy.
 */
void
mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
{
	if (qp->qp)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
	if (qp->umem_obj)
		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
	if (qp->umem_buf)
		mlx5_free((void *)(uintptr_t)qp->umem_buf);
}

/**
 * Create Queue Pair using DevX API.
 *
 * Takes a pointer to a partially initialized attribute structure and updates
 * the following fields:
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_address
 *   log_page_size
 * All other fields are set by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] qp_obj
 *   Pointer to QP to create.
 * @param[in] queue_size
 *   Size of queue to create.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint32_t queue_size,
		    struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_obj *qp = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = queue_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for QP.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for QP.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for QP object creation. */
	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_umem_offset = 0;
	attr->dbr_umem_valid = 1;
	attr->dbr_umem_id = attr->wq_umem_id;
	attr->dbr_address = umem_dbrec;
	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
	/* Create queue pair object with DevX. */
	qp = mlx5_devx_cmd_create_qp(ctx, attr);
	if (!qp) {
		DRV_LOG(ERR, "Can't create DevX QP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp_obj->umem_buf = umem_buf;
	qp_obj->umem_obj = umem_obj;
	qp_obj->qp = qp;
	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
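/*
 * Illustrative usage sketch (not part of the driver): unlike the CQ/SQ
 * helpers, which take a log2 descriptor count, mlx5_devx_qp_create() receives
 * the WQE area size directly in bytes (umem_size = queue_size above). A
 * caller that thinks in WQEBBs would derive it as below; the function and the
 * "64 WQEBBs" geometry are hypothetical, and attr is assumed to carry the
 * caller-owned QP attributes.
 */
static __rte_unused int
mlx5_devx_qp_usage_sketch(void *ctx, struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_qp qp_obj = { 0 };
	uint32_t queue_size = 64 * MLX5_WQE_SIZE; /* 64 WQEBBs, in bytes. */
	int ret;

	ret = mlx5_devx_qp_create(ctx, &qp_obj, queue_size, attr, socket);
	if (ret != 0)
		return ret; /* rte_errno is already set. */
	/* ... post WQEs and ring the doorbell via qp_obj.db_rec ... */
	mlx5_devx_qp_destroy(&qp_obj);
	return 0;
}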
/**
 * Destroy DevX Receive Queue.
 *
 * @param[in] rq
 *   DevX RQ to destroy.
 */
void
mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
{
	if (rq->rq) {
		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
		rq->rq = NULL;
		if (rq->rmp)
			rq->rmp->ref_cnt--;
	}
	if (rq->rmp == NULL) {
		mlx5_devx_wq_res_destroy(&rq->wq);
	} else {
		if (rq->rmp->ref_cnt == 0)
			mlx5_devx_rmp_destroy(rq->rmp);
	}
}

/**
 * Create WQ resources using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[out] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[out] wq_res
 *   Pointer to WQ resource to create.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_wq_init(void *ctx, uint32_t wqe_size, uint16_t log_wqbb_n, int socket,
		  struct mlx5_devx_wq_attr *wq_attr,
		  struct mlx5_devx_wq_res *wq_res)
{
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = wqe_size * (1 << log_wqbb_n);
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
				    umem_size, 0);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for RQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill WQ attributes for RQ/RMP object creation. */
	wq_attr->wq_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	wq_attr->wq_umem_offset = 0;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = umem_dbrec;
	wq_attr->log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Store created WQ resources for the caller. */
	wq_res->umem_buf = umem_buf;
	wq_res->umem_obj = umem_obj;
	wq_res->db_rec = RTE_PTR_ADD(umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
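/*
 * Illustrative teardown sketch (not part of the driver): for RQs attached to
 * a shared RMP, mlx5_devx_rq_destroy() drops the RMP reference and releases
 * the shared WQ resources only when the last attached RQ is destroyed. The
 * two-queue scenario below is hypothetical and assumes both RQs point at the
 * same caller-owned RMP (see mlx5_devx_rq_create() below).
 */
static __rte_unused void
mlx5_devx_shared_rq_teardown_sketch(struct mlx5_devx_rq *rq1,
				    struct mlx5_devx_rq *rq2)
{
	/* First destroy: the RQ object goes away, the RMP stays (ref_cnt 2 -> 1). */
	mlx5_devx_rq_destroy(rq1);
	/* Last destroy: the RMP object and its WQ resources are released too. */
	mlx5_devx_rq_destroy(rq2);
}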
/**
 * Create standalone Receive Queue using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_std_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			uint32_t wqe_size, uint16_t log_wqbb_n,
			struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&attr->wq_attr, &rq_obj->wq);
	if (ret != 0)
		return ret;
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rq_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Memory Pool using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rmp_obj
 *   Pointer to RMP to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rmp_create(void *ctx, struct mlx5_devx_rmp *rmp_obj,
		     uint32_t wqe_size, uint16_t log_wqbb_n,
		     struct mlx5_devx_wq_attr *wq_attr, int socket)
{
	struct mlx5_devx_create_rmp_attr rmp_attr = { 0 };
	int ret;

	if (rmp_obj->rmp != NULL)
		return 0;
	rmp_attr.wq_attr = *wq_attr;
	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&rmp_attr.wq_attr, &rmp_obj->wq);
	if (ret != 0)
		return ret;
	rmp_attr.state = MLX5_RMPC_STATE_RDY;
	rmp_attr.basic_cyclic_rcv_wqe =
		wq_attr->wq_type != MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
	/* Create receive memory pool object with DevX. */
	rmp_obj->rmp = mlx5_devx_cmd_create_rmp(ctx, &rmp_attr, socket);
	if (rmp_obj->rmp == NULL) {
		DRV_LOG(ERR, "Can't create DevX RMP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rmp_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}
/**
 * Create Shared Receive Queue based on RMP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_shared_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			   uint32_t wqe_size, uint16_t log_wqbb_n,
			   struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_rmp_create(ctx, rq_obj->rmp, wqe_size, log_wqbb_n,
				   &attr->wq_attr, socket);
	if (ret != 0)
		return ret;
	attr->mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP;
	attr->rmpn = rq_obj->rmp->rmp->id;
	attr->flush_in_error_en = 0;
	memset(&attr->wq_attr, 0, sizeof(attr->wq_attr));
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RMP RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	rq_obj->rmp->ref_cnt++;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_rq_destroy(rq_obj);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Queue using DevX API. Shared RQ is created only if rmp is set.
 *
 * Takes a pointer to a partially initialized attribute structure and updates
 * the following fields:
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_pg_sz
 * All other fields are set by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
		    uint32_t wqe_size, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_rq_attr *attr, int socket)
{
	if (rq_obj->rmp == NULL)
		return mlx5_devx_rq_std_create(ctx, rq_obj, wqe_size,
					       log_wqbb_n, attr, socket);
	return mlx5_devx_rq_shared_create(ctx, rq_obj, wqe_size,
					  log_wqbb_n, attr, socket);
}
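/*
 * Illustrative usage sketch (not part of the driver): mlx5_devx_rq_create()
 * builds a standalone RQ when rq_obj->rmp is NULL and a shared (RMP-backed)
 * RQ when it points at a caller-owned struct mlx5_devx_rmp; the first shared
 * RQ creates the RMP, later ones reuse it and bump ref_cnt. The function and
 * the queue geometry below are hypothetical, rmp is assumed to be a
 * zero-initialized RMP owned by the caller, and attr carries the caller-owned
 * RQ attributes.
 */
static __rte_unused int
mlx5_devx_shared_rq_usage_sketch(void *ctx, struct mlx5_devx_rmp *rmp,
				 struct mlx5_devx_create_rq_attr *attr,
				 int socket)
{
	struct mlx5_devx_rq rq1 = { .rmp = rmp };
	struct mlx5_devx_rq rq2 = { .rmp = rmp };
	uint32_t wqe_size = MLX5_WQE_SIZE;
	uint16_t log_wqbb_n = 8; /* 256 WQBBs. */
	int ret;

	ret = mlx5_devx_rq_create(ctx, &rq1, wqe_size, log_wqbb_n, attr, socket);
	if (ret != 0)
		return ret;
	ret = mlx5_devx_rq_create(ctx, &rq2, wqe_size, log_wqbb_n, attr, socket);
	if (ret != 0) {
		mlx5_devx_rq_destroy(&rq1);
		return ret;
	}
	/* Both RQs now share the same RMP buffer; rmp->ref_cnt == 2. */
	mlx5_devx_rq_destroy(&rq2);
	mlx5_devx_rq_destroy(&rq1);
	return 0;
}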
/**
 * Change QP state to RTS.
 *
 * @param[in] qp
 *   DevX QP to change.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
{
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state (%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state (%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state (%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
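/*
 * Illustrative usage sketch (not part of the driver): mlx5_devx_qp2rts()
 * drives the QP through RESET -> INIT -> RTR -> RTS in one call. The loopback
 * scenario below, where the QP is connected to itself by passing its own
 * object ID as the remote QP ID, is hypothetical.
 */
static __rte_unused int
mlx5_devx_qp2rts_usage_sketch(struct mlx5_devx_qp *qp)
{
	/* Connect the QP to itself and move it to the RTS state. */
	if (mlx5_devx_qp2rts(qp, qp->qp->id))
		return -1; /* rte_errno set by the failed state transition. */
	/* The QP can now post and complete work requests. */
	return 0;
}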