/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stdint.h>

#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common_os.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"

/**
 * Destroy DevX Completion Queue.
 *
 * @param[in] cq
 *   DevX CQ to destroy.
 */
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
}

/* Mark all CQEs initially as invalid. */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
	uint16_t i;

	for (i = 0; i < cq_size; i++, cqe++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		cqe->validity_iteration_count = MLX5_CQE_VIC_INIT;
	}
}
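
/*
 * Illustrative sketch, not part of the driver: a consumer can use the same
 * encoding written by mlx5_cq_init() to recognize CQEs the HW has not
 * completed yet. The helper name and the "expected_owner" parameter below are
 * assumptions for illustration only.
 *
 *	static inline int
 *	cqe_hw_owned(volatile struct mlx5_cqe *cqe, uint8_t expected_owner)
 *	{
 *		uint8_t op_own = cqe->op_own;
 *
 *		// Still invalid (never written by HW), or the owner bit does
 *		// not match the parity the consumer expects for this pass.
 *		return (op_own >> 4) == MLX5_CQE_INVALID ||
 *		       (op_own & MLX5_CQE_OWNER_MASK) != expected_owner;
 *	}
 */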

/**
 * Create Completion Queue using DevX API.
 *
 * Receives a pointer to a partially initialized attributes structure and
 * updates the following fields:
 *   q_umem_valid
 *   q_umem_id
 *   q_umem_offset
 *   db_umem_valid
 *   db_umem_id
 *   db_umem_offset
 *   eqn
 *   log_cq_size
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] cq_obj
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 * @param[in] attr
 *   Pointer to CQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint32_t num_of_cqes = RTE_BIT32(log_desc_n);
	int ret;

	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * num_of_cqes;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for CQ object creation. */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, num_of_cqes);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
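
/*
 * Illustrative usage sketch; "priv_ctx", "uar_page_id" and the descriptor
 * count are placeholders, not values defined in this file.
 *
 *	struct mlx5_devx_cq cq = { 0 };
 *	struct mlx5_devx_cq_attr cq_attr = {
 *		.uar_page_id = uar_page_id,	// caller-provided UAR page
 *	};
 *	uint16_t log_desc_n = 10;		// 1024 CQEs
 *
 *	if (mlx5_devx_cq_create(priv_ctx, &cq, log_desc_n, &cq_attr,
 *				SOCKET_ID_ANY))
 *		return -rte_errno;	// rte_errno already set by the helper
 *	// ... poll cq.cqes[], ring the doorbell through cq.db_rec ...
 *	mlx5_devx_cq_destroy(&cq);
 */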

/**
 * Destroy DevX Send Queue.
 *
 * @param[in] sq
 *   DevX SQ to destroy.
 */
void
mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
{
	if (sq->sq)
		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
	if (sq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
	if (sq->umem_buf)
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
}

/**
 * Create Send Queue using DevX API.
 *
 * Receives a pointer to a partially initialized attributes structure and
 * updates the following fields:
 *   wq_type
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_stride
 *   log_wq_sz
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] sq_obj
 *   Pointer to SQ to create.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to SQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_sq_attr *attr, int socket)
{
	struct mlx5_devx_obj *sq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t num_of_wqbbs = RTE_BIT32(log_wqbb_n);
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * num_of_wqbbs;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for SQ object creation. */
	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	attr->wq_attr.log_wq_sz = log_wqbb_n;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create send queue object with DevX. */
	sq = mlx5_devx_cmd_create_sq(ctx, attr);
	if (!sq) {
		DRV_LOG(ERR, "Can't create DevX SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq_obj->umem_buf = umem_buf;
	sq_obj->umem_obj = umem_obj;
	sq_obj->sq = sq;
	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
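
/*
 * Illustrative usage sketch; the SQ is bound to a previously created CQ and
 * all attribute values ("tis_id", "pd_id", "uar_page_id", ...) are
 * placeholders the caller must provide.
 *
 *	struct mlx5_devx_sq sq = { 0 };
 *	struct mlx5_devx_create_sq_attr sq_attr = {
 *		.cqn = cq.cq->id,	// CQ from mlx5_devx_cq_create()
 *		.tis_lst_sz = 1,
 *		.tis_num = tis_id,
 *		.wq_attr = {
 *			.uar_page = uar_page_id,
 *			.pd = pd_id,
 *		},
 *	};
 *
 *	if (mlx5_devx_sq_create(priv_ctx, &sq, log_wqbb_n, &sq_attr,
 *				SOCKET_ID_ANY))
 *		return -rte_errno;
 *	// ... build WQEs in sq.umem_buf, ring the doorbell via sq.db_rec ...
 *	mlx5_devx_sq_destroy(&sq);
 */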

/**
 * Destroy DevX Receive Queue resources.
 *
 * @param[in] rq_res
 *   DevX RQ resource to destroy.
 */
static void
mlx5_devx_wq_res_destroy(struct mlx5_devx_wq_res *rq_res)
{
	if (rq_res->umem_obj)
		claim_zero(mlx5_os_umem_dereg(rq_res->umem_obj));
	if (rq_res->umem_buf)
		mlx5_free((void *)(uintptr_t)rq_res->umem_buf);
	memset(rq_res, 0, sizeof(*rq_res));
}

/**
 * Destroy DevX Receive Memory Pool.
 *
 * @param[in] rmp
 *   DevX RMP to destroy.
 */
static void
mlx5_devx_rmp_destroy(struct mlx5_devx_rmp *rmp)
{
	MLX5_ASSERT(rmp->ref_cnt == 0);
	if (rmp->rmp) {
		claim_zero(mlx5_devx_cmd_destroy(rmp->rmp));
		rmp->rmp = NULL;
	}
	mlx5_devx_wq_res_destroy(&rmp->wq);
}

/**
 * Destroy DevX Queue Pair.
 *
 * @param[in] qp
 *   DevX QP to destroy.
 */
void
mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
{
	if (qp->qp)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
	if (qp->umem_obj)
		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
	if (qp->umem_buf)
		mlx5_free((void *)(uintptr_t)qp->umem_buf);
}

/**
 * Create Queue Pair using DevX API.
 *
 * Receives a pointer to a partially initialized attributes structure and
 * updates the following fields:
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_address
 *   log_page_size
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] qp_obj
 *   Pointer to QP to create.
 * @param[in] queue_size
 *   Size of queue to create.
 * @param[in] attr
 *   Pointer to QP attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint32_t queue_size,
		    struct mlx5_devx_qp_attr *attr, int socket)
{
	struct mlx5_devx_obj *qp = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = queue_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for QP.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for QP.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for QP object creation. */
	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_umem_offset = 0;
	attr->dbr_umem_valid = 1;
	attr->dbr_umem_id = attr->wq_umem_id;
	attr->dbr_address = umem_dbrec;
	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
	/* Create queue pair object with DevX. */
	qp = mlx5_devx_cmd_create_qp(ctx, attr);
	if (!qp) {
		DRV_LOG(ERR, "Can't create DevX QP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp_obj->umem_buf = umem_buf;
	qp_obj->umem_obj = umem_obj;
	qp_obj->qp = qp;
	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
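
/*
 * Illustrative usage sketch; unlike the CQ/SQ helpers, the caller sizes the
 * WQ buffer in bytes. Attributes other than those set by mlx5_devx_qp_create()
 * itself are the caller's responsibility, and the values below are
 * placeholders.
 *
 *	struct mlx5_devx_qp qp = { 0 };
 *	struct mlx5_devx_qp_attr qp_attr = {
 *		.pd = pd_id,
 *		.uar_index = uar_page_id,
 *		.cqn = cq.cq->id,
 *		// plus the WQE-count fields required by the target QP type
 *	};
 *	// For an SQ-only QP the buffer is one WQEBB per send WQE:
 *	uint32_t queue_size = num_wqebb * MLX5_WQE_SIZE;
 *
 *	if (mlx5_devx_qp_create(priv_ctx, &qp, queue_size, &qp_attr,
 *				SOCKET_ID_ANY))
 *		return -rte_errno;
 */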

/**
 * Destroy DevX Receive Queue.
 *
 * @param[in] rq
 *   DevX RQ to destroy.
 */
void
mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
{
	if (rq->rq) {
		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
		rq->rq = NULL;
		if (rq->rmp)
			rq->rmp->ref_cnt--;
	}
	if (rq->rmp == NULL) {
		mlx5_devx_wq_res_destroy(&rq->wq);
	} else {
		if (rq->rmp->ref_cnt == 0)
			mlx5_devx_rmp_destroy(rq->rmp);
	}
}

/**
 * Create WQ resources using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] socket
 *   Socket to use for allocation.
 * @param[out] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[out] wq_res
 *   Pointer to WQ resource to create.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_wq_init(void *ctx, uint32_t wqe_size, uint16_t log_wqbb_n, int socket,
		  struct mlx5_devx_wq_attr *wq_attr,
		  struct mlx5_devx_wq_res *wq_res)
{
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = wqe_size * (1 << log_wqbb_n);
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
				    umem_size, 0);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for RQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill WQ attributes for RQ/RMP object creation. */
	wq_attr->wq_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	wq_attr->wq_umem_offset = 0;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->dbr_umem_id = wq_attr->wq_umem_id;
	wq_attr->dbr_addr = umem_dbrec;
	wq_attr->log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Fill WQ resources with the allocated objects. */
	wq_res->umem_buf = umem_buf;
	wq_res->umem_obj = umem_obj;
	wq_res->db_rec = RTE_PTR_ADD(umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
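
/*
 * Resulting umem layout produced by mlx5_devx_wq_init() (the CQ/SQ/QP
 * creators above build the same shape):
 *
 *	offset 0                              : WQE ring, wqe_size * (1 << log_wqbb_n)
 *	offset RTE_ALIGN(ring, MLX5_DBR_SIZE) : doorbell record, MLX5_DBR_SIZE bytes
 *
 * For example, with wqe_size == MLX5_WQE_SIZE (64 bytes) and log_wqbb_n == 8,
 * the ring occupies 16384 bytes, the doorbell record starts at offset 16384,
 * and the registered umem is 16384 + MLX5_DBR_SIZE bytes long.
 */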

/**
 * Create standalone Receive Queue using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_std_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			uint32_t wqe_size, uint16_t log_wqbb_n,
			struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&attr->wq_attr, &rq_obj->wq);
	if (ret != 0)
		return ret;
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rq_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Memory Pool using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rmp_obj
 *   Pointer to RMP to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] wq_attr
 *   Pointer to WQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rmp_create(void *ctx, struct mlx5_devx_rmp *rmp_obj,
		     uint32_t wqe_size, uint16_t log_wqbb_n,
		     struct mlx5_devx_wq_attr *wq_attr, int socket)
{
	struct mlx5_devx_create_rmp_attr rmp_attr = { 0 };
	int ret;

	if (rmp_obj->rmp != NULL)
		return 0;
	rmp_attr.wq_attr = *wq_attr;
	ret = mlx5_devx_wq_init(ctx, wqe_size, log_wqbb_n, socket,
				&rmp_attr.wq_attr, &rmp_obj->wq);
	if (ret != 0)
		return ret;
	rmp_attr.state = MLX5_RMPC_STATE_RDY;
	rmp_attr.basic_cyclic_rcv_wqe =
		wq_attr->wq_type != MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
	/* Create receive memory pool object with DevX. */
	rmp_obj->rmp = mlx5_devx_cmd_create_rmp(ctx, &rmp_attr, socket);
	if (rmp_obj->rmp == NULL) {
		DRV_LOG(ERR, "Can't create DevX RMP object.");
		rte_errno = ENOMEM;
		goto error;
	}
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_wq_res_destroy(&rmp_obj->wq);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Shared Receive Queue based on RMP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_rq_shared_create(void *ctx, struct mlx5_devx_rq *rq_obj,
			   uint32_t wqe_size, uint16_t log_wqbb_n,
			   struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq;
	int ret;

	ret = mlx5_devx_rmp_create(ctx, rq_obj->rmp, wqe_size, log_wqbb_n,
				   &attr->wq_attr, socket);
	if (ret != 0)
		return ret;
	attr->mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP;
	attr->rmpn = rq_obj->rmp->rmp->id;
	attr->flush_in_error_en = 0;
	memset(&attr->wq_attr, 0, sizeof(attr->wq_attr));
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RMP RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->rq = rq;
	rq_obj->rmp->ref_cnt++;
	return 0;
error:
	ret = rte_errno;
	mlx5_devx_rq_destroy(rq_obj);
	rte_errno = ret;
	return -rte_errno;
}

/**
 * Create Receive Queue using DevX API. A shared RQ is created only if the
 * rmp pointer is set.
 *
 * Receives a pointer to a partially initialized attributes structure and
 * updates the following fields:
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_pg_sz
 * All other fields are updated by the caller.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in/out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure.
 * @param[in] log_wqbb_n
 *   Log of number of WQBBs in queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
		    uint32_t wqe_size, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_rq_attr *attr, int socket)
{
	if (rq_obj->rmp == NULL)
		return mlx5_devx_rq_std_create(ctx, rq_obj, wqe_size,
					       log_wqbb_n, attr, socket);
	return mlx5_devx_rq_shared_create(ctx, rq_obj, wqe_size,
					  log_wqbb_n, attr, socket);
}
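
/*
 * Illustrative usage sketch; the rmp pointer selects the flavor. Leaving
 * rq.rmp NULL creates a standalone RQ, while pointing several RQs at one
 * shared struct mlx5_devx_rmp makes them share a single memory pool. The
 * "use_shared_pool" flag and attribute values are placeholders.
 *
 *	static struct mlx5_devx_rmp shared_rmp;	// zero-initialized, shared
 *	struct mlx5_devx_rq rq = { 0 };
 *	struct mlx5_devx_create_rq_attr rq_attr = {
 *		.cqn = cq.cq->id,
 *		// remaining fields are the caller's responsibility
 *	};
 *
 *	rq.rmp = use_shared_pool ? &shared_rmp : NULL;
 *	if (mlx5_devx_rq_create(priv_ctx, &rq, MLX5_WQE_SIZE, log_wqbb_n,
 *				&rq_attr, SOCKET_ID_ANY))
 *		return -rte_errno;
 *	// ...
 *	mlx5_devx_rq_destroy(&rq);
 */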

/**
 * Change QP state to RTS.
 *
 * @param[in] qp
 *   DevX QP to change.
 * @param[in] remote_qp_id
 *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
 *
 * @return
 *   0 on success, a negative value otherwise and rte_errno is set.
 */
int
mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
{
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
					  remote_qp_id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}
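
/*
 * Illustrative usage sketch, assuming a loopback QP so that the "remote" QP
 * id is the QP's own DevX object id:
 *
 *	if (mlx5_devx_qp_create(priv_ctx, &qp, queue_size, &qp_attr,
 *				SOCKET_ID_ANY))
 *		return -rte_errno;
 *	if (mlx5_devx_qp2rts(&qp, qp.qp->id)) {	// RESET -> INIT -> RTR -> RTS
 *		mlx5_devx_qp_destroy(&qp);
 *		return -rte_errno;
 *	}
 */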