/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stdint.h>

#include <rte_errno.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_common_os.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"
#include "mlx5_common.h"
#include "mlx5_common_devx.h"

/**
 * Destroy DevX Completion Queue.
 *
 * @param[in] cq
 *   DevX CQ to destroy.
 */
void
mlx5_devx_cq_destroy(struct mlx5_devx_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		mlx5_free((void *)(uintptr_t)cq->umem_buf);
}

/* Mark all CQEs initially as invalid. */
static void
mlx5_cq_init(struct mlx5_devx_cq *cq_obj, uint16_t cq_size)
{
	volatile struct mlx5_cqe *cqe = cq_obj->cqes;
	uint16_t i;

	for (i = 0; i < cq_size; i++, cqe++)
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
}
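
/*
 * Why the initialization above matters, as a hedged sketch: op_own packs the
 * CQE opcode in its high nibble and the ownership bit in bit 0, so an invalid
 * opcode with the owner bit set makes every entry look hardware-owned until
 * the device overwrites it. A poller (see check_cqe() in mlx5_common.h)
 * conceptually does the following, where "expected_phase" stands for the
 * consumer-index lap parity and is not a name used in this file:
 *
 *	uint8_t op_own = cqe->op_own;
 *
 *	if (MLX5_CQE_OPCODE(op_own) == MLX5_CQE_INVALID ||
 *	    MLX5_CQE_OWNER(op_own) != expected_phase)
 *		return MLX5_CQE_STATUS_HW_OWN;	// nothing new to consume
 */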

/**
 * Create Completion Queue using DevX API.
 *
 * Receives a pointer to a partially initialized attribute structure and
 * updates the following fields:
 *   q_umem_valid
 *   q_umem_id
 *   q_umem_offset
 *   db_umem_valid
 *   db_umem_id
 *   db_umem_offset
 *   eqn
 *   log_cq_size
 *   log_page_size
 * All other fields must be set by the caller.
 * A usage sketch is given after the function definition below.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in,out] cq_obj
 *   Pointer to CQ to create.
 * @param[in] log_desc_n
 *   Log (base 2) of the number of descriptors in the queue.
 * @param[in] attr
 *   Pointer to CQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cq_create(void *ctx, struct mlx5_devx_cq *cq_obj, uint16_t log_desc_n,
		    struct mlx5_devx_cq_attr *attr, int socket)
{
	struct mlx5_devx_obj *cq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t page_size = rte_mem_page_size();
	size_t alignment = MLX5_CQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint32_t eqn;
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	if (page_size == (size_t)-1 || alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size or CQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Query first EQN. */
	ret = mlx5_glue->devx_query_eqn(ctx, 0, &eqn);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query event queue number.");
		return -rte_errno;
	}
	/* Allocate memory buffer for CQEs and doorbell record. */
	umem_size = sizeof(struct mlx5_cqe) * cq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
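	/*
	 * Resulting umem layout: the CQE ring starts at offset 0 and takes
	 * sizeof(struct mlx5_cqe) * cq_size bytes (e.g. 64 KB for 1024
	 * entries with the default 64-byte CQE), and the doorbell record
	 * follows at umem_dbrec, the first MLX5_DBR_SIZE-aligned offset past
	 * the ring. The figures are illustrative, not taken from this file.
	 */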
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for CQ object creation. */
	attr->q_umem_valid = 1;
	attr->q_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->q_umem_offset = 0;
	attr->db_umem_valid = 1;
	attr->db_umem_id = attr->q_umem_id;
	attr->db_umem_offset = umem_dbrec;
	attr->eqn = eqn;
	attr->log_cq_size = log_desc_n;
	attr->log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	cq = mlx5_devx_cmd_create_cq(ctx, attr);
	if (!cq) {
		DRV_LOG(ERR, "Can't create DevX CQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	cq_obj->umem_buf = umem_buf;
	cq_obj->umem_obj = umem_obj;
	cq_obj->cq = cq;
	cq_obj->db_rec = RTE_PTR_ADD(cq_obj->umem_buf, umem_dbrec);
	/* Mark all CQEs initially as invalid. */
	mlx5_cq_init(cq_obj, cq_size);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
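
/*
 * Usage sketch for mlx5_devx_cq_create()/mlx5_devx_cq_destroy(), kept as a
 * comment so it is not compiled. The context pointer "ctx", the queue depth
 * and the caller-owned attribute values are assumptions for illustration,
 * not values mandated by this file.
 *
 *	struct mlx5_devx_cq cq_obj;
 *	struct mlx5_devx_cq_attr cq_attr = { 0 };
 *
 *	memset(&cq_obj, 0, sizeof(cq_obj));
 *	// Fill the attribute fields this helper leaves to the caller here.
 *	if (mlx5_devx_cq_create(ctx, &cq_obj, 10, &cq_attr, SOCKET_ID_ANY))
 *		return -rte_errno;	// 2^10 = 1024 CQEs; rte_errno was set.
 *	// cq_obj.cq, cq_obj.cqes and cq_obj.db_rec are now usable.
 *	...
 *	mlx5_devx_cq_destroy(&cq_obj);
 */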

/**
 * Destroy DevX Send Queue.
 *
 * @param[in] sq
 *   DevX SQ to destroy.
 */
void
mlx5_devx_sq_destroy(struct mlx5_devx_sq *sq)
{
	if (sq->sq)
		claim_zero(mlx5_devx_cmd_destroy(sq->sq));
	if (sq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(sq->umem_obj));
	if (sq->umem_buf)
		mlx5_free((void *)(uintptr_t)sq->umem_buf);
}

/**
 * Create Send Queue using DevX API.
 *
 * Receives a pointer to a partially initialized attribute structure and
 * updates the following fields:
 *   wq_type
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_stride
 *   log_wq_sz
 *   log_wq_pg_sz
 * All other fields must be set by the caller.
 * A usage sketch is given after the function definition below.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in,out] sq_obj
 *   Pointer to SQ to create.
 * @param[in] log_wqbb_n
 *   Log (base 2) of the number of WQBBs in the queue.
 * @param[in] attr
 *   Pointer to SQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
		    struct mlx5_devx_create_sq_attr *attr, int socket)
{
	struct mlx5_devx_obj *sq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint16_t sq_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = MLX5_WQE_SIZE * sq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
				    IBV_ACCESS_LOCAL_WRITE);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for SQ object creation. */
	attr->wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	attr->wq_attr.log_wq_sz = log_wqbb_n;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create send queue object with DevX. */
	sq = mlx5_devx_cmd_create_sq(ctx, attr);
	if (!sq) {
		DRV_LOG(ERR, "Can't create DevX SQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	sq_obj->umem_buf = umem_buf;
	sq_obj->umem_obj = umem_obj;
	sq_obj->sq = sq;
	sq_obj->db_rec = RTE_PTR_ADD(sq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
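
/*
 * Usage sketch for mlx5_devx_sq_create()/mlx5_devx_sq_destroy(), kept as a
 * comment so it is not compiled. "ctx", the queue depth and the caller-owned
 * attribute values (e.g. cqn, tis_num) are assumptions for illustration.
 *
 *	struct mlx5_devx_sq sq_obj;
 *	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
 *
 *	memset(&sq_obj, 0, sizeof(sq_obj));
 *	// Fill the attribute fields this helper leaves to the caller here.
 *	if (mlx5_devx_sq_create(ctx, &sq_obj, 9, &sq_attr, SOCKET_ID_ANY))
 *		return -rte_errno;	// 2^9 = 512 WQBBs; rte_errno was set.
 *	// sq_obj.sq, sq_obj.umem_buf and sq_obj.db_rec are now usable.
 *	...
 *	mlx5_devx_sq_destroy(&sq_obj);
 */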

/**
 * Destroy DevX Receive Queue.
 *
 * @param[in] rq
 *   DevX RQ to destroy.
 */
void
mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq)
{
	if (rq->rq)
		claim_zero(mlx5_devx_cmd_destroy(rq->rq));
	if (rq->umem_obj)
		claim_zero(mlx5_os_umem_dereg(rq->umem_obj));
	if (rq->umem_buf)
		mlx5_free((void *)(uintptr_t)rq->umem_buf);
}

/**
 * Create Receive Queue using DevX API.
 *
 * Receives a pointer to a partially initialized attribute structure and
 * updates the following fields:
 *   wq_umem_valid
 *   wq_umem_id
 *   wq_umem_offset
 *   dbr_umem_valid
 *   dbr_umem_id
 *   dbr_addr
 *   log_wq_pg_sz
 * All other fields must be set by the caller.
 * A usage sketch is given after the function definition below.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in,out] rq_obj
 *   Pointer to RQ to create.
 * @param[in] wqe_size
 *   Size of WQE structure in bytes.
 * @param[in] log_wqbb_n
 *   Log (base 2) of the number of WQBBs in the queue.
 * @param[in] attr
 *   Pointer to RQ attributes structure.
 * @param[in] socket
 *   Socket to use for allocation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,
		    uint16_t log_wqbb_n,
		    struct mlx5_devx_create_rq_attr *attr, int socket)
{
	struct mlx5_devx_obj *rq = NULL;
	struct mlx5dv_devx_umem *umem_obj = NULL;
	void *umem_buf = NULL;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	uint32_t umem_size, umem_dbrec;
	uint16_t rq_size = 1 << log_wqbb_n;
	int ret;

	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Allocate memory buffer for WQEs and doorbell record. */
	umem_size = wqe_size * rq_size;
	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
	umem_size += MLX5_DBR_SIZE;
	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
			       alignment, socket);
	if (!umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for RQ.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/* Register allocated buffer in user space with DevX. */
	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf,
				    umem_size, 0);
	if (!umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for RQ.");
		rte_errno = errno;
		goto error;
	}
	/* Fill attributes for RQ object creation. */
	attr->wq_attr.wq_umem_valid = 1;
	attr->wq_attr.wq_umem_id = mlx5_os_get_umem_id(umem_obj);
	attr->wq_attr.wq_umem_offset = 0;
	attr->wq_attr.dbr_umem_valid = 1;
	attr->wq_attr.dbr_umem_id = attr->wq_attr.wq_umem_id;
	attr->wq_attr.dbr_addr = umem_dbrec;
	attr->wq_attr.log_wq_pg_sz = MLX5_LOG_PAGE_SIZE;
	/* Create receive queue object with DevX. */
	rq = mlx5_devx_cmd_create_rq(ctx, attr, socket);
	if (!rq) {
		DRV_LOG(ERR, "Can't create DevX RQ object.");
		rte_errno = ENOMEM;
		goto error;
	}
	rq_obj->umem_buf = umem_buf;
	rq_obj->umem_obj = umem_obj;
	rq_obj->rq = rq;
	rq_obj->db_rec = RTE_PTR_ADD(rq_obj->umem_buf, umem_dbrec);
	return 0;
error:
	ret = rte_errno;
	if (umem_obj)
		claim_zero(mlx5_os_umem_dereg(umem_obj));
	if (umem_buf)
		mlx5_free((void *)(uintptr_t)umem_buf);
	rte_errno = ret;
	return -rte_errno;
}
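
/*
 * Usage sketch for mlx5_devx_rq_create()/mlx5_devx_rq_destroy(), kept as a
 * comment so it is not compiled. "ctx", the per-entry WQE size and the
 * caller-owned attribute values (wq_type, cqn, log_wq_stride, log_wq_sz, ...)
 * are assumptions for illustration.
 *
 *	struct mlx5_devx_rq rq_obj;
 *	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 *
 *	memset(&rq_obj, 0, sizeof(rq_obj));
 *	// Fill the attribute fields this helper leaves to the caller here.
 *	if (mlx5_devx_rq_create(ctx, &rq_obj,
 *				sizeof(struct mlx5_wqe_data_seg),
 *				10, &rq_attr, SOCKET_ID_ANY))
 *		return -rte_errno;	// 2^10 = 1024 WQBBs; rte_errno was set.
 *	// rq_obj.rq, rq_obj.umem_buf and rq_obj.db_rec are now usable.
 *	...
 *	mlx5_devx_rq_destroy(&rq_obj);
 */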