xref: /spdk/module/accel/mlx5/accel_mlx5.c (revision 0a9c023956f8da8fd9de8986322de4e24e5d7e3b)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3  */
4 
5 #include "spdk/env.h"
6 #include "spdk/thread.h"
7 #include "spdk/queue.h"
8 #include "spdk/log.h"
9 #include "spdk/string.h"
10 #include "spdk/likely.h"
11 #include "spdk/dma.h"
12 #include "spdk/json.h"
13 #include "spdk/util.h"
14 
15 #include "spdk_internal/mlx5.h"
16 #include "spdk_internal/rdma_utils.h"
17 #include "spdk/accel_module.h"
18 #include "spdk_internal/assert.h"
19 #include "spdk_internal/sgl.h"
20 #include "accel_mlx5.h"
21 
22 #include <infiniband/mlx5dv.h>
23 #include <rdma/rdma_cma.h>
24 
25 #define ACCEL_MLX5_QP_SIZE (256u)
26 #define ACCEL_MLX5_NUM_REQUESTS (2048u - 1)
27 
28 #define ACCEL_MLX5_MAX_SGE (16u)
29 #define ACCEL_MLX5_MAX_WC (64u)
30 #define ACCEL_MLX5_ALLOC_REQS_IN_BATCH (16u)
31 
32 struct accel_mlx5_io_channel;
33 struct accel_mlx5_task;
34 
35 struct accel_mlx5_crypto_dev_ctx {
36 	struct spdk_mempool *requests_pool;
37 	struct ibv_context *context;
38 	struct ibv_pd *pd;
39 	TAILQ_ENTRY(accel_mlx5_crypto_dev_ctx) link;
40 };
41 
42 struct accel_mlx5_module {
43 	struct spdk_accel_module_if module;
44 	struct accel_mlx5_crypto_dev_ctx *crypto_ctxs;
45 	uint32_t num_crypto_ctxs;
46 	struct accel_mlx5_attr attr;
47 	bool enabled;
48 };
49 
50 enum accel_mlx5_wrid_type {
51 	ACCEL_MLX5_WRID_MKEY,
52 	ACCEL_MLX5_WRID_WRITE,
53 };
54 
55 struct accel_mlx5_wrid {
56 	uint8_t wrid;
57 };
58 
59 struct accel_mlx5_req {
60 	struct accel_mlx5_task *task;
61 	struct mlx5dv_mkey *mkey;
62 	struct ibv_sge src_sg[ACCEL_MLX5_MAX_SGE];
63 	struct ibv_sge dst_sg[ACCEL_MLX5_MAX_SGE];
64 	uint16_t src_sg_count;
65 	uint16_t dst_sg_count;
66 	struct accel_mlx5_wrid mkey_wrid;
67 	struct accel_mlx5_wrid write_wrid;
68 	TAILQ_ENTRY(accel_mlx5_req) link;
69 };
70 
71 struct accel_mlx5_task {
72 	struct spdk_accel_task base;
73 	struct accel_mlx5_dev *dev;
74 	TAILQ_HEAD(, accel_mlx5_req) reqs;
75 	uint32_t num_reqs;
76 	uint32_t num_completed_reqs;
77 	uint32_t num_submitted_reqs;
78 	int rc;
79 	struct spdk_iov_sgl src;
80 	struct spdk_iov_sgl dst;
81 	struct accel_mlx5_req *cur_req;
82 	/* If set, memory data will be encrypted during TX and wire data will be
83 	 * decrypted during RX.
84 	 * If not set, memory data will be decrypted during TX and wire data will
85 	 * be encrypted during RX. */
86 	bool encrypt_on_tx;
87 	bool inplace;
88 	TAILQ_ENTRY(accel_mlx5_task) link;
89 };
90 
91 struct accel_mlx5_qp {
92 	struct ibv_qp *qp;
93 	struct ibv_qp_ex *qpex;
94 	struct mlx5dv_qp_ex *mqpx; /* mlx5dv extension of the QP, used to post MKEY configure WRs */
95 	struct ibv_cq *cq;
96 	struct accel_mlx5_io_channel *ch;
97 	bool wr_started;
98 	uint16_t num_reqs;
99 	uint16_t num_free_reqs;
100 };
101 
102 struct accel_mlx5_dev {
103 	struct accel_mlx5_qp *qp;
104 	struct ibv_cq *cq;
105 	struct spdk_rdma_utils_mem_map *mmap;
106 	struct accel_mlx5_crypto_dev_ctx *dev_ctx;
107 	uint32_t reqs_submitted;
108 	uint32_t max_reqs;
109 	/* Pending tasks waiting for request resources */
110 	TAILQ_HEAD(, accel_mlx5_task) nomem;
111 	/* Tasks submitted to HW. We can't complete a task, even in the error case, until we reap completions for all
112 	 * submitted requests */
113 	TAILQ_HEAD(, accel_mlx5_task) in_hw;
114 	/* Tasks posted between ibv_wr_start() and ibv_wr_complete() */
115 	TAILQ_HEAD(, accel_mlx5_task) before_submit;
116 	TAILQ_ENTRY(accel_mlx5_dev) link;
117 };
118 
119 struct accel_mlx5_io_channel {
120 	struct accel_mlx5_dev *devs;
121 	struct spdk_poller *poller;
122 	uint32_t num_devs;
123 	/* Index in \b devs to be used for crypto, chosen in a round-robin fashion */
124 	uint32_t dev_idx;
125 };
126 
127 struct accel_mlx5_req_init_ctx {
128 	struct ibv_pd *pd;
129 	int rc;
130 };
131 
132 static struct accel_mlx5_module g_accel_mlx5;
133 
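/*
 * Move a freshly created QP through the INIT -> RTR -> RTS states to form a
 * loopback connection: the caller passes the QP's own qp_num as dest_qp_num,
 * so the QP sends to itself. Path attributes are reused from the queried QP
 * and port state; on an Ethernet (RoCE) link layer a GRH with the local GID
 * is attached.
 */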
134 static int
135 mlx5_qp_init_2_rts(struct ibv_qp *qp, uint32_t dest_qp_num)
136 {
137 	struct ibv_qp_attr cur_attr = {}, attr = {};
138 	struct ibv_qp_init_attr init_attr = {};
139 	struct ibv_port_attr port_attr = {};
140 	union ibv_gid gid = {};
141 	int rc;
142 	uint8_t port;
143 	int attr_mask = IBV_QP_PKEY_INDEX |
144 			IBV_QP_PORT |
145 			IBV_QP_ACCESS_FLAGS |
146 			IBV_QP_PATH_MTU |
147 			IBV_QP_AV |
148 			IBV_QP_DEST_QPN |
149 			IBV_QP_RQ_PSN |
150 			IBV_QP_MAX_DEST_RD_ATOMIC |
151 			IBV_QP_MIN_RNR_TIMER |
152 			IBV_QP_TIMEOUT |
153 			IBV_QP_RETRY_CNT |
154 			IBV_QP_RNR_RETRY |
155 			IBV_QP_SQ_PSN |
156 			IBV_QP_MAX_QP_RD_ATOMIC;
157 
158 	if (!qp) {
159 		return -EINVAL;
160 	}
161 
162 	rc = ibv_query_qp(qp, &cur_attr, attr_mask, &init_attr);
163 	if (rc) {
164 		SPDK_ERRLOG("Failed to query qp %p %u\n", qp, qp->qp_num);
165 		return rc;
166 	}
167 
168 	port = cur_attr.port_num;
169 	rc = ibv_query_port(qp->context, port, &port_attr);
170 	if (rc) {
171 		SPDK_ERRLOG("Failed to query port num %d\n", port);
172 		return rc;
173 	}
174 
175 	if (port_attr.state != IBV_PORT_ARMED && port_attr.state != IBV_PORT_ACTIVE) {
176 		SPDK_ERRLOG("Wrong port %d state %d\n", port, port_attr.state);
177 		return -ENETUNREACH;
178 	}
179 
180 	rc = ibv_query_gid(qp->context, port, 0, &gid);
181 	if (rc) {
182 		SPDK_ERRLOG("Failed to get GID on port %d, rc %d\n", port, rc);
183 		return rc;
184 	}
185 
186 	attr.qp_state = IBV_QPS_INIT;
187 	attr.pkey_index = cur_attr.pkey_index;
188 	attr.port_num = cur_attr.port_num;
189 	attr.qp_access_flags = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE;
190 	attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT | IBV_QP_ACCESS_FLAGS;
191 
192 	rc = ibv_modify_qp(qp, &attr, attr_mask);
193 	if (rc) {
194 		SPDK_ERRLOG("Failed to modify qp %p %u to INIT state, rc %d\n", qp, qp->qp_num, rc);
195 		return rc;
196 	}
197 
198 	attr.qp_state = IBV_QPS_RTR;
199 	attr.path_mtu = cur_attr.path_mtu;
200 	/* dest_qp_num == qp_num - self loopback connection */
201 	attr.dest_qp_num = dest_qp_num;
202 	attr.rq_psn = cur_attr.rq_psn;
203 	attr.max_dest_rd_atomic = cur_attr.max_dest_rd_atomic;
204 	attr.min_rnr_timer = cur_attr.min_rnr_timer;
205 	attr.ah_attr = cur_attr.ah_attr;
206 	attr.ah_attr.dlid = port_attr.lid;
207 	attr.ah_attr.sl = 0;
208 	attr.ah_attr.src_path_bits = 0;
209 
210 	if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
211 		/* Ethernet requires GRH to be set */
212 		attr.ah_attr.is_global = 1;
213 		attr.ah_attr.grh.hop_limit = 1;
214 		attr.ah_attr.grh.dgid = gid;
215 	} else {
216 		attr.ah_attr.is_global = 0;
217 	}
218 
219 	assert(attr.ah_attr.port_num == port);
220 
221 	attr_mask = IBV_QP_STATE | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN | IBV_QP_RQ_PSN |
222 		    IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER | IBV_QP_AV;
223 
224 	rc = ibv_modify_qp(qp, &attr, attr_mask);
225 	if (rc) {
226 		SPDK_ERRLOG("Failed to modify qp %p %u to RTR state, rc %d\n", qp, qp->qp_num, rc);
227 		return rc;
228 	}
229 
230 	memset(&attr, 0, sizeof(attr));
231 	attr.qp_state = IBV_QPS_RTS;
232 	attr.timeout = cur_attr.timeout;
233 	attr.retry_cnt = cur_attr.retry_cnt;
234 	attr.sq_psn = cur_attr.sq_psn;
235 	attr.rnr_retry = cur_attr.rnr_retry;
236 	attr.max_rd_atomic = cur_attr.max_rd_atomic;
237 	attr_mask = IBV_QP_STATE | IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_SQ_PSN | IBV_QP_RNR_RETRY |
238 		    IBV_QP_MAX_QP_RD_ATOMIC;
239 
240 	rc = ibv_modify_qp(qp, &attr, attr_mask);
241 	if (rc) {
242 		SPDK_ERRLOG("Failed to modify qp %p %u to RTS state, rc %d\n", qp, qp->qp_num, rc);
243 		return rc;
244 	}
245 
246 	return 0;
247 }
248 
249 static inline enum ibv_qp_state
250 accel_mlx5_get_qp_state(struct ibv_qp *qp) {
251 	struct ibv_qp_attr qp_attr;
252 	struct ibv_qp_init_attr init_attr;
253 
254 	ibv_query_qp(qp, &qp_attr, IBV_QP_STATE, &init_attr);
255 
256 	return qp_attr.qp_state;
257 }
258 
259 static inline void
260 accel_mlx5_task_complete(struct accel_mlx5_task *task)
261 {
262 	struct accel_mlx5_req *req;
263 
264 	assert(task->num_reqs == task->num_completed_reqs);
265 	SPDK_DEBUGLOG(accel_mlx5, "Complete task %p, opc %d\n", task, task->base.op_code);
266 
267 	TAILQ_FOREACH(req, &task->reqs, link) {
268 		spdk_mempool_put(task->dev->dev_ctx->requests_pool, req);
269 	}
270 	spdk_accel_task_complete(&task->base, task->rc);
271 }
272 
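/*
 * Ring the doorbell for all WRs posted since ibv_wr_start(). On success the
 * tasks collected on the before_submit list move to in_hw to wait for
 * completions; on failure they are completed immediately with the error code.
 */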
273 static inline int
274 accel_mlx5_flush_wrs(struct accel_mlx5_dev *dev)
275 {
276 	struct accel_mlx5_task *task;
277 	struct accel_mlx5_qp *qp = dev->qp;
278 	int rc;
279 
280 	if (spdk_unlikely(!qp->wr_started)) {
281 		return 0;
282 	}
283 
284 	SPDK_DEBUGLOG(accel_mlx5, "Completing WRs on dev %s\n", dev->dev_ctx->context->device->name);
285 	rc = ibv_wr_complete(qp->qpex);
286 	if (spdk_unlikely(rc)) {
287 		SPDK_ERRLOG("ibv_wr_complete rc %d\n", rc);
288 		/* Complete all affected requests */
289 		TAILQ_FOREACH(task, &dev->before_submit, link) {
290 			task->rc = rc;
291 			accel_mlx5_task_complete(task);
292 		}
293 		TAILQ_INIT(&dev->before_submit);
294 	} else {
295 		TAILQ_CONCAT(&dev->in_hw, &dev->before_submit, link);
296 	}
297 
298 	qp->wr_started = false;
299 
300 	return rc;
301 }
302 
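/*
 * Build SGEs covering exactly one crypto block (task->base.block_size bytes)
 * from the iovec SGL, translating each chunk to an lkey via the device memory
 * map. Returns the number of SGEs filled, or an error code if translation fails.
 */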
303 static inline int
304 accel_mlx5_fill_block_sge(struct accel_mlx5_req *req, struct ibv_sge *sge,
305 			  struct spdk_iov_sgl *iovs)
306 {
307 	struct spdk_rdma_utils_memory_translation translation;
308 	void *addr;
309 	uint32_t remaining = req->task->base.block_size;
310 	uint32_t size;
311 	int i = 0;
312 	int rc;
313 
314 	while (remaining) {
315 		size = spdk_min(remaining, iovs->iov->iov_len - iovs->iov_offset);
316 		addr = (void *)iovs->iov->iov_base + iovs->iov_offset;
317 		rc = spdk_rdma_utils_get_translation(req->task->dev->mmap, addr, size, &translation);
318 		if (spdk_unlikely(rc)) {
319 			SPDK_ERRLOG("Memory translation failed, addr %p, length %u\n", addr, size);
320 			return rc;
321 		}
322 		spdk_iov_sgl_advance(iovs, size);
323 		sge[i].lkey = spdk_rdma_utils_memory_translation_get_lkey(&translation);
324 		sge[i].addr = (uint64_t)addr;
325 		sge[i].length = size;
326 		i++;
327 		assert(remaining >= size);
328 		remaining -= size;
329 	}
330 
331 	return i;
332 }
333 
334 static inline bool
335 accel_mlx5_compare_iovs(struct iovec *v1, struct iovec *v2, uint32_t iovcnt)
336 {
337 	uint32_t i;
338 
339 	for (i = 0; i < iovcnt; i++) {
340 		if (v1[i].iov_base != v2[i].iov_base || v1[i].iov_len != v2[i].iov_len) {
341 			return false;
342 		}
343 	}
344 
345 	return true;
346 }
347 
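/*
 * Allocate requests from the per-device mempool for the blocks that still have
 * to be processed, limited by the number of free slots in the QP. Requests are
 * taken in batches and linked to the task; returns how many were allocated.
 */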
348 static inline uint32_t
349 accel_mlx5_task_alloc_reqs(struct accel_mlx5_task *task)
350 {
351 	struct accel_mlx5_req *reqs_tmp[ACCEL_MLX5_ALLOC_REQS_IN_BATCH], *req;
352 	uint32_t i, num_reqs, allocated_reqs = 0;
353 	uint32_t remaining_reqs = task->num_reqs - task->num_completed_reqs;
354 	uint32_t qp_slot = task->dev->max_reqs - task->dev->reqs_submitted;
355 	int rc;
356 
357 	assert(task->num_reqs >= task->num_completed_reqs);
358 	remaining_reqs = spdk_min(remaining_reqs, qp_slot);
359 
360 	while (remaining_reqs) {
361 		num_reqs = spdk_min(ACCEL_MLX5_ALLOC_REQS_IN_BATCH, remaining_reqs);
362 		rc = spdk_mempool_get_bulk(task->dev->dev_ctx->requests_pool, (void **)reqs_tmp, num_reqs);
363 		if (spdk_unlikely(rc)) {
364 			return allocated_reqs;
365 		}
366 		for (i = 0; i < num_reqs; i++) {
367 			req = reqs_tmp[i];
368 			req->src_sg_count = 0;
369 			req->dst_sg_count = 0;
370 			req->task = task;
371 			TAILQ_INSERT_TAIL(&task->reqs, req, link);
372 		}
373 		allocated_reqs += num_reqs;
374 		remaining_reqs -= num_reqs;
375 	}
376 
377 	return allocated_reqs;
378 }
379 
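/*
 * Post work requests for as many of the task's requests as the QP allows.
 * Each request consists of two WRs on the loopback QP: an inline, unsignaled
 * MKEY_CONFIGURE WR that sets the access flags, the data layout (dst SGEs, or
 * src SGEs for the in-place case) and the crypto attributes (DEK, block size,
 * per-block IV, encrypt_on_tx) on an indirect MKEY, followed by a signaled
 * RDMA_WRITE of the source SGEs into that MKEY at offset 0. The device
 * encrypts or decrypts the data as it flows through the crypto MKEY. The WRs
 * are actually rung in accel_mlx5_flush_wrs(); on error everything posted
 * since ibv_wr_start() is aborted.
 */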
380 static inline int
381 accel_mlx5_task_process(struct accel_mlx5_task *mlx5_task)
382 {
383 	struct spdk_accel_task *task = &mlx5_task->base;
384 	struct accel_mlx5_dev *dev = mlx5_task->dev;
385 	struct accel_mlx5_qp *qp = dev->qp;
386 	struct ibv_qp_ex *qpx = qp->qpex;
387 	struct mlx5dv_qp_ex *mqpx = qp->mqpx;
388 	struct mlx5dv_mkey_conf_attr mkey_attr = {};
389 	struct mlx5dv_crypto_attr cattr;
390 	struct accel_mlx5_req *req;
391 	uint64_t iv;
392 	uint32_t num_setters = 3; /* access flags, layout, crypto */
393 	int rc;
394 
395 	iv = task->iv + mlx5_task->num_completed_reqs;
396 
397 	if (!qp->wr_started) {
398 		ibv_wr_start(qpx);
399 		qp->wr_started = true;
400 	}
401 
402 	SPDK_DEBUGLOG(accel_mlx5, "begin, task, %p, reqs: total %u, submitted %u, completed %u\n",
403 		      mlx5_task, mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
404 
405 	while (mlx5_task->cur_req && dev->reqs_submitted < dev->max_reqs) {
406 		req = mlx5_task->cur_req;
407 		rc = accel_mlx5_fill_block_sge(req, req->src_sg, &mlx5_task->src);
408 		if (spdk_unlikely(rc <= 0)) {
409 			if (rc == 0) {
410 				rc = -EINVAL;
411 			}
412 			SPDK_ERRLOG("failed to set src sge, rc %d\n", rc);
			mlx5_task->rc = rc;
413 			goto err_out;
414 		}
415 		req->src_sg_count = rc;
416 
417 		/* prepare memory key - destination for WRITE operation */
418 		qpx->wr_flags = IBV_SEND_INLINE;
419 		qpx->wr_id = (uint64_t)&req->mkey_wrid;
420 		mlx5dv_wr_mkey_configure(mqpx, req->mkey, num_setters, &mkey_attr);
421 		mlx5dv_wr_set_mkey_access_flags(mqpx,
422 						IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_REMOTE_READ);
423 		if (mlx5_task->inplace) {
424 			mlx5dv_wr_set_mkey_layout_list(mqpx, req->src_sg_count, req->src_sg);
425 		} else {
426 			rc = accel_mlx5_fill_block_sge(req, req->dst_sg, &mlx5_task->dst);
427 			if (spdk_unlikely(rc <= 0)) {
428 				if (rc == 0) {
429 					rc = -EINVAL;
430 				}
431 				SPDK_ERRLOG("failed to set dst sge, rc %d\n", rc);
432 				mlx5_task->rc = rc;
433 				goto err_out;
434 			}
435 			req->dst_sg_count = rc;
436 			mlx5dv_wr_set_mkey_layout_list(mqpx, req->dst_sg_count, req->dst_sg);
437 		}
438 		SPDK_DEBUGLOG(accel_mlx5, "req %p, task %p crypto_attr: bs %u, iv %"PRIu64", enc_on_tx %d\n",
439 			      req, req->task, task->block_size, iv, mlx5_task->encrypt_on_tx);
440 		rc = spdk_mlx5_crypto_set_attr(&cattr, task->crypto_key->priv, dev->dev_ctx->pd, task->block_size,
441 					       iv++, mlx5_task->encrypt_on_tx);
442 		if (spdk_unlikely(rc)) {
443 			SPDK_ERRLOG("failed to set crypto attr, rc %d\n", rc);
444 			mlx5_task->rc = rc;
445 			goto err_out;
446 		}
447 		mlx5dv_wr_set_mkey_crypto(mqpx, &cattr);
448 
449 		/* Prepare WRITE, use rkey from mkey, remote addr is always 0 - start of the mkey */
450 		qpx->wr_flags = IBV_SEND_SIGNALED;
451 		qpx->wr_id = (uint64_t)&req->write_wrid;
452 		ibv_wr_rdma_write(qpx, req->mkey->rkey, 0);
453 		/* local buffers, SG is already filled */
454 		ibv_wr_set_sge_list(qpx, req->src_sg_count, req->src_sg);
455 
456 		mlx5_task->num_submitted_reqs++;
457 		assert(mlx5_task->num_submitted_reqs <= mlx5_task->num_reqs);
458 		dev->reqs_submitted++;
459 		mlx5_task->cur_req = TAILQ_NEXT(mlx5_task->cur_req, link);
460 	}
461 
462 	SPDK_DEBUGLOG(accel_mlx5, "end, task, %p, reqs: total %u, submitted %u, completed %u\n", mlx5_task,
463 		      mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
464 
465 	TAILQ_INSERT_TAIL(&dev->before_submit, mlx5_task, link);
466 
467 	return 0;
468 
469 err_out:
470 	/* Abort all WRs submitted since last wr_start */
471 	ibv_wr_abort(qpx);
472 	accel_mlx5_task_complete(mlx5_task);
473 	TAILQ_FOREACH(mlx5_task, &dev->before_submit, link) {
474 		mlx5_task->rc = rc;
475 		accel_mlx5_task_complete(mlx5_task);
476 	}
477 	TAILQ_INIT(&dev->before_submit);
478 
479 	return rc;
480 
481 }
482 
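/*
 * Resume a task that still has unprocessed blocks or that could not get
 * resources earlier: recycle its current requests, allocate a fresh batch and
 * process it. If an error was already recorded, the task is completed instead.
 * Returns -ENOMEM and queues the task on the nomem list when the pool is empty.
 */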
483 static inline int
484 accel_mlx5_task_continue(struct accel_mlx5_task *task)
485 {
486 	struct accel_mlx5_req *req;
487 
488 	TAILQ_FOREACH(req, &task->reqs, link) {
489 		spdk_mempool_put(task->dev->dev_ctx->requests_pool, req);
490 	}
491 	TAILQ_INIT(&task->reqs);
492 
493 	if (spdk_unlikely(task->rc)) {
494 		accel_mlx5_task_complete(task);
495 		return 0;
496 	}
497 
498 	if (spdk_unlikely(!accel_mlx5_task_alloc_reqs(task))) {
499 		/* Pool is empty, queue this task */
500 		TAILQ_INSERT_TAIL(&task->dev->nomem, task, link);
501 		return -ENOMEM;
502 	}
503 	task->cur_req = TAILQ_FIRST(&task->reqs);
504 
505 	return accel_mlx5_task_process(task);
506 }
507 
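/*
 * Validate and prepare a crypto task: source and destination must have the
 * same total length, which must be a multiple of the block size; one request
 * is needed per block. The operation is done in place when no destination is
 * given or it matches the source exactly. An initial batch of requests is
 * allocated here.
 */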
508 static inline int
509 accel_mlx5_task_init(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_dev *dev)
510 {
511 	struct spdk_accel_task *task = &mlx5_task->base;
512 	size_t src_nbytes = 0, dst_nbytes = 0;
513 	uint32_t i;
514 
515 	switch (task->op_code) {
516 	case SPDK_ACCEL_OPC_ENCRYPT:
517 		mlx5_task->encrypt_on_tx = true;
518 		break;
519 	case SPDK_ACCEL_OPC_DECRYPT:
520 		mlx5_task->encrypt_on_tx = false;
521 		break;
522 	default:
523 		SPDK_ERRLOG("Unsupported accel opcode %d\n", task->op_code);
524 		return -ENOTSUP;
525 	}
526 
527 	for (i = 0; i < task->s.iovcnt; i++) {
528 		src_nbytes += task->s.iovs[i].iov_len;
529 	}
530 
531 	for (i = 0; i < task->d.iovcnt; i++) {
532 		dst_nbytes += task->d.iovs[i].iov_len;
533 	}
534 
535 	if (spdk_unlikely(src_nbytes != dst_nbytes)) {
536 		return -EINVAL;
537 	}
538 	if (spdk_unlikely(src_nbytes % mlx5_task->base.block_size != 0)) {
539 		return -EINVAL;
540 	}
541 
542 	mlx5_task->dev = dev;
543 	mlx5_task->rc = 0;
544 	mlx5_task->num_completed_reqs = 0;
545 	mlx5_task->num_submitted_reqs = 0;
546 	mlx5_task->cur_req = NULL;
547 	mlx5_task->num_reqs = src_nbytes / mlx5_task->base.block_size;
548 	spdk_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt, 0);
549 	if (task->d.iovcnt == 0 || (task->d.iovcnt == task->s.iovcnt &&
550 				    accel_mlx5_compare_iovs(task->d.iovs, task->s.iovs, task->s.iovcnt))) {
551 		mlx5_task->inplace = true;
552 	} else {
553 		mlx5_task->inplace = false;
554 		spdk_iov_sgl_init(&mlx5_task->dst, task->d.iovs, task->d.iovcnt, 0);
555 	}
556 
557 	TAILQ_INIT(&mlx5_task->reqs);
558 	if (spdk_unlikely(!accel_mlx5_task_alloc_reqs(mlx5_task))) {
559 		/* Pool is empty, queue this task */
560 		SPDK_DEBUGLOG(accel_mlx5, "no reqs in pool, dev %s\n",
561 			      mlx5_task->dev->dev_ctx->context->device->name);
562 		return -ENOMEM;
563 	}
564 	mlx5_task->cur_req = TAILQ_FIRST(&mlx5_task->reqs);
565 
566 	SPDK_DEBUGLOG(accel_mlx5, "task %p, inplace %d, num_reqs %d\n", mlx5_task, mlx5_task->inplace,
567 		      mlx5_task->num_reqs);
568 
569 	return 0;
570 }
571 
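/*
 * Accel framework entry point. The crypto key must belong to this module.
 * A device is picked from the channel in round-robin order; if no requests
 * are available the task is queued on the device's nomem list and retried by
 * the poller.
 */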
572 static int
573 accel_mlx5_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *task)
574 {
575 	struct accel_mlx5_io_channel *ch = spdk_io_channel_get_ctx(_ch);
576 	struct accel_mlx5_task *mlx5_task = SPDK_CONTAINEROF(task, struct accel_mlx5_task, base);
577 	struct accel_mlx5_dev *dev;
578 	int rc;
579 
580 	if (!g_accel_mlx5.enabled || !task->crypto_key ||
581 	    task->crypto_key->module_if != &g_accel_mlx5.module ||
582 	    !task->crypto_key->priv) {
583 		return -EINVAL;
584 	}
585 	dev = &ch->devs[ch->dev_idx];
586 	ch->dev_idx++;
587 	if (ch->dev_idx == ch->num_devs) {
588 		ch->dev_idx = 0;
589 	}
590 
591 	rc = accel_mlx5_task_init(mlx5_task, dev);
592 	if (spdk_unlikely(rc)) {
593 		if (rc == -ENOMEM) {
594 			SPDK_DEBUGLOG(accel_mlx5, "no reqs to handle new task %p (required %u), put to queue\n", mlx5_task,
595 				      mlx5_task->num_reqs);
596 			TAILQ_INSERT_TAIL(&dev->nomem, mlx5_task, link);
597 			return 0;
598 		}
599 		return rc;
600 	}
601 
602 	return accel_mlx5_task_process(mlx5_task);
603 }
604 
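/*
 * Reap up to ACCEL_MLX5_MAX_WC completions. MKEY_CONFIGURE WRs are posted
 * unsignaled, so a completion with the MKEY wrid is only seen on error.
 * A WRITE completion retires one request; when all requests of a task are
 * done the task is completed, and when only the submitted part is done the
 * task is continued to submit the remaining blocks.
 */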
605 static inline int64_t
606 accel_mlx5_poll_cq(struct accel_mlx5_dev *dev)
607 {
608 	struct ibv_wc wc[ACCEL_MLX5_MAX_WC];
609 	struct accel_mlx5_task *task;
610 	struct accel_mlx5_req *req;
611 	struct accel_mlx5_wrid *wr;
612 	int reaped, i, rc;
613 
614 	reaped = ibv_poll_cq(dev->cq, ACCEL_MLX5_MAX_WC, wc);
615 	if (spdk_unlikely(reaped < 0)) {
616 		SPDK_ERRLOG("Error polling CQ! (%d): %s\n", errno, spdk_strerror(errno));
617 		return reaped;
618 	} else if (reaped == 0) {
619 		return 0;
620 	}
621 
622 	SPDK_DEBUGLOG(accel_mlx5, "Reaped %d cpls on dev %s\n", reaped,
623 		      dev->dev_ctx->context->device->name);
624 
625 	for (i = 0; i < reaped; i++) {
626 		wr = (struct accel_mlx5_wrid *)wc[i].wr_id;
627 
628 		switch (wr->wrid) {
629 		case ACCEL_MLX5_WRID_MKEY:
630 			/* We only get this completion in the error case */
631 			req = SPDK_CONTAINEROF(wr, struct accel_mlx5_req, mkey_wrid);
632 			if (!wc[i].status) {
633 				SPDK_ERRLOG("Got unexpected cpl for mkey configure, req %p, qp %p, state %d\n",
634 					    req, dev->qp->qp, accel_mlx5_get_qp_state(dev->qp->qp));
635 			} else {
636 				SPDK_ERRLOG("MKEY: qp %p, state %d, req %p, task %p WC status %d\n",
637 					    dev->qp->qp, accel_mlx5_get_qp_state(dev->qp->qp), req, req->task, wc[i].status);
638 			}
639 			break;
640 		case ACCEL_MLX5_WRID_WRITE:
641 			req = SPDK_CONTAINEROF(wr, struct accel_mlx5_req, write_wrid);
642 			task = req->task;
643 			if (wc[i].status) {
644 				assert(req->task);
645 				SPDK_ERRLOG("WRITE: qp %p, state %d, req %p, task %p WC status %d\n", dev->qp->qp,
646 					    accel_mlx5_get_qp_state(dev->qp->qp), req, req->task, wc[i].status);
647 				if (!task->rc) {
648 					task->rc = -EIO;
649 				}
650 			}
651 
652 			task->num_completed_reqs++;
653 			assert(dev->reqs_submitted);
654 			dev->reqs_submitted--;
655 			SPDK_DEBUGLOG(accel_mlx5, "req %p, task %p, remaining %u\n", req, task,
656 				      task->num_reqs - task->num_completed_reqs);
657 			if (task->num_completed_reqs == task->num_reqs) {
658 				TAILQ_REMOVE(&dev->in_hw, task, link);
659 				accel_mlx5_task_complete(task);
660 			} else if (task->num_completed_reqs == task->num_submitted_reqs) {
661 				assert(task->num_submitted_reqs < task->num_reqs);
662 				TAILQ_REMOVE(&dev->in_hw, task, link);
663 				rc = accel_mlx5_task_continue(task);
664 				if (spdk_unlikely(rc)) {
665 					if (rc != -ENOMEM) {
666 						task->rc = rc;
667 						accel_mlx5_task_complete(task);
668 					}
669 				}
670 			}
671 			break;
672 		}
673 	}
674 
675 	return reaped;
676 }
677 
678 static inline void
679 accel_mlx5_resubmit_nomem_tasks(struct accel_mlx5_dev *dev)
680 {
681 	struct accel_mlx5_task *task, *tmp;
682 	int rc;
683 
684 	TAILQ_FOREACH_SAFE(task, &dev->nomem, link, tmp) {
685 		TAILQ_REMOVE(&dev->nomem, task, link);
686 		rc = accel_mlx5_task_continue(task);
687 		if (rc) {
688 			if (rc == -ENOMEM) {
689 				break;
690 			} else {
691 				task->rc = rc;
692 				accel_mlx5_task_complete(task);
693 			}
694 		}
695 	}
696 }
697 
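/*
 * Per-channel poller: for every device with outstanding requests, poll the CQ
 * and flush any WRs that were started, then retry tasks that previously
 * failed to get request resources.
 */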
698 static int
699 accel_mlx5_poller(void *ctx)
700 {
701 	struct accel_mlx5_io_channel *ch = ctx;
702 	struct accel_mlx5_dev *dev;
703 
704 	int64_t completions = 0, rc;
705 	uint32_t i;
706 
707 	for (i = 0; i < ch->num_devs; i++) {
708 		dev = &ch->devs[i];
709 		if (dev->reqs_submitted) {
710 			rc = accel_mlx5_poll_cq(dev);
711 			if (spdk_unlikely(rc < 0)) {
712 				SPDK_ERRLOG("Error %"PRId64" on CQ, dev %s\n", rc, dev->dev_ctx->context->device->name);
713 			}
714 			completions += rc;
715 			accel_mlx5_flush_wrs(dev);
716 		}
717 		if (!TAILQ_EMPTY(&dev->nomem)) {
718 			accel_mlx5_resubmit_nomem_tasks(dev);
719 		}
720 	}
721 
722 	return !!completions;
723 }
724 
725 static bool
726 accel_mlx5_supports_opcode(enum spdk_accel_opcode opc)
727 {
728 	assert(g_accel_mlx5.enabled);
729 
730 	switch (opc) {
731 	case SPDK_ACCEL_OPC_ENCRYPT:
732 	case SPDK_ACCEL_OPC_DECRYPT:
733 		return true;
734 	default:
735 		return false;
736 	}
737 }
738 
739 static struct spdk_io_channel *
740 accel_mlx5_get_io_channel(void)
741 {
742 	assert(g_accel_mlx5.enabled);
743 	return spdk_get_io_channel(&g_accel_mlx5);
744 }
745 
746 static void
747 accel_mlx5_qp_destroy(struct accel_mlx5_qp *qp)
748 {
749 	if (!qp) {
750 		return;
751 	}
752 
753 	if (qp->qp) {
754 		ibv_destroy_qp(qp->qp);
755 		qp->qp = NULL;
756 	}
757 
758 	free(qp);
759 }
760 
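/*
 * Create an RC QP with the mlx5dv MKEY_CONFIGURE send op enabled and connect
 * it to itself (loopback) so that crypto WRs can be posted without a remote
 * peer. Returns a wrapper with the ibv_qp_ex/mlx5dv_qp_ex handles set up.
 */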
761 static struct accel_mlx5_qp *
762 accel_mlx5_qp_create(struct ibv_cq *cq, struct accel_mlx5_io_channel *ch, struct ibv_pd *pd,
763 		     int qp_size)
764 {
765 	struct accel_mlx5_qp *qp;
766 	struct ibv_qp_init_attr_ex dv_qp_attr = {
767 		.qp_context = ch,
768 		.cap = {
769 			.max_send_wr = qp_size,
770 			.max_recv_wr = 0,
771 			.max_send_sge = ACCEL_MLX5_MAX_SGE,
772 			.max_inline_data = sizeof(struct ibv_sge) * ACCEL_MLX5_MAX_SGE,
773 		},
774 		.qp_type = IBV_QPT_RC,
775 		.comp_mask = IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_SEND_OPS_FLAGS,
776 		.pd = pd,
777 		.send_ops_flags = IBV_QP_EX_WITH_RDMA_WRITE | IBV_QP_EX_WITH_SEND | IBV_QP_EX_WITH_RDMA_READ | IBV_QP_EX_WITH_BIND_MW,
778 		.send_cq = cq,
779 		.recv_cq = cq,
780 	};
781 	/* Attrs required for MKEY registration */
782 	struct mlx5dv_qp_init_attr mlx5_qp_attr = {
783 		.comp_mask = MLX5DV_QP_INIT_ATTR_MASK_SEND_OPS_FLAGS,
784 		.send_ops_flags = MLX5DV_QP_EX_WITH_MKEY_CONFIGURE
785 	};
786 	int rc;
787 
788 	if (!dv_qp_attr.send_cq || !dv_qp_attr.recv_cq) {
789 		return NULL;
790 	}
791 
792 	qp = calloc(1, sizeof(*qp));
793 	if (!qp) {
794 		return NULL;
795 	}
796 
797 	qp->qp = mlx5dv_create_qp(cq->context, &dv_qp_attr, &mlx5_qp_attr);
798 	if (!qp->qp) {
799 		SPDK_ERRLOG("Failed to create qpair, errno %s (%d)\n", spdk_strerror(errno), errno);
800 		free(qp);
801 		return NULL;
802 	}
803 
804 	rc = mlx5_qp_init_2_rts(qp->qp, qp->qp->qp_num);
805 	if (rc) {
806 		SPDK_ERRLOG("Failed to create loopback connection, qp_num %u\n", qp->qp->qp_num);
807 		accel_mlx5_qp_destroy(qp);
808 		return NULL;
809 	}
810 
811 	qp->qpex = ibv_qp_to_qp_ex(qp->qp);
812 	if (!qp->qpex) {
813 		SPDK_ERRLOG("Failed to get qpex\n");
814 		accel_mlx5_qp_destroy(qp);
815 		return NULL;
816 	}
817 
818 	qp->mqpx = mlx5dv_qp_ex_from_ibv_qp_ex(qp->qpex);
819 	if (!qp->mqpx) {
820 		SPDK_ERRLOG("Failed to get mqpx\n");
821 		accel_mlx5_qp_destroy(qp);
822 		return NULL;
823 	}
824 
825 	qp->num_reqs = qp_size;
826 	qp->cq = cq;
827 
828 	return qp;
829 }
830 
831 static void
832 accel_mlx5_destroy_cb(void *io_device, void *ctx_buf)
833 {
834 	struct accel_mlx5_io_channel *ch = ctx_buf;
835 	struct accel_mlx5_dev *dev;
836 	uint32_t i;
837 
838 	spdk_poller_unregister(&ch->poller);
839 	for (i = 0; i < ch->num_devs; i++) {
840 		dev = &ch->devs[i];
841 		accel_mlx5_qp_destroy(dev->qp);
842 		if (dev->cq) {
843 			ibv_destroy_cq(dev->cq);
844 			dev->cq = NULL;
845 		}
846 		spdk_rdma_utils_free_mem_map(&dev->mmap);
847 	}
848 	free(ch->devs);
849 }
850 
851 static int
852 accel_mlx5_create_cb(void *io_device, void *ctx_buf)
853 {
854 	struct accel_mlx5_io_channel *ch = ctx_buf;
855 	struct accel_mlx5_crypto_dev_ctx *dev_ctx;
856 	struct accel_mlx5_dev *dev;
857 	uint32_t i;
858 	int rc;
859 
860 	ch->devs = calloc(g_accel_mlx5.num_crypto_ctxs, sizeof(*ch->devs));
861 	if (!ch->devs) {
862 		SPDK_ERRLOG("Memory allocation failed\n");
863 		return -ENOMEM;
864 	}
865 
866 	for (i = 0; i < g_accel_mlx5.num_crypto_ctxs; i++) {
867 		dev_ctx = &g_accel_mlx5.crypto_ctxs[i];
868 		dev = &ch->devs[i];
869 		dev->dev_ctx = dev_ctx;
870 		ch->num_devs++;
871 		dev->cq = ibv_create_cq(dev_ctx->context, g_accel_mlx5.attr.qp_size, ch, NULL, 0);
872 		if (!dev->cq) {
873 			SPDK_ERRLOG("Failed to create CQ on dev %s\n", dev_ctx->context->device->name);
874 			rc = -ENOMEM;
875 			goto err_out;
876 		}
877 
878 		dev->qp = accel_mlx5_qp_create(dev->cq, ch, dev_ctx->pd, g_accel_mlx5.attr.qp_size);
879 		if (!dev->qp) {
880 			SPDK_ERRLOG("Failed to create QP on dev %s\n", dev_ctx->context->device->name);
881 			rc = -ENOMEM;
882 			goto err_out;
883 		}
884 
885 		TAILQ_INIT(&dev->nomem);
886 		TAILQ_INIT(&dev->in_hw);
887 		TAILQ_INIT(&dev->before_submit);
888 		/* Each request consumes 2 WQEs - MKEY and RDMA_WRITE. The MKEY WQE is unsignaled, so we count only RDMA_WRITE completions.
889 		 * Divide the user-defined qp_size by two for simplicity */
890 		dev->max_reqs = g_accel_mlx5.attr.qp_size / 2;
891 		dev->mmap = spdk_rdma_utils_create_mem_map(dev_ctx->pd, NULL,
892 				IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE);
893 		if (!dev->mmap) {
894 			SPDK_ERRLOG("Failed to create memory map\n");
895 			rc = -ENOMEM;
896 			goto err_out;
897 		}
898 	}
899 
900 	ch->poller = SPDK_POLLER_REGISTER(accel_mlx5_poller, ch, 0);
901 
902 	return 0;
903 
904 err_out:
905 	accel_mlx5_destroy_cb(&g_accel_mlx5, ctx_buf);
906 	return rc;
907 }
908 
909 void
910 accel_mlx5_get_default_attr(struct accel_mlx5_attr *attr)
911 {
912 	assert(attr);
913 
914 	attr->qp_size = ACCEL_MLX5_QP_SIZE;
915 	attr->num_requests = ACCEL_MLX5_NUM_REQUESTS;
916 }
917 
918 int
919 accel_mlx5_enable(struct accel_mlx5_attr *attr)
920 {
921 	if (g_accel_mlx5.enabled) {
922 		return -EEXIST;
923 	}
924 	if (attr) {
925 		g_accel_mlx5.attr = *attr;
926 	} else {
927 		accel_mlx5_get_default_attr(&g_accel_mlx5.attr);
928 	}
929 
930 	g_accel_mlx5.enabled = true;
931 	spdk_accel_module_list_add(&g_accel_mlx5.module);
932 
933 	return 0;
934 }
935 
936 static void
937 accel_mlx5_release_crypto_req(struct spdk_mempool *mp, void *cb_arg, void *_req, unsigned obj_idx)
938 {
939 	struct accel_mlx5_req *req = _req;
940 
941 	if (req->mkey) {
942 		mlx5dv_destroy_mkey(req->mkey);
943 	}
944 }
945 
946 
947 static void
948 accel_mlx5_release_reqs(struct accel_mlx5_crypto_dev_ctx *dev_ctx)
949 {
950 	if (!dev_ctx->requests_pool) {
951 		return;
952 	}
953 
954 	spdk_mempool_obj_iter(dev_ctx->requests_pool, accel_mlx5_release_crypto_req, NULL);
955 }
956 
957 static void
958 accel_mlx5_free_resources(void)
959 {
960 	uint32_t i;
961 
962 	for (i = 0; i < g_accel_mlx5.num_crypto_ctxs; i++) {
963 		accel_mlx5_release_reqs(&g_accel_mlx5.crypto_ctxs[i]);
964 		spdk_rdma_utils_put_pd(g_accel_mlx5.crypto_ctxs[i].pd);
965 	}
966 
967 	free(g_accel_mlx5.crypto_ctxs);
968 	g_accel_mlx5.crypto_ctxs = NULL;
969 }
970 
971 static void
972 accel_mlx5_deinit_cb(void *ctx)
973 {
974 	accel_mlx5_free_resources();
975 	spdk_accel_module_finish();
976 }
977 
978 static void
979 accel_mlx5_deinit(void *ctx)
980 {
981 	if (g_accel_mlx5.crypto_ctxs) {
982 		spdk_io_device_unregister(&g_accel_mlx5, accel_mlx5_deinit_cb);
983 	} else {
984 		spdk_accel_module_finish();
985 	}
986 }
987 
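/*
 * Mempool object constructor: each request gets its own indirect, crypto
 * enabled MKEY (up to ACCEL_MLX5_MAX_SGE entries) created on the device PD,
 * plus the wrid tags used to tell MKEY and WRITE completions apart.
 */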
988 static void
989 accel_mlx5_configure_crypto_req(struct spdk_mempool *mp, void *cb_arg, void *_req, unsigned obj_idx)
990 {
991 	struct accel_mlx5_req *req = _req;
992 	struct accel_mlx5_req_init_ctx *ctx = cb_arg;
993 	struct mlx5dv_mkey_init_attr mkey_attr = {
994 		.pd = ctx->pd,
995 		.max_entries = ACCEL_MLX5_MAX_SGE, /* This MKEY refers to N base MKEYs/buffers */
996 		.create_flags = MLX5DV_MKEY_INIT_ATTR_FLAGS_INDIRECT | /* This MKEY refers to other MKEYs */
997 		MLX5DV_MKEY_INIT_ATTR_FLAGS_CRYPTO
998 	};
999 
1000 	memset(req, 0, sizeof(*req));
1001 	if (ctx->rc) {
1002 		return;
1003 	}
1004 
1005 	req->mkey = mlx5dv_create_mkey(&mkey_attr);
1006 	if (!req->mkey) {
1007 		SPDK_ERRLOG("Failed to create mkey on dev %s, errno %d\n", ctx->pd->context->device->name, errno);
1008 		ctx->rc = errno;
1009 		return;
1010 	}
1011 
1012 	req->mkey_wrid.wrid = ACCEL_MLX5_WRID_MKEY;
1013 	req->write_wrid.wrid = ACCEL_MLX5_WRID_WRITE;
1014 }
1015 
1016 static int
1017 accel_mlx5_crypto_ctx_mempool_create(struct accel_mlx5_crypto_dev_ctx *crypto_dev_ctx,
1018 				     size_t num_entries)
1019 {
1020 	struct accel_mlx5_req_init_ctx init_ctx = {.pd = crypto_dev_ctx->pd };
1021 	char pool_name[32];
1022 	int rc;
1023 
1024 	/* The compiler may produce a warning like
1025 	 * warning: '%s' directive output may be truncated writing up to 63 bytes into a region of size 21
1026 	 * [-Wformat-truncation=]
1027 	 * That is expected because the ibv device name can be up to 64 bytes while the DPDK mempool API limits
1028 	 * the name to 32 bytes.
1029 	 * To suppress this warning, check the value returned by snprintf */
1030 	rc = snprintf(pool_name, 32, "accel_mlx5_%s", crypto_dev_ctx->context->device->name);
1031 	if (rc < 0) {
1032 		assert(0);
1033 		return -EINVAL;
1034 	}
1035 	crypto_dev_ctx->requests_pool = spdk_mempool_create_ctor(pool_name, num_entries,
1036 					sizeof(struct accel_mlx5_req),
1037 					SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, SPDK_ENV_SOCKET_ID_ANY,
1038 					accel_mlx5_configure_crypto_req, &init_ctx);
1039 	if (!crypto_dev_ctx->requests_pool || init_ctx.rc) {
1040 		SPDK_ERRLOG("Failed to create memory pool\n");
1041 		return init_ctx.rc ? : -ENOMEM;
1042 	}
1043 
1044 	return 0;
1045 }
1046 
1047 static int
1048 accel_mlx5_init(void)
1049 {
1050 	struct accel_mlx5_crypto_dev_ctx *crypto_dev_ctx;
1051 	struct ibv_context **rdma_devs, *dev;
1052 	struct ibv_pd *pd;
1053 	int num_devs = 0, rc = 0, i;
1054 
1055 	if (!g_accel_mlx5.enabled) {
1056 		return -EINVAL;
1057 	}
1058 
1059 	rdma_devs = spdk_mlx5_crypto_devs_get(&num_devs);
1060 	if (!rdma_devs || !num_devs) {
1061 		return -ENODEV;
1062 	}
1063 
1064 	g_accel_mlx5.crypto_ctxs = calloc(num_devs, sizeof(*g_accel_mlx5.crypto_ctxs));
1065 	if (!g_accel_mlx5.crypto_ctxs) {
1066 		SPDK_ERRLOG("Memory allocation failed\n");
1067 		rc = -ENOMEM;
1068 		goto cleanup;
1069 	}
1070 
1071 	for (i = 0; i < num_devs; i++) {
1072 		crypto_dev_ctx = &g_accel_mlx5.crypto_ctxs[i];
1073 		dev = rdma_devs[i];
1074 		pd = spdk_rdma_utils_get_pd(dev);
1075 		if (!pd) {
1076 			SPDK_ERRLOG("Failed to get PD for context %p, dev %s\n", dev, dev->device->name);
1077 			rc = -EINVAL;
1078 			goto cleanup;
1079 		}
1080 		crypto_dev_ctx->context = dev;
1081 		crypto_dev_ctx->pd = pd;
1082 		g_accel_mlx5.num_crypto_ctxs++;
1083 		rc = accel_mlx5_crypto_ctx_mempool_create(crypto_dev_ctx, g_accel_mlx5.attr.num_requests);
1084 		if (rc) {
1085 			goto cleanup;
1086 		}
1087 	}
1088 
1089 	SPDK_NOTICELOG("Accel framework mlx5 initialized, found %d devices.\n", num_devs);
1090 	spdk_io_device_register(&g_accel_mlx5, accel_mlx5_create_cb, accel_mlx5_destroy_cb,
1091 				sizeof(struct accel_mlx5_io_channel), "accel_mlx5");
1092 
1093 	spdk_mlx5_crypto_devs_release(rdma_devs);
1094 
1095 	return rc;
1096 
1097 cleanup:
1098 	spdk_mlx5_crypto_devs_release(rdma_devs);
1099 	accel_mlx5_free_resources();
1100 
1101 	return rc;
1102 }
1103 
1104 static void
1105 accel_mlx5_write_config_json(struct spdk_json_write_ctx *w)
1106 {
1107 	if (g_accel_mlx5.enabled) {
1108 		spdk_json_write_object_begin(w);
1109 		spdk_json_write_named_string(w, "method", "mlx5_scan_accel_module");
1110 		spdk_json_write_named_object_begin(w, "params");
1111 		spdk_json_write_named_uint16(w, "qp_size", g_accel_mlx5.attr.qp_size);
1112 		spdk_json_write_named_uint32(w, "num_requests", g_accel_mlx5.attr.num_requests);
1113 		spdk_json_write_object_end(w);
1114 		spdk_json_write_object_end(w);
1115 	}
1116 }
1117 
1118 static size_t
1119 accel_mlx5_get_ctx_size(void)
1120 {
1121 	return sizeof(struct accel_mlx5_task);
1122 }
1123 
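/*
 * Build an AES-XTS DEK by concatenating key and key2 and create a keytag for
 * it; the keytag is stored in key->priv and later passed together with the
 * device PD to spdk_mlx5_crypto_set_attr() when configuring the crypto MKEY.
 * The temporary DEK buffer is zeroed before being freed.
 */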
1124 static int
1125 accel_mlx5_crypto_key_init(struct spdk_accel_crypto_key *key)
1126 {
1127 	struct spdk_mlx5_crypto_dek_create_attr attr = {};
1128 	struct spdk_mlx5_crypto_keytag *keytag;
1129 	int rc;
1130 
1131 	if (!key || !key->key || !key->key2 || !key->key_size || !key->key2_size) {
1132 		return -EINVAL;
1133 	}
1134 
1135 	attr.dek = calloc(1, key->key_size + key->key2_size);
1136 	if (!attr.dek) {
1137 		return -ENOMEM;
1138 	}
1139 
1140 	memcpy(attr.dek, key->key, key->key_size);
1141 	memcpy(attr.dek + key->key_size, key->key2, key->key2_size);
1142 	attr.dek_len = key->key_size + key->key2_size;
1143 
1144 	rc = spdk_mlx5_crypto_keytag_create(&attr, &keytag);
1145 	spdk_memset_s(attr.dek, attr.dek_len, 0, attr.dek_len);
1146 	free(attr.dek);
1147 	if (rc) {
1148 		SPDK_ERRLOG("Failed to create a keytag, rc %d\n", rc);
1149 		return rc;
1150 	}
1151 
1152 	key->priv = keytag;
1153 
1154 	return 0;
1155 }
1156 
1157 static void
1158 accel_mlx5_crypto_key_deinit(struct spdk_accel_crypto_key *key)
1159 {
1160 	if (!key || key->module_if != &g_accel_mlx5.module || !key->priv) {
1161 		return;
1162 	}
1163 
1164 	spdk_mlx5_crypto_keytag_destroy(key->priv);
1165 }
1166 
1167 static bool
1168 accel_mlx5_crypto_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
1169 {
1170 	switch (cipher) {
1171 	case SPDK_ACCEL_CIPHER_AES_XTS:
1172 		return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
1173 	default:
1174 		return false;
1175 	}
1176 }
1177 
1178 static struct accel_mlx5_module g_accel_mlx5 = {
1179 	.module = {
1180 		.module_init		= accel_mlx5_init,
1181 		.module_fini		= accel_mlx5_deinit,
1182 		.write_config_json	= accel_mlx5_write_config_json,
1183 		.get_ctx_size		= accel_mlx5_get_ctx_size,
1184 		.name			= "mlx5",
1185 		.supports_opcode	= accel_mlx5_supports_opcode,
1186 		.get_io_channel		= accel_mlx5_get_io_channel,
1187 		.submit_tasks		= accel_mlx5_submit_tasks,
1188 		.crypto_key_init	= accel_mlx5_crypto_key_init,
1189 		.crypto_key_deinit	= accel_mlx5_crypto_key_deinit,
1190 		.crypto_supports_cipher	= accel_mlx5_crypto_supports_cipher,
1191 	}
1192 };
1193 
1194 SPDK_LOG_REGISTER_COMPONENT(accel_mlx5)
1195