xref: /spdk/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c (revision 935721bb1e21f686ed96c6da536272068fade793)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "accel_dpdk_cryptodev.h"
8 
9 #include "spdk/accel.h"
10 #include "spdk_internal/accel_module.h"
11 #include "spdk/env.h"
12 #include "spdk/likely.h"
13 #include "spdk/thread.h"
14 #include "spdk/util.h"
15 #include "spdk/log.h"
16 #include "spdk/json.h"
17 #include "spdk_internal/sgl.h"
18 
19 #include <rte_bus_vdev.h>
20 #include <rte_crypto.h>
21 #include <rte_cryptodev.h>
22 #include <rte_mbuf_dyn.h>
23 
24 /* The VF spread is the number of queue pairs between virtual functions; we use it to
25  * load balance the QAT device.
26  */
27 #define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD		32
28 
29 /* Max length in bytes of a crypto operation */
30 #define ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO		(64 * 1024)
31 
32 /* This controls how many ops will be dequeued from the crypto driver in one run
33  * of the poller. It is mainly a performance knob as it effectively determines how
34  * much work the poller has to do. However, even that can vary between crypto drivers:
35  * the ACCEL_DPDK_CRYPTODEV_AESNI_MB driver, for example, does all the crypto work on dequeue, whereas the
36  * QAT driver just dequeues what has already been completed.
37  */
38 #define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE	64
39 
40 /* When enqueueing, we need to supply the crypto driver with an array of pointers to
41  * operation structs. As each of these can be max 512B, we can adjust the ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO
42  * value in conjunction with the other defines to make sure we're not using crazy amounts
43  * of memory. All of these numbers can and probably should be adjusted based on the
44  * workload. By default we'll use the worst case (smallest) block size for the
45  * minimum number of array entries. As an example, an ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO size of 64K with 512B
46  * blocks would give us an enqueue array size of 128.
47  */
48 #define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO / 512)
49 
50 /* The number of MBUFS we need must be a power of two, and to support other small IOs
51  * in addition to the limits mentioned above, we go to the next power of two. It is a
52  * big number because it is one mempool for both source and destination mbufs. It may
53  * need to be bigger to support multiple crypto drivers at once.
54  */
55 #define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS			32768
56 #define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE		256
57 #define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES		128
58 #define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS		(2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES)
59 #define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE	0
60 
61 /* This is the max number of IOs we can supply to any crypto device QP at one time.
62  * It can vary between drivers.
63  */
64 #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS		2048
65 
66 /* At this moment DPDK descriptors allocation for mlx5 has some issues. We use 512
67  * as a compromise value between performance and the time spent for initialization. */
68 #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5	512
69 
70 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP		64
71 
72 /* Common for supported devices. */
73 #define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS		2
74 #define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \
75                 sizeof(struct rte_crypto_sym_op) + \
76                 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \
77                  sizeof(struct rte_crypto_sym_xform)))
78 #define ACCEL_DPDK_CRYPTODEV_IV_LENGTH			16
79 #define ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET (ACCEL_DPDK_CRYPTODEV_IV_OFFSET + ACCEL_DPDK_CRYPTODEV_IV_LENGTH)
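
/* Layout of each crypto op and its per-op private data, as implied by the offsets above and
 * by the private data size used when creating the op mempool:
 * [rte_crypto_op][rte_crypto_sym_op][2 x rte_crypto_sym_xform][IV (16 B)][queued_op struct]
 */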
80 
81 /* Driver names */
82 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB	"crypto_aesni_mb"
83 #define ACCEL_DPDK_CRYPTODEV_QAT	"crypto_qat"
84 #define ACCEL_DPDK_CRYPTODEV_QAT_ASYM	"crypto_qat_asym"
85 #define ACCEL_DPDK_CRYPTODEV_MLX5	"mlx5_pci"
86 
87 /* Supported ciphers */
88 #define ACCEL_DPDK_CRYPTODEV_AES_CBC	"AES_CBC" /* QAT and ACCEL_DPDK_CRYPTODEV_AESNI_MB */
89 #define ACCEL_DPDK_CRYPTODEV_AES_XTS	"AES_XTS" /* QAT and MLX5 */
90 
91 /* Specific to AES_CBC. */
92 #define ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH			16
93 #define ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH	16 /* AES-XTS-128 block key size. */
94 #define ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH	32 /* AES-XTS-256 block key size. */
95 #define ACCEL_DPDK_CRYPTODEV_AES_XTS_512_BLOCK_KEY_LENGTH	64 /* AES-XTS-512 block key size. */
96 
97 #define ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH		16 /* XTS part key size is always 128 bit. */
98 
99 /* Used to store the IO context (a pointer to the owning task) in each mbuf */
100 static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
101 	.name = "context_accel_dpdk_cryptodev",
102 	.size = sizeof(uint64_t),
103 	.align = __alignof__(uint64_t),
104 	.flags = 0,
105 };
106 
107 struct accel_dpdk_cryptodev_device;
108 
109 enum accel_dpdk_cryptodev_driver_type {
110 	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0,
111 	ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
112 	ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
113 	ACCEL_DPDK_CRYPTODEV_DRIVER_LAST
114 };
115 
116 enum accel_dpdk_crypto_dev_cipher_type {
117 	ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC,
118 	ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS
119 };
120 
121 struct accel_dpdk_cryptodev_qp {
122 	struct accel_dpdk_cryptodev_device *device;	/* ptr to crypto device */
123 	uint32_t num_enqueued_ops;	/* Used to decide whether to poll the qp or not */
124 	uint8_t qp; /* queue identifier */
125 	bool in_use; /* whether this node is in use or not */
126 	uint8_t index; /* used by QAT to load balance placement of qpairs */
127 	TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link;
128 };
129 
130 struct accel_dpdk_cryptodev_device {
131 	enum accel_dpdk_cryptodev_driver_type type;
132 	struct rte_cryptodev_info cdev_info; /* includes DPDK device friendly name */
133 	uint32_t qp_desc_nr; /* max number of qp descriptors to be enqueued in burst */
134 	uint8_t cdev_id; /* identifier for the device */
135 	TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs;
136 	TAILQ_ENTRY(accel_dpdk_cryptodev_device) link;
137 };
138 
139 struct accel_dpdk_cryptodev_key_handle {
140 	struct accel_dpdk_cryptodev_device *device;
141 	TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link;
142 	struct rte_cryptodev_sym_session *session_encrypt;	/* encryption session for this key */
143 	struct rte_cryptodev_sym_session *session_decrypt;	/* decryption session for this key */
144 	struct rte_crypto_sym_xform cipher_xform;		/* crypto control struct for this key */
145 };
146 
147 struct accel_dpdk_cryptodev_key_priv {
148 	enum accel_dpdk_cryptodev_driver_type driver;
149 	enum accel_dpdk_crypto_dev_cipher_type cipher;
150 	char *xts_key;
151 	TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys;
152 };
153 
154 /* For queueing up crypto operations that we can't submit for some reason */
155 struct accel_dpdk_cryptodev_queued_op {
156 	struct accel_dpdk_cryptodev_qp *qp;
157 	struct rte_crypto_op *crypto_op;
158 	struct accel_dpdk_cryptodev_task *task;
159 	TAILQ_ENTRY(accel_dpdk_cryptodev_queued_op) link;
160 };
161 #define ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH (sizeof(struct accel_dpdk_cryptodev_queued_op))
162 
163 /* The crypto channel struct. It is allocated and freed on our behalf by the io channel code.
164  * We store things in here that are needed on a per-thread basis, like the device queue pairs
165  * and the poller for this thread.
166  */
167 struct accel_dpdk_cryptodev_io_channel {
168 	/* completion poller */
169 	struct spdk_poller *poller;
170 	/* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */
171 	struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST];
172 	/* queued for re-submission to CryptoDev */
173 	TAILQ_HEAD(, accel_dpdk_cryptodev_queued_op) queued_cry_ops;
174 };
175 
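/* Per-task state. One accel task is split into one crypto op per block_size chunk;
 * cryop_cnt_remaining counts the ops still outstanding and the accel task is completed
 * (with is_failed deciding the final status) once it reaches zero. */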
176 struct accel_dpdk_cryptodev_task {
177 	struct spdk_accel_task base;
178 	uint32_t cryop_cnt_remaining;
179 	bool is_failed;
180 	TAILQ_ENTRY(accel_dpdk_cryptodev_task) link;
181 };
182 
183 /* Shared mempools between all devices on this system */
184 static struct rte_mempool *g_session_mp = NULL;
185 static struct rte_mempool *g_session_mp_priv = NULL;
186 static struct rte_mempool *g_mbuf_mp = NULL;            /* mbuf mempool */
187 static int g_mbuf_offset;
188 static struct rte_mempool *g_crypto_op_mp = NULL;	/* crypto operations, must be rte* mempool */
189 
190 static struct rte_mbuf_ext_shared_info g_shinfo = {};   /* used by DPDK mbuf macro */
191 
192 static uint8_t g_qat_total_qp = 0;
193 static uint8_t g_next_qat_index;
194 
195 static const char *g_driver_names[] = {
196 	[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]	= ACCEL_DPDK_CRYPTODEV_AESNI_MB,
197 	[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]	= ACCEL_DPDK_CRYPTODEV_QAT,
198 	[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]	= ACCEL_DPDK_CRYPTODEV_MLX5
199 };
200 static const char *g_cipher_names[] = {
201 	[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC]	= ACCEL_DPDK_CRYPTODEV_AES_CBC,
202 	[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS]	= ACCEL_DPDK_CRYPTODEV_AES_XTS,
203 };
204 
205 static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver =
206 	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
207 
208 /* Global list of all crypto devices */
209 static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER(
210 			g_crypto_devices);
211 static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER;
212 
213 static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module;
214 
215 void
216 accel_dpdk_cryptodev_enable(void)
217 {
218 	spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module);
219 }
220 
221 int
222 accel_dpdk_cryptodev_set_driver(const char *driver_name)
223 {
224 	if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
225 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
226 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
227 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
228 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
229 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
230 	} else {
231 		SPDK_ERRLOG("Unsupported driver %s\n", driver_name);
232 		return -EINVAL;
233 	}
234 
235 	SPDK_NOTICELOG("Using driver %s\n", driver_name);
236 
237 	return 0;
238 }
239 
240 const char *
241 accel_dpdk_cryptodev_get_driver(void)
242 {
243 	return g_driver_names[g_dpdk_cryptodev_driver];
244 }
245 
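/* Remove every op queued for resubmission that belongs to the given task and return its
 * mbufs and crypto ops to their pools. Called when one of the task's ops could not be
 * resubmitted and the whole task is being failed. */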
246 static void
247 cancel_queued_crypto_ops(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
248 			 struct accel_dpdk_cryptodev_task *task)
249 {
250 	struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
251 	struct rte_crypto_op *cancelled_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
252 	struct accel_dpdk_cryptodev_queued_op *op_to_cancel, *tmp_op;
253 	struct rte_crypto_op *crypto_op;
254 	int num_mbufs = 0, num_dequeued_ops = 0;
255 
256 	/* Remove all ops from the failed IO. Since we don't know the
257 	 * order we have to check them all. */
258 	TAILQ_FOREACH_SAFE(op_to_cancel, &crypto_ch->queued_cry_ops, link, tmp_op) {
259 		/* Checking if this is our op. One IO contains multiple ops. */
260 		if (task == op_to_cancel->task) {
261 			crypto_op = op_to_cancel->crypto_op;
262 			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_cancel, link);
263 
264 			/* Populating lists for freeing mbufs and ops. */
265 			mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_src;
266 			if (crypto_op->sym->m_dst) {
267 				mbufs_to_free[num_mbufs++] = (void *)crypto_op->sym->m_dst;
268 			}
269 			cancelled_ops[num_dequeued_ops++] = crypto_op;
270 		}
271 	}
272 
273 	/* Now bulk free both mbufs and crypto operations. */
274 	if (num_dequeued_ops > 0) {
275 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)cancelled_ops,
276 				     num_dequeued_ops);
277 		assert(num_mbufs > 0);
278 		/* This also releases chained mbufs if any. */
279 		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
280 	}
281 }
282 
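/* Dequeue one burst of completed crypto ops from the qp, mark failed tasks, complete any
 * task whose last op just finished and bulk-free the associated mbufs and ops.
 * Returns the number of ops dequeued. */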
283 static inline uint16_t
284 accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp)
285 {
286 	struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
287 	struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
288 	struct accel_dpdk_cryptodev_task *task;
289 	uint32_t num_mbufs = 0;
290 	int i;
291 	uint16_t num_dequeued_ops;
292 
293 	/* Each run of the poller will get just what the device has available
294 	 * at the moment we call it; we don't check again after draining the
295 	 * first batch.
296 	 */
297 	num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp,
298 			   dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE);
299 	/* Check if operation was processed successfully */
300 	for (i = 0; i < num_dequeued_ops; i++) {
301 
302 		/* We don't know the order or association of the crypto ops wrt any
303 		 * particular task, so we need to look at each one and determine if it's
304 		 * the last one for its task or not.
305 		 */
306 		task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src,
307 				g_mbuf_offset, uint64_t *);
308 		assert(task != NULL);
309 
310 		if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
311 			SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status);
312 			/* Update the task status to error, we'll still process the
313 			 * rest of the crypto ops for this task though so they
314 			 * aren't left hanging.
315 			 */
316 			task->is_failed = true;
317 		}
318 
319 		/* Return the associated src and dst mbufs by collecting them into
320 		 * an array that we can use the bulk API to free after the loop.
321 		 */
322 		*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
323 		mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
324 		if (dequeued_ops[i]->sym->m_dst) {
325 			mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
326 		}
327 
328 		assert(task->cryop_cnt_remaining > 0);
329 		/* If this was the last crypto op for the task, complete it */
330 		if (--task->cryop_cnt_remaining == 0) {
331 			/* Complete the IO */
332 			spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0);
333 		}
334 	}
335 
336 	/* Now bulk free both mbufs and crypto operations. */
337 	if (num_dequeued_ops > 0) {
338 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops);
339 		assert(num_mbufs > 0);
340 		/* This also releases chained mbufs if any. */
341 		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
342 	}
343 
344 	assert(qp->num_enqueued_ops >= num_dequeued_ops);
345 	qp->num_enqueued_ops -= num_dequeued_ops;
346 
347 	return num_dequeued_ops;
348 }
349 
350 /* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at
351  * the device. Then we need to decide if what we've got so far (including previous poller
352  * runs) totals up to one or more complete tasks. */
353 static int
354 accel_dpdk_cryptodev_poller(void *args)
355 {
356 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = args;
357 	struct accel_dpdk_cryptodev_qp *qp;
358 	struct accel_dpdk_cryptodev_task *task;
359 	struct accel_dpdk_cryptodev_queued_op *op_to_resubmit;
360 	uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0;
361 	uint16_t enqueued;
362 	int i;
363 
364 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
365 		qp = crypto_ch->device_qp[i];
366 		/* Avoid polling "idle" qps since it may affect performance */
367 		if (qp && qp->num_enqueued_ops) {
368 			num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp);
369 		}
370 	}
371 
372 	/* Check if there are any queued crypto ops to process */
373 	while (!TAILQ_EMPTY(&crypto_ch->queued_cry_ops)) {
374 		op_to_resubmit = TAILQ_FIRST(&crypto_ch->queued_cry_ops);
375 		task = op_to_resubmit->task;
376 		qp = op_to_resubmit->qp;
377 		enqueued = rte_cryptodev_enqueue_burst(qp->device->cdev_id,
378 						       qp->qp,
379 						       &op_to_resubmit->crypto_op,
380 						       1);
381 		if (enqueued == 1) {
382 			TAILQ_REMOVE(&crypto_ch->queued_cry_ops, op_to_resubmit, link);
383 			qp->num_enqueued_ops++;
384 			num_enqueued_ops++;
385 		} else {
386 			if (op_to_resubmit->crypto_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
387 				/* If we couldn't get one, just break and try again later. */
388 				break;
389 			} else {
390 				/* Something is really wrong with the op. Most probably the
391 				 * mbuf is broken or the HW is not able to process the request.
392 				 * Fail the IO and remove its ops from the queued ops list. */
393 				task->is_failed = true;
394 
395 				cancel_queued_crypto_ops(crypto_ch, task);
396 
397 				/* Fail the IO if there is nothing left on device. */
398 				if (--task->cryop_cnt_remaining == 0) {
399 					spdk_accel_task_complete(&task->base, -EFAULT);
400 				}
401 			}
402 		}
403 	}
404 
405 	return !!(num_dequeued_ops + num_enqueued_ops);
406 }
407 
408 /* Allocate a new mbuf of @remainder size with data pointed to by @addr and attach
409  * it to @orig_mbuf. */
410 static inline int
411 accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task,
412 		struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder)
413 {
414 	uint64_t phys_addr, phys_len, remainder = *_remainder;
415 	struct rte_mbuf *chain_mbuf;
416 	int rc;
417 
418 	phys_len = remainder;
419 	phys_addr = spdk_vtophys((void *)addr, &phys_len);
420 	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
421 		return -EFAULT;
422 	}
423 	remainder = spdk_min(remainder, phys_len);
424 	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
425 	if (spdk_unlikely(rc)) {
426 		return -ENOMEM;
427 	}
428 	/* Store context in every mbuf as we don't know anything about completion order */
429 	*RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
430 	rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo);
431 	rte_pktmbuf_append(chain_mbuf, remainder);
432 
433 	/* The chained buffer is released by rte_pktmbuf_free_bulk() automatically. */
434 	rte_pktmbuf_chain(orig_mbuf, chain_mbuf);
435 	*_remainder = remainder;
436 
437 	return 0;
438 }
439 
440 /* Attach the data buffer pointed to by @addr to @mbuf. Return the utilized length of the
441  * contiguous space that was physically available. */
442 static inline uint64_t
443 accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf,
444 				     uint8_t *addr, uint32_t len)
445 {
446 	uint64_t phys_addr, phys_len;
447 
448 	/* Store context in every mbuf as we don't know anything about completion order */
449 	*RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
450 
451 	phys_len = len;
452 	phys_addr = spdk_vtophys((void *)addr, &phys_len);
453 	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) {
454 		return 0;
455 	}
456 	assert(phys_len <= len);
457 
458 	/* Set the mbuf elements address and length. */
459 	rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
460 	rte_pktmbuf_append(mbuf, phys_len);
461 
462 	return phys_len;
463 }
464 
465 static inline struct accel_dpdk_cryptodev_key_handle *
466 accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
467 				      struct accel_dpdk_cryptodev_key_priv *key)
468 {
469 	struct accel_dpdk_cryptodev_key_handle *key_handle;
470 
471 	if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
472 		/* Crypto key is registered on all available devices while io_channel opens CQ/QP on a single device.
473 		 * We need to iterate a list of key entries to find a suitable device */
474 		TAILQ_FOREACH(key_handle, &key->dev_keys, link) {
475 			if (key_handle->device->cdev_id ==
476 			    crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) {
477 				return key_handle;
478 			}
479 		}
480 		return NULL;
481 	} else {
482 		return TAILQ_FIRST(&key->dev_keys);
483 	}
484 }
485 
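/* Bulk-allocate 'count' source mbufs, optionally 'count' destination mbufs (only for
 * out-of-place operations) and 'count' crypto ops. On failure, everything already taken
 * is returned to its pool. */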
486 static inline int
487 accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
488 		struct rte_crypto_op **crypto_ops, int count)
489 {
490 	int rc;
491 
492 	/* Get the number of source mbufs that we need. These will always be 1:1 because we
493 	 * don't support chaining. The reason we don't is our decision to use the
494 	 * LBA as the IV: there can be no case where we'd need >1 mbuf per crypto op, or the
495 	 * op would span more than 1 LBA.
496 	 */
497 	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count);
498 	if (rc) {
499 		SPDK_ERRLOG("Failed to get src_mbufs!\n");
500 		return -ENOMEM;
501 	}
502 
503 	/* Get the same amount to describe the destination. If the crypto operation is in-place (no dst_mbufs) we just skip this. */
504 	if (dst_mbufs) {
505 		rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count);
506 		if (rc) {
507 			SPDK_ERRLOG("Failed to get dst_mbufs!\n");
508 			goto err_free_src;
509 		}
510 	}
511 
512 #ifdef __clang_analyzer__
513 	/* silence scan-build false positive */
514 	SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE,
515 					      0x1000);
516 #endif
517 	/* Allocate crypto operations. */
518 	rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
519 				      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
520 				      crypto_ops, count);
521 	if (rc < count) {
522 		SPDK_ERRLOG("Failed to allocate crypto ops!\n");
523 		goto err_free_ops;
524 	}
525 
526 	return 0;
527 
528 err_free_ops:
529 	if (rc > 0) {
530 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc);
531 	}
532 	if (dst_mbufs) {
533 		/* This also releases chained mbufs if any. */
534 		rte_pktmbuf_free_bulk(dst_mbufs, count);
535 	}
536 err_free_src:
537 	/* This also releases chained mbufs if any. */
538 	rte_pktmbuf_free_bulk(src_mbufs, count);
539 
540 	return -ENOMEM;
541 }
542 
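/* Describe one block_size chunk of the SGL with the given mbuf: attach as much physically
 * contiguous memory as is available and chain additional mbufs for the remainder, e.g.
 * when the block crosses a page or iovec boundary. */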
543 static inline int
544 accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf,
545 		struct accel_dpdk_cryptodev_task *task)
546 {
547 	int rc;
548 	uint8_t *buf_addr;
549 	uint64_t phys_len;
550 	uint64_t remainder;
551 	uint64_t buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset);
552 
553 	buf_addr = sgl->iov->iov_base + sgl->iov_offset;
554 	phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len);
555 	if (spdk_unlikely(phys_len == 0)) {
556 		return -EFAULT;
557 	}
558 	buf_len = spdk_min(buf_len, phys_len);
559 	spdk_iov_sgl_advance(sgl, buf_len);
560 
561 	/* Handle the case where the block crosses a page or iovec boundary. */
562 	remainder = task->base.block_size - buf_len;
563 	while (remainder) {
564 		buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset);
565 		buf_addr = sgl->iov->iov_base + sgl->iov_offset;
566 		rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len);
567 		if (spdk_unlikely(rc)) {
568 			return rc;
569 		}
570 		spdk_iov_sgl_advance(sgl, buf_len);
571 		remainder -= buf_len;
572 	}
573 
574 	return 0;
575 }
576 
577 static inline void
578 accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv)
579 {
580 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
581 
582 	/* Set the IV - we use the LBA of the crypto_op */
583 	memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH);
584 	rte_memcpy(iv_ptr, &iv, sizeof(uint64_t));
585 }
586 
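/* Split an encrypt/decrypt accel task into block_size sized crypto ops: one src mbuf
 * (plus a dst mbuf for out-of-place operation) and one rte_crypto_op per block, with the
 * IV derived from task->base.iv plus the block index. The ops are submitted with
 * rte_cryptodev_enqueue_burst(); whatever the device cannot accept right now is put on the
 * channel's queued_cry_ops list and resubmitted by the poller. */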
587 static int
588 accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
589 				  struct accel_dpdk_cryptodev_task *task)
590 {
591 	uint16_t num_enqueued_ops;
592 	uint32_t cryop_cnt;
593 	uint32_t crypto_len = task->base.block_size;
594 	uint64_t total_length = task->base.nbytes;
595 	uint64_t iv_start = task->base.iv;
596 	struct accel_dpdk_cryptodev_queued_op *op_to_queue;
597 	uint32_t crypto_index;
598 	struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
599 	struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
600 	struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
601 	struct rte_cryptodev_sym_session *session;
602 	struct accel_dpdk_cryptodev_key_priv *priv;
603 	struct accel_dpdk_cryptodev_key_handle *key_handle;
604 	struct accel_dpdk_cryptodev_qp *qp;
605 	struct accel_dpdk_cryptodev_device *dev;
606 	struct spdk_iov_sgl src, dst = {};
607 	bool inplace = true;
608 	int rc;
609 
610 	if (spdk_unlikely(!task->base.crypto_key ||
611 			  task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) {
612 		return -EINVAL;
613 	}
614 	priv = task->base.crypto_key->priv;
615 
616 	assert(task->base.nbytes);
617 	assert(task->base.block_size);
618 	assert(task->base.nbytes % task->base.block_size == 0);
619 	assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST);
620 
621 	if (total_length > ACCEL_DPDK_CRYPTODEV_CRYPTO_MAX_IO) {
622 		return -E2BIG;
623 	}
624 
625 	cryop_cnt =  task->base.nbytes / task->base.block_size;
626 	qp = crypto_ch->device_qp[priv->driver];
627 	assert(qp);
628 	dev = qp->device;
629 	assert(dev);
630 
631 	key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv);
632 	if (spdk_unlikely(!key_handle)) {
633 		SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver],
634 			    g_cipher_names[priv->cipher]);
635 		return -EINVAL;
636 	}
637 	/* mlx5_pci binds keys to a specific device, we can't use a key with any device */
638 	assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
639 
640 	if (task->base.op_code == ACCEL_OPC_ENCRYPT) {
641 		session = key_handle->session_encrypt;
642 	} else if (task->base.op_code == ACCEL_OPC_DECRYPT) {
643 		session = key_handle->session_decrypt;
644 	} else {
645 		return -EINVAL;
646 	}
647 
648 	/* Check if the crypto operation is in-place: no destination, or source == destination */
649 	if (task->base.s.iovcnt == task->base.d.iovcnt) {
650 		if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) {
651 			inplace = false;
652 		}
653 	} else if (task->base.d.iovcnt != 0) {
654 		inplace = false;
655 	}
656 
657 	rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs, crypto_ops,
658 			cryop_cnt);
659 	if (rc) {
660 		return rc;
661 	}
662 	/* This value is used in the completion callback to determine when the accel task is complete.
663 	 */
664 	task->cryop_cnt_remaining = cryop_cnt;
665 
666 	/* As we don't support chaining because of a decision to use LBA as IV, construction
667 	 * of crypto operations is straightforward. We build the op, the src mbuf and the
668 	 * dst_mbuf in our local arrays by looping through the length of the accel task and
669 	 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
670 	 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
671 	 * mbuf per crypto operation.
672 	 */
673 	spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0);
674 	if (!inplace) {
675 		spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0);
676 	}
677 
678 	for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
679 		rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task);
680 		if (spdk_unlikely(rc)) {
681 			goto err_free_ops;
682 		}
683 		accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start);
684 		iv_start++;
685 
686 		/* Set the data to encrypt/decrypt length */
687 		crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
688 		crypto_ops[crypto_index]->sym->cipher.data.offset = 0;
689 		rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session);
690 
691 		/* link the mbuf to the crypto op. */
692 		crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
693 
694 		if (inplace) {
695 			crypto_ops[crypto_index]->sym->m_dst = NULL;
696 		} else {
697 			rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
698 			if (spdk_unlikely(rc)) {
699 				goto err_free_ops;
700 			}
701 			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
702 		}
703 	}
704 
705 	/* Enqueue everything we've got but limit by the max number of descriptors we
706 	 * configured the crypto device for.
707 	 */
708 	num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, spdk_min(cryop_cnt,
709 			   dev->qp_desc_nr));
710 
711 	qp->num_enqueued_ops += num_enqueued_ops;
712 	/* If we were unable to enqueue everything, decide what to do based on the
713 	 * status of the first op that was not enqueued.
714 	 */
715 	if (num_enqueued_ops < cryop_cnt) {
716 		switch (crypto_ops[num_enqueued_ops]->status) {
717 		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
718 			/* Queue them up on a linked list to be resubmitted via the poller. */
719 			for (crypto_index = num_enqueued_ops; crypto_index < cryop_cnt; crypto_index++) {
720 				op_to_queue = (struct accel_dpdk_cryptodev_queued_op *)rte_crypto_op_ctod_offset(
721 						      crypto_ops[crypto_index],
722 						      uint8_t *, ACCEL_DPDK_CRYPTODEV_QUEUED_OP_OFFSET);
723 				op_to_queue->qp = qp;
724 				op_to_queue->crypto_op = crypto_ops[crypto_index];
725 				op_to_queue->task = task;
726 				TAILQ_INSERT_TAIL(&crypto_ch->queued_cry_ops, op_to_queue, link);
727 			}
728 			break;
729 		default:
730 			/* For all other statuses, mark task as failed so that the poller will pick
731 			 * the failure up for the overall task status.
732 			 */
733 			task->is_failed = true;
734 			if (num_enqueued_ops == 0) {
735 				/* Nothing was enqueued and the failure was not because the device
736 				 * was busy, so fail the task now as the poller won't know anything about it.
737 				 */
738 				rc = -EINVAL;
739 				goto err_free_ops;
740 			}
741 			break;
742 		}
743 	}
744 
745 	return 0;
746 
747 	/* Error cleanup paths. */
748 err_free_ops:
749 	if (!inplace) {
750 		/* This also releases chained mbufs if any. */
751 		rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
752 	}
753 	rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt);
754 	/* This also releases chained mbufs if any. */
755 	rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
756 	return rc;
757 }
758 
759 static inline struct accel_dpdk_cryptodev_qp *
760 accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type)
761 {
762 	struct accel_dpdk_cryptodev_device *device, *device_tmp;
763 	struct accel_dpdk_cryptodev_qp *qpair;
764 
765 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) {
766 		if (device->type != type) {
767 			continue;
768 		}
769 		TAILQ_FOREACH(qpair, &device->qpairs, link) {
770 			if (!qpair->in_use) {
771 				qpair->in_use = true;
772 				return qpair;
773 			}
774 		}
775 	}
776 
777 	return NULL;
778 }
779 
780 /* Helper function for the channel creation callback.
781  * Returns the number of drivers assigned to the channel */
782 static uint32_t
783 accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch)
784 {
785 	struct accel_dpdk_cryptodev_device *device;
786 	struct accel_dpdk_cryptodev_qp *device_qp;
787 	uint32_t num_drivers = 0;
788 	bool qat_found = false;
789 
790 	pthread_mutex_lock(&g_device_lock);
791 
792 	TAILQ_FOREACH(device, &g_crypto_devices, link) {
793 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) {
794 			/* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
795 			 * workload out over the multiple virtual functions in the device. For the devices
796 			 * where this isn't the case, it doesn't hurt.
797 			 */
798 			TAILQ_FOREACH(device_qp, &device->qpairs, link) {
799 				if (device_qp->index != g_next_qat_index) {
800 					continue;
801 				}
802 				if (device_qp->in_use == false) {
803 					assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL);
804 					crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp;
805 					device_qp->in_use = true;
806 					g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp;
807 					qat_found = true;
808 					num_drivers++;
809 					break;
810 				} else {
811 					/* if the preferred index is used, skip to the next one in this set. */
812 					g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
813 				}
814 			}
815 		}
816 	}
817 
818 	/* For ACCEL_DPDK_CRYPTODEV_AESNI_MB and MLX5_PCI, select devices in a round-robin manner */
819 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB);
820 	if (device_qp) {
821 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL);
822 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp;
823 		num_drivers++;
824 	}
825 
826 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
827 	if (device_qp) {
828 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL);
829 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp;
830 		num_drivers++;
831 	}
832 
833 	pthread_mutex_unlock(&g_device_lock);
834 
835 	return num_drivers;
836 }
837 
838 static void
839 _accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf)
840 {
841 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
842 			ctx_buf;
843 	int i;
844 
845 	pthread_mutex_lock(&g_device_lock);
846 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
847 		if (crypto_ch->device_qp[i]) {
848 			crypto_ch->device_qp[i]->in_use = false;
849 		}
850 	}
851 	pthread_mutex_unlock(&g_device_lock);
852 
853 	spdk_poller_unregister(&crypto_ch->poller);
854 }
855 
856 static int
857 _accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf)
858 {
859 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
860 			ctx_buf;
861 
862 	crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0);
863 	if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) {
864 		SPDK_ERRLOG("No crypto drivers assigned\n");
865 		spdk_poller_unregister(&crypto_ch->poller);
866 		return -EINVAL;
867 	}
868 
869 	/* We use this to queue up crypto ops when the device is busy. */
870 	TAILQ_INIT(&crypto_ch->queued_cry_ops);
871 
872 	return 0;
873 }
874 
875 static struct spdk_io_channel *
876 accel_dpdk_cryptodev_get_io_channel(void)
877 {
878 	return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module);
879 }
880 
881 static size_t
882 accel_dpdk_cryptodev_ctx_size(void)
883 {
884 	return sizeof(struct accel_dpdk_cryptodev_task);
885 }
886 
887 static bool
888 accel_dpdk_cryptodev_supports_opcode(enum accel_opcode opc)
889 {
890 	switch (opc) {
891 	case ACCEL_OPC_ENCRYPT:
892 	case ACCEL_OPC_DECRYPT:
893 		return true;
894 	default:
895 		return false;
896 	}
897 }
898 
899 static int
900 accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task)
901 {
902 	struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task,
903 			base);
904 	struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch);
905 
906 	return accel_dpdk_cryptodev_process_task(ch, task);
907 }
908 
909 /* Dummy callback used by DPDK when freeing externally attached mbuf buffers; we free them
910  * ourselves, but this callback has to exist. */
911 static void
912 shinfo_free_cb(void *arg1, void *arg2)
913 {
914 }
915 
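/* Probe and start cryptodev 'index': classify the driver, configure the device and all of
 * its queue pairs up front (they are assigned to channels later) and add it to the global
 * device list. */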
916 static int
917 accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores)
918 {
919 	struct rte_cryptodev_qp_conf qp_conf = { .mp_session = g_session_mp, .mp_session_private = g_session_mp_priv };
920 	/* Setup queue pairs. */
921 	struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_SOCKET_ID_ANY };
922 	struct accel_dpdk_cryptodev_device *device;
923 	uint8_t j, cdev_id, cdrv_id;
924 	struct accel_dpdk_cryptodev_qp *dev_qp;
925 	int rc;
926 
927 	device = calloc(1, sizeof(*device));
928 	if (!device) {
929 		return -ENOMEM;
930 	}
931 
932 	/* Get details about this device. */
933 	rte_cryptodev_info_get(index, &device->cdev_info);
934 	cdrv_id = device->cdev_info.driver_id;
935 	cdev_id = device->cdev_id = index;
936 
937 	if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
938 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
939 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
940 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
941 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
942 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
943 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
944 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5;
945 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
946 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) {
947 		/* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */
948 		rc = 0;
949 		goto err;
950 	} else {
951 		SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n",
952 			    cdev_id, device->cdev_info.driver_name);
953 		rc = -EINVAL;
954 		goto err;
955 	}
956 
957 	/* Before going any further, make sure we have enough resources for this
958 	 * device type to function. We need a unique queue pair per core across each
959 	 * device type to remain lockless.
960 	 */
961 	if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
962 	     device->cdev_info.max_nb_queue_pairs) < num_lcores) {
963 		SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
964 			    device->cdev_info.driver_name);
965 		SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
966 		rc = -EINVAL;
967 		goto err;
968 	}
969 
970 	conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs;
971 	rc = rte_cryptodev_configure(cdev_id, &conf);
972 	if (rc < 0) {
973 		SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n",
974 			    cdev_id, rc);
975 		rc = -EINVAL;
976 		goto err;
977 	}
978 
979 	/* Pre-setup all potential qpairs now and assign them in the channel
980 	 * callback. If we were to create them there, we'd have to stop the
981 	 * entire device affecting all other threads that might be using it
982 	 * entire device, affecting all other threads that might be using it,
983 	 */
984 	qp_conf.nb_descriptors = device->qp_desc_nr;
985 	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
986 		rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY);
987 		if (rc < 0) {
988 			SPDK_ERRLOG("Failed to setup queue pair %u on "
989 				    "cryptodev %u: error %d\n", j, cdev_id, rc);
990 			rc = -EINVAL;
991 			goto err_qp_setup;
992 		}
993 	}
994 
995 	rc = rte_cryptodev_start(cdev_id);
996 	if (rc < 0) {
997 		SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc);
998 		rc = -EINVAL;
999 		goto err_dev_start;
1000 	}
1001 
1002 	TAILQ_INIT(&device->qpairs);
1003 	/* Build up lists of device/qp combinations per PMD */
1004 	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
1005 		dev_qp = calloc(1, sizeof(*dev_qp));
1006 		if (!dev_qp) {
1007 			rc = -ENOMEM;
1008 			goto err_qp_alloc;
1009 		}
1010 		dev_qp->device = device;
1011 		dev_qp->qp = j;
1012 		dev_qp->in_use = false;
1013 		TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link);
1014 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1015 			dev_qp->index = g_qat_total_qp++;
1016 		}
1017 	}
1018 	/* Add to our list of available crypto devices. */
1019 	TAILQ_INSERT_TAIL(&g_crypto_devices, device, link);
1020 
1021 	return 0;
1022 
1023 err_qp_alloc:
1024 	TAILQ_FOREACH(dev_qp, &device->qpairs, link) {
1025 		if (dev_qp->device->cdev_id != device->cdev_id) {
1026 			continue;
1027 		}
1028 		free(dev_qp);
1029 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1030 			assert(g_qat_total_qp);
1031 			g_qat_total_qp--;
1032 		}
1033 	}
1034 	rte_cryptodev_stop(cdev_id);
1035 err_dev_start:
1036 err_qp_setup:
1037 	rte_cryptodev_close(cdev_id);
1038 err:
1039 	free(device);
1040 
1041 	return rc;
1042 }
1043 
1044 static void
1045 accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device)
1046 {
1047 	struct accel_dpdk_cryptodev_qp *dev_qp, *tmp;
1048 
1049 	assert(device);
1050 
1051 	TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) {
1052 		free(dev_qp);
1053 	}
1054 	if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1055 		assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs);
1056 		g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs;
1057 	}
1058 	rte_cryptodev_stop(device->cdev_id);
1059 	rte_cryptodev_close(device->cdev_id);
1060 	free(device);
1061 }
1062 
1063 static int
1064 accel_dpdk_cryptodev_init(void)
1065 {
1066 	uint8_t cdev_count;
1067 	uint8_t cdev_id;
1068 	int i, rc;
1069 	struct accel_dpdk_cryptodev_device *device, *tmp_dev;
1070 	unsigned int max_sess_size = 0, sess_size;
1071 	uint16_t num_lcores = rte_lcore_count();
1072 	char aesni_args[32];
1073 
1074 	/* Only the first call via module init should init the crypto drivers. */
1075 	if (g_session_mp != NULL) {
1076 		return 0;
1077 	}
1078 
1079 	/* We always init ACCEL_DPDK_CRYPTODEV_AESNI_MB */
1080 	snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d",
1081 		 ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP);
1082 	rc = rte_vdev_init(ACCEL_DPDK_CRYPTODEV_AESNI_MB, aesni_args);
1083 	if (rc) {
1084 		SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. "
1085 			       "Possibly %s is not supported by DPDK library. "
1086 			       "Keep going...\n", ACCEL_DPDK_CRYPTODEV_AESNI_MB, rc, ACCEL_DPDK_CRYPTODEV_AESNI_MB);
1087 	}
1088 
1089 	/* If we have no crypto devices, there's no reason to continue. */
1090 	cdev_count = rte_cryptodev_count();
1091 	SPDK_NOTICELOG("Found crypto devices: %d\n", (int)cdev_count);
1092 	if (cdev_count == 0) {
1093 		return 0;
1094 	}
1095 
1096 	g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
1097 	if (g_mbuf_offset < 0) {
1098 		SPDK_ERRLOG("error registering dynamic field with DPDK\n");
1099 		return -EINVAL;
1100 	}
1101 
1102 	/* Create global mempools, shared by all devices regardless of type */
1103 	/* First determine the max session size; most pools are shared by all the devices,
1104 	 * so we need to find the global max session size. */
1105 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1106 		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
1107 		if (sess_size > max_sess_size) {
1108 			max_sess_size = sess_size;
1109 		}
1110 	}
1111 
1112 	g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv",
1113 					       ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1114 					       NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1115 	if (g_session_mp_priv == NULL) {
1116 		SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
1117 		return -ENOMEM;
1118 	}
1119 
1120 	g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp",
1121 			ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, 0, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1122 			SOCKET_ID_ANY);
1123 	if (g_session_mp == NULL) {
1124 		SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
1125 		rc = -ENOMEM;
1126 		goto error_create_session_mp;
1127 	}
1128 
1129 	g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
1130 					    ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1131 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
1132 	if (g_mbuf_mp == NULL) {
1133 		SPDK_ERRLOG("Cannot create mbuf pool\n");
1134 		rc = -ENOMEM;
1135 		goto error_create_mbuf;
1136 	}
1137 
1138 	/* We use per-op private data, as suggested by DPDK, to store the IV and
1139 	 * our own struct for queueing ops. */
1140 	g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp",
1141 			 RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1142 			 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
1143 			 ACCEL_DPDK_CRYPTODEV_IV_LENGTH + ACCEL_DPDK_CRYPTODEV_QUEUED_OP_LENGTH, rte_socket_id());
1144 	if (g_crypto_op_mp == NULL) {
1145 		SPDK_ERRLOG("Cannot create op pool\n");
1146 		rc = -ENOMEM;
1147 		goto error_create_op;
1148 	}
1149 
1150 	/* Init all devices */
1151 	for (i = 0; i < cdev_count; i++) {
1152 		rc = accel_dpdk_cryptodev_create(i, num_lcores);
1153 		if (rc) {
1154 			goto err;
1155 		}
1156 	}
1157 
1158 	g_shinfo.free_cb = shinfo_free_cb;
1159 
1160 	spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb,
1161 				_accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel),
1162 				"accel_dpdk_cryptodev");
1163 
1164 	return 0;
1165 
1166 	/* Error cleanup paths. */
1167 err:
1168 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) {
1169 		TAILQ_REMOVE(&g_crypto_devices, device, link);
1170 		accel_dpdk_cryptodev_release(device);
1171 	}
1172 	rte_mempool_free(g_crypto_op_mp);
1173 	g_crypto_op_mp = NULL;
1174 error_create_op:
1175 	rte_mempool_free(g_mbuf_mp);
1176 	g_mbuf_mp = NULL;
1177 error_create_mbuf:
1178 	rte_mempool_free(g_session_mp);
1179 	g_session_mp = NULL;
1180 error_create_session_mp:
1181 	if (g_session_mp_priv != NULL) {
1182 		rte_mempool_free(g_session_mp_priv);
1183 		g_session_mp_priv = NULL;
1184 	}
1185 	return rc;
1186 }
1187 
1188 static void
1189 accel_dpdk_cryptodev_fini_cb(void *io_device)
1190 {
1191 	struct accel_dpdk_cryptodev_device *device, *tmp;
1192 
1193 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) {
1194 		TAILQ_REMOVE(&g_crypto_devices, device, link);
1195 		accel_dpdk_cryptodev_release(device);
1196 	}
1197 	rte_vdev_uninit(ACCEL_DPDK_CRYPTODEV_AESNI_MB);
1198 
1199 	rte_mempool_free(g_crypto_op_mp);
1200 	rte_mempool_free(g_mbuf_mp);
1201 	rte_mempool_free(g_session_mp);
1202 	if (g_session_mp_priv != NULL) {
1203 		rte_mempool_free(g_session_mp_priv);
1204 	}
1205 
1206 	spdk_accel_module_finish();
1207 }
1208 
1209 /* Called when the entire module is being torn down. */
1210 static void
1211 accel_dpdk_cryptodev_fini(void *ctx)
1212 {
1213 	spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb);
1214 }
1215 
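/* Create and initialize the encrypt and decrypt sessions for 'key' on the device that owns
 * 'key_handle'. */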
1216 static int
1217 accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key,
1218 		struct accel_dpdk_cryptodev_key_handle *key_handle)
1219 {
1220 	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1221 	int rc;
1222 
1223 	key_handle->session_encrypt = rte_cryptodev_sym_session_create(g_session_mp);
1224 	if (!key_handle->session_encrypt) {
1225 		SPDK_ERRLOG("Failed to create encrypt crypto session.\n");
1226 		return -EINVAL;
1227 	}
1228 	key_handle->session_decrypt = rte_cryptodev_sym_session_create(g_session_mp);
1229 	if (!key_handle->session_decrypt) {
1230 		SPDK_ERRLOG("Failed to create decrypt crypto session.\n");
1231 		rc = -EINVAL;
1232 		goto err_ses_encrypt;
1233 	}
1234 	key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1235 	key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET;
1236 	key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
1237 
1238 	switch (priv->cipher) {
1239 	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC:
1240 		key_handle->cipher_xform.cipher.key.data = key->key;
1241 		key_handle->cipher_xform.cipher.key.length = key->key_size;
1242 		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1243 		break;
1244 	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS:
1245 		key_handle->cipher_xform.cipher.key.data = priv->xts_key;
1246 		key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size;
1247 		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
1248 		break;
1249 	default:
1250 		SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher);
1251 		rc = -EINVAL;
1252 		goto err_ses_decrypt;
1253 	}
1254 
1255 	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1256 	rc = rte_cryptodev_sym_session_init(key_handle->device->cdev_id, key_handle->session_encrypt,
1257 					    &key_handle->cipher_xform,
1258 					    g_session_mp_priv ? g_session_mp_priv : g_session_mp);
1259 	if (rc < 0) {
1260 		SPDK_ERRLOG("Failed to init encrypt session: error %d\n", rc);
1261 		rc = -EINVAL;
1262 		goto err_ses_decrypt;
1263 	}
1264 
1265 	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
1266 	rc = rte_cryptodev_sym_session_init(key_handle->device->cdev_id, key_handle->session_decrypt,
1267 					    &key_handle->cipher_xform,
1268 					    g_session_mp_priv ? g_session_mp_priv : g_session_mp);
1269 	if (rc < 0) {
1270 		SPDK_ERRLOG("Failed to init decrypt session: error %d\n", rc);
1271 		rc = -EINVAL;
1272 		goto err_ses_decrypt;
1273 	}
1274 
1275 	return 0;
1276 
1277 err_ses_decrypt:
1278 	rte_cryptodev_sym_session_free(key_handle->session_decrypt);
1279 err_ses_encrypt:
1280 	rte_cryptodev_sym_session_free(key_handle->session_encrypt);
1281 
1282 	return rc;
1283 }
1284 
1285 static int
1286 accel_dpdk_cryptodev_validate_parameters(enum accel_dpdk_cryptodev_driver_type driver,
1287 		enum accel_dpdk_crypto_dev_cipher_type cipher, struct spdk_accel_crypto_key *key)
1288 {
1289 	/* Check that all required parameters exist */
1290 	switch (cipher) {
1291 	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC:
1292 		if (!key->key || !key->key_size) {
1293 			SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_CBC requires a key\n");
1294 			return -1;
1295 		}
1296 		if (key->key2 || key->key2_size) {
1297 			SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_CBC doesn't use key2\n");
1298 			return -1;
1299 		}
1300 		break;
1301 	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS:
1302 		if (!key->key || !key->key_size || !key->key2 || !key->key2_size) {
1303 			SPDK_ERRLOG("ACCEL_DPDK_CRYPTODEV_AES_XTS requires both key and key2\n");
1304 			return -1;
1305 		}
1306 		break;
1307 	default:
1308 		return -1;
1309 	}
1310 
1311 	/* Check driver/cipher combinations and key lengths */
1312 	switch (cipher) {
1313 	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC:
1314 		if (driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
1315 			SPDK_ERRLOG("Driver %s only supports cipher %s\n",
1316 				    g_driver_names[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI],
1317 				    g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS]);
1318 			return -1;
1319 		}
1320 		if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH) {
1321 			SPDK_ERRLOG("Invalid key size %zu for cipher %s, should be %d\n", key->key_size,
1322 				    g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC], ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH);
1323 			return -1;
1324 		}
1325 		break;
1326 	case ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS:
1327 		switch (driver) {
1328 		case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI:
1329 			if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH &&
1330 			    key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_512_BLOCK_KEY_LENGTH) {
1331 				SPDK_ERRLOG("Invalid key size %zu for driver %s, cipher %s, supported %d or %d\n",
1332 					    key->key_size, g_driver_names[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI],
1333 					    g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS],
1334 					    ACCEL_DPDK_CRYPTODEV_AES_XTS_256_BLOCK_KEY_LENGTH,
1335 					    ACCEL_DPDK_CRYPTODEV_AES_XTS_512_BLOCK_KEY_LENGTH);
1336 				return -1;
1337 			}
1338 			break;
1339 		case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
1340 		case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB:
1341 			if (key->key_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH) {
1342 				SPDK_ERRLOG("Invalid key size %zu, supported %d\n", key->key_size,
1343 					    ACCEL_DPDK_CRYPTODEV_AES_XTS_128_BLOCK_KEY_LENGTH);
1344 				return -1;
1345 			}
1346 			break;
1347 		default:
1348 			SPDK_ERRLOG("Incorrect driver type %d\n", driver);
1349 			assert(0);
1350 			return -1;
1351 		}
1352 		if (key->key2_size != ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH) {
1353 			SPDK_ERRLOG("Cipher %s requires key2 size %d\n",
1354 				    g_cipher_names[ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS], ACCEL_DPDK_CRYPTODEV_AES_XTS_TWEAK_KEY_LENGTH);
1355 			return -1;
1356 		}
1357 		break;
1358 	}
1359 
1360 	return 0;
1361 }
1362 
1363 static void
1364 accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key)
1365 {
1366 	struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp;
1367 	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1368 
1369 	TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) {
1370 		rte_cryptodev_sym_session_free(key_handle->session_encrypt);
1371 		rte_cryptodev_sym_session_free(key_handle->session_decrypt);
1372 		TAILQ_REMOVE(&priv->dev_keys, key_handle, link);
1373 		spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle));
1374 		free(key_handle);
1375 	}
1376 
1377 	if (priv->xts_key) {
1378 		spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 0, key->key_size + key->key2_size);
1379 	}
1380 	free(priv->xts_key);
1381 	free(priv);
1382 }
1383 
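/* Create a crypto key for the currently selected driver: validate the parameters, build the
 * concatenated XTS key if needed and register per-device key handles. A single handle is
 * enough for QAT/AESNI_MB; mlx5_pci gets one handle per device since its keys are bound to a
 * Protection Domain. */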
1384 static int
1385 accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key)
1386 {
1387 	struct accel_dpdk_cryptodev_device *device;
1388 	struct accel_dpdk_cryptodev_key_priv *priv;
1389 	struct accel_dpdk_cryptodev_key_handle *key_handle;
1390 	enum accel_dpdk_cryptodev_driver_type driver;
1391 	enum accel_dpdk_crypto_dev_cipher_type cipher;
1392 
1393 	if (!key->param.cipher) {
1394 		SPDK_ERRLOG("Cipher is missing\n");
1395 		return -EINVAL;
1396 	}
1397 
1398 	if (strcmp(key->param.cipher, ACCEL_DPDK_CRYPTODEV_AES_CBC) == 0) {
1399 		cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_CBC;
1400 	} else if (strcmp(key->param.cipher, ACCEL_DPDK_CRYPTODEV_AES_XTS) == 0) {
1401 		cipher = ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS;
1402 	} else {
1403 		SPDK_ERRLOG("Unsupported cipher name %s.\n", key->param.cipher);
1404 		return -EINVAL;
1405 	}
1406 
1407 	driver = g_dpdk_cryptodev_driver;
1408 
1409 	if (accel_dpdk_cryptodev_validate_parameters(driver, cipher, key)) {
1410 		return -EINVAL;
1411 	}
1412 
1413 	priv = calloc(1, sizeof(*priv));
1414 	if (!priv) {
1415 		SPDK_ERRLOG("Memory allocation failed\n");
1416 		return -ENOMEM;
1417 	}
1418 	key->priv = priv;
1419 	priv->driver = driver;
1420 	priv->cipher = cipher;
1421 	TAILQ_INIT(&priv->dev_keys);
1422 
1423 	if (cipher == ACCEL_DPDK_CRYPTODEV_CIPHER_AES_XTS) {
1424 		/* DPDK expects the keys to be concatenated together. */
1425 		priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char));
1426 		if (!priv->xts_key) {
1427 			SPDK_ERRLOG("Memory allocation failed\n");
1428 			accel_dpdk_cryptodev_key_deinit(key);
1429 			return -ENOMEM;
1430 		}
1431 		memcpy(priv->xts_key, key->key, key->key_size);
1432 		memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size);
1433 	}
1434 
1435 	pthread_mutex_lock(&g_device_lock);
1436 	TAILQ_FOREACH(device, &g_crypto_devices, link) {
1437 		if (device->type != driver) {
1438 			continue;
1439 		}
1440 		key_handle = calloc(1, sizeof(*key_handle));
1441 		if (!key_handle) {
1442 			pthread_mutex_unlock(&g_device_lock);
1443 			accel_dpdk_cryptodev_key_deinit(key);
1444 			return -ENOMEM;
1445 		}
1446 		key_handle->device = device;
1447 		TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link);
1448 		if (accel_dpdk_cryptodev_key_handle_configure(key, key_handle)) {
1449 			pthread_mutex_unlock(&g_device_lock);
1450 			accel_dpdk_cryptodev_key_deinit(key);
1451 			return -ENOMEM;
1452 		}
1453 		if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
1454 			/* A single key handle is enough for this driver. Only MLX5_PCI needs
1455 			 * a key registered on every device, since its keys are bound to a specific
1456 			 * Protection Domain, so only MLX5_PCI keeps iterating. */
1457 			break;
1458 		}
1459 	}
1460 	pthread_mutex_unlock(&g_device_lock);
1461 
1462 	if (TAILQ_EMPTY(&priv->dev_keys)) {
1463 		free(priv);
1464 		return -ENODEV;
1465 	}
1466 
1467 	return 0;
1468 }
1469 
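/* Emit the RPC calls needed to recreate the current configuration. The output is roughly
 * (exact formatting depends on the JSON write flags):
 *   { "method": "dpdk_cryptodev_scan_accel_module" }
 *   { "method": "dpdk_cryptodev_set_driver", "params": { "driver_name": "crypto_aesni_mb" } }
 * where "crypto_aesni_mb" stands for whichever driver is currently selected. */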
1470 static void
1471 accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w)
1472 {
1473 	spdk_json_write_object_begin(w);
1474 	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module");
1475 	spdk_json_write_object_end(w);
1476 
1477 	spdk_json_write_object_begin(w);
1478 	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver");
1479 	spdk_json_write_named_object_begin(w, "params");
1480 	spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]);
1481 	spdk_json_write_object_end(w);
1482 	spdk_json_write_object_end(w);
1483 }
1484 
1485 static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = {
1486 	.module_init		= accel_dpdk_cryptodev_init,
1487 	.module_fini		= accel_dpdk_cryptodev_fini,
1488 	.write_config_json	= accel_dpdk_cryptodev_write_config_json,
1489 	.get_ctx_size		= accel_dpdk_cryptodev_ctx_size,
1490 	.name			= "dpdk_cryptodev",
1491 	.supports_opcode	= accel_dpdk_cryptodev_supports_opcode,
1492 	.get_io_channel		= accel_dpdk_cryptodev_get_io_channel,
1493 	.submit_tasks		= accel_dpdk_cryptodev_submit_tasks,
1494 	.crypto_key_init	= accel_dpdk_cryptodev_key_init,
1495 	.crypto_key_deinit	= accel_dpdk_cryptodev_key_deinit,
1496 };
1497