xref: /spdk/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c (revision b02581a89058ebaebe03bd0e16e3b58adfe406c1)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "accel_dpdk_cryptodev.h"
8 
9 #include "spdk/accel.h"
10 #include "spdk/accel_module.h"
11 #include "spdk/env.h"
12 #include "spdk/likely.h"
13 #include "spdk/thread.h"
14 #include "spdk/util.h"
15 #include "spdk/log.h"
16 #include "spdk/json.h"
17 #include "spdk_internal/sgl.h"
18 
19 #include <rte_bus_vdev.h>
20 #include <rte_crypto.h>
21 #include <rte_cryptodev.h>
22 #include <rte_mbuf_dyn.h>
23 #include <rte_version.h>
24 
25 /* The VF spread is the number of queue pairs between virtual functions, we use this to
26  * load balance the QAT device.
27  */
28 #define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD		32
29 
30 /* This controls how many ops will be dequeued from the crypto driver in one run
31  * of the poller. It is mainly a performance knob as it effectively determines how
32  * much work the poller has to do. However, even that can vary between crypto drivers:
33  * the ACCEL_DPDK_CRYPTODEV_AESNI_MB driver, for example, does all the crypto work on dequeue,
34  * whereas the QAT driver just dequeues what has been completed already.
35  */
36 #define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE	64
37 
38 #define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (128)
39 
40 /* The number of MBUFS we need must be a power of two and to support other small IOs
41  * in addition to the limits mentioned above, we go to the next power of two. It is a
42  * big number because it is one mempool for both source and destination mbufs. It may
43  * need to be bigger to support multiple crypto drivers at once.
44  */
45 #define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS			32768
46 #define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE		256
47 #define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES		128
48 #define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS		(2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES)
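/* Each key needs both an encrypt and a decrypt session, hence twice the max number of volumes. */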
49 #define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE	0
50 
51 /* This is the max number of IOs we can supply to any crypto device QP at one time.
52  * It can vary between drivers.
53  */
54 #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS		2048
55 
56 /* At this moment, DPDK descriptor allocation for mlx5 has some issues. We use 512
57  * as a compromise value between performance and the time spent on initialization. */
58 #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5	512
59 
60 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP		64
61 
62 /* Common for supported devices. */
63 #define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS		2
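/* The per-op IV lives in the crypto op's private data area, which starts right after the
 * rte_crypto_op and rte_crypto_sym_op structs and the two symmetric transforms reserved above;
 * ACCEL_DPDK_CRYPTODEV_IV_OFFSET is the byte offset of the IV from the start of the op, and the
 * op mempool created in accel_dpdk_cryptodev_init() reserves enough private data to hold it. */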
64 #define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \
65                 sizeof(struct rte_crypto_sym_op) + \
66                 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \
67                  sizeof(struct rte_crypto_sym_xform)))
68 #define ACCEL_DPDK_CRYPTODEV_IV_LENGTH			16
69 
70 /* Driver names */
71 #define ACCEL_DPDK_CRYPTODEV_AESNI_MB	"crypto_aesni_mb"
72 #define ACCEL_DPDK_CRYPTODEV_QAT	"crypto_qat"
73 #define ACCEL_DPDK_CRYPTODEV_QAT_ASYM	"crypto_qat_asym"
74 #define ACCEL_DPDK_CRYPTODEV_MLX5	"mlx5_pci"
75 
76 /* Supported ciphers */
77 #define ACCEL_DPDK_CRYPTODEV_AES_CBC	"AES_CBC" /* QAT and ACCEL_DPDK_CRYPTODEV_AESNI_MB */
78 #define ACCEL_DPDK_CRYPTODEV_AES_XTS	"AES_XTS" /* QAT and MLX5 */
79 
80 /* Specific to AES_CBC. */
81 #define ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH			16
82 
83 /* Limit of the max memory len attached to mbuf - rte_pktmbuf_attach_extbuf has uint16_t `buf_len`
84  * parameter, so we use the closest aligned value, 32768, for better performance */
85 #define ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN			32768
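/* Blocks larger than this, or ones that are not physically contiguous, are still handled: the
 * first chunk is attached to the op's mbuf and the rest is chained on via extra mbufs, see
 * accel_dpdk_cryptodev_mbuf_chain_remainder(). */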
86 
87 /* Used to store the owning task pointer (IO context) in each mbuf */
88 static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
89 	.name = "context_accel_dpdk_cryptodev",
90 	.size = sizeof(uint64_t),
91 	.align = __alignof__(uint64_t),
92 	.flags = 0,
93 };
94 
95 struct accel_dpdk_cryptodev_device;
96 
97 enum accel_dpdk_cryptodev_driver_type {
98 	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0,
99 	ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
100 	ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
101 	ACCEL_DPDK_CRYPTODEV_DRIVER_LAST
102 };
103 
104 struct accel_dpdk_cryptodev_qp {
105 	struct accel_dpdk_cryptodev_device *device;	/* ptr to crypto device */
106 	uint32_t num_enqueued_ops;	/* Used to decide whether to poll the qp or not */
107 	uint8_t qp; /* queue identifier */
108 	bool in_use; /* whether this node is in use or not */
109 	uint8_t index; /* used by QAT to load balance placement of qpairs */
110 	TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link;
111 };
112 
113 struct accel_dpdk_cryptodev_device {
114 	enum accel_dpdk_cryptodev_driver_type type;
115 	struct rte_cryptodev_info cdev_info; /* includes DPDK device friendly name */
116 	uint32_t qp_desc_nr; /* max number of qp descriptors to be enqueued in burst */
117 	uint8_t cdev_id; /* identifier for the device */
118 	TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs;
119 	TAILQ_ENTRY(accel_dpdk_cryptodev_device) link;
120 };
121 
122 struct accel_dpdk_cryptodev_key_handle {
123 	struct accel_dpdk_cryptodev_device *device;
124 	TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link;
125 	void *session_encrypt;	/* encryption session for this key */
126 	void *session_decrypt;	/* decryption session for this key */
127 	struct rte_crypto_sym_xform cipher_xform;		/* crypto control struct for this key */
128 };
129 
130 struct accel_dpdk_cryptodev_key_priv {
131 	enum accel_dpdk_cryptodev_driver_type driver;
132 	enum spdk_accel_cipher cipher;
133 	char *xts_key;
134 	TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys;
135 };
136 
137 /* The crypto channel struct. It is allocated and freed on our behalf by the io channel code.
138  * We store things in here that are needed on a per-thread basis, like the qpairs assigned
139  * to this thread and the poller for this thread.
140  */
141 struct accel_dpdk_cryptodev_io_channel {
142 	/* completion poller */
143 	struct spdk_poller *poller;
144 	/* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */
145 	struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST];
146 	/* Used to queue tasks when the qpair is full or only part of the crypto ops were submitted to the PMD */
147 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks;
148 	/* Used to queue tasks that were completed in the submission path - to avoid calling cpl_cb there and
149 	 * possibly overflowing the call stack */
150 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) completed_tasks;
151 };
152 
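/* One accel task is split into block_size-sized crypto operations, one per logical block. The
 * counters below track how many of those ops have been submitted to and completed by the device,
 * so that the task can be completed (or its remaining blocks resubmitted) from the poller. */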
153 struct accel_dpdk_cryptodev_task {
154 	struct spdk_accel_task base;
155 	uint32_t cryop_completed;	/* The number of crypto operations completed by HW */
156 	uint32_t cryop_submitted;	/* The number of crypto operations submitted to HW */
157 	uint32_t cryop_total;		/* Total number of crypto operations in this task */
158 	bool is_failed;
159 	bool inplace;
160 	TAILQ_ENTRY(accel_dpdk_cryptodev_task) link;
161 };
162 
163 /* Shared mempools between all devices on this system */
164 static struct rte_mempool *g_session_mp = NULL;
165 static struct rte_mempool *g_session_mp_priv = NULL;
166 static struct rte_mempool *g_mbuf_mp = NULL;            /* mbuf mempool */
167 static int g_mbuf_offset;
168 static struct rte_mempool *g_crypto_op_mp = NULL;	/* crypto operations, must be rte* mempool */
169 
170 static struct rte_mbuf_ext_shared_info g_shinfo = {};   /* used by DPDK mbuf macro */
171 
172 static uint8_t g_qat_total_qp = 0;
173 static uint8_t g_next_qat_index;
174 
175 static const char *g_driver_names[] = {
176 	[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]	= ACCEL_DPDK_CRYPTODEV_AESNI_MB,
177 	[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]	= ACCEL_DPDK_CRYPTODEV_QAT,
178 	[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]	= ACCEL_DPDK_CRYPTODEV_MLX5
179 };
180 static const char *g_cipher_names[] = {
181 	[SPDK_ACCEL_CIPHER_AES_CBC]	= ACCEL_DPDK_CRYPTODEV_AES_CBC,
182 	[SPDK_ACCEL_CIPHER_AES_XTS]	= ACCEL_DPDK_CRYPTODEV_AES_XTS,
183 };
184 
185 static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver =
186 	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
187 
188 /* Global list of all crypto devices */
189 static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER(
190 			g_crypto_devices);
191 static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER;
192 
193 static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module;
194 
195 static int accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
196 		struct accel_dpdk_cryptodev_task *task);
197 
198 void
199 accel_dpdk_cryptodev_enable(void)
200 {
201 	spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module);
202 }
203 
204 int
205 accel_dpdk_cryptodev_set_driver(const char *driver_name)
206 {
207 	if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
208 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
209 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
210 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
211 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
212 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
213 	} else {
214 		SPDK_ERRLOG("Unsupported driver %s\n", driver_name);
215 		return -EINVAL;
216 	}
217 
218 	SPDK_NOTICELOG("Using driver %s\n", driver_name);
219 
220 	return 0;
221 }
222 
223 const char *
224 accel_dpdk_cryptodev_get_driver(void)
225 {
226 	return g_driver_names[g_dpdk_cryptodev_driver];
227 }
228 
229 static inline uint16_t
230 accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp,
231 			     struct accel_dpdk_cryptodev_io_channel *crypto_ch)
232 {
233 	struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
234 	struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
235 	struct accel_dpdk_cryptodev_task *task;
236 	uint32_t num_mbufs = 0;
237 	int i;
238 	uint16_t num_dequeued_ops;
239 
240 	/* Each run of the poller will get just what the device has available
241 	 * at the moment we call it; we don't check again after draining the
242 	 * first batch.
243 	 */
244 	num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp,
245 			   dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE);
246 	/* Check if operation was processed successfully */
247 	for (i = 0; i < num_dequeued_ops; i++) {
248 
249 		/* We don't know the order or association of the crypto ops wrt any
250 		 * particular task, so we need to look at each one and determine if it's
251 		 * the last one for its task or not.
252 		 */
253 		task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src,
254 				g_mbuf_offset, uint64_t *);
255 		assert(task != NULL);
256 
257 		if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
258 			SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status);
259 			/* Update the task status to error, we'll still process the
260 			 * rest of the crypto ops for this task though so they
261 			 * aren't left hanging.
262 			 */
263 			task->is_failed = true;
264 		}
265 
266 		/* Return the associated src and dst mbufs by collecting them into
267 		 * an array that we can use the bulk API to free after the loop.
268 		 */
269 		*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
270 		mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
271 		if (dequeued_ops[i]->sym->m_dst) {
272 			mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
273 		}
274 
275 		task->cryop_completed++;
276 		if (task->cryop_completed == task->cryop_total) {
277 			/* Complete the IO */
278 			spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0);
279 		} else if (task->cryop_completed == task->cryop_submitted) {
280 			/* submit remaining crypto ops */
281 			int rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
282 
283 			if (spdk_unlikely(rc)) {
284 				if (rc == -ENOMEM) {
285 					TAILQ_INSERT_TAIL(&crypto_ch->queued_tasks, task, link);
286 					continue;
287 				} else if (rc == -EALREADY) {
288 					/* -EALREADY means that a task is completed, but it might be unsafe to complete
289 					 * it if we are in the submission path. Since we are in the poller context, we can
290 					 * complete the task immediately */
291 					rc = 0;
292 				}
293 				spdk_accel_task_complete(&task->base, rc);
294 			}
295 		}
296 	}
297 
298 	/* Now bulk free both mbufs and crypto operations. */
299 	if (num_dequeued_ops > 0) {
300 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops);
301 		assert(num_mbufs > 0);
302 		/* This also releases chained mbufs if any. */
303 		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
304 	}
305 
306 	assert(qp->num_enqueued_ops >= num_dequeued_ops);
307 	qp->num_enqueued_ops -= num_dequeued_ops;
308 
309 	return num_dequeued_ops;
310 }
311 
312 /* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at
313  * the device. Then we need to decide if what we've got so far (including previous poller
314  * runs) totals up to one or more complete tasks. */
315 static int
316 accel_dpdk_cryptodev_poller(void *args)
317 {
318 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = args;
319 	struct accel_dpdk_cryptodev_qp *qp;
320 	struct accel_dpdk_cryptodev_task *task, *task_tmp;
321 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks_tmp;
322 	uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0, num_completed_tasks = 0;
323 	int i, rc;
324 
325 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
326 		qp = crypto_ch->device_qp[i];
327 		/* Avoid polling "idle" qps since it may affect performance */
328 		if (qp && qp->num_enqueued_ops) {
329 			num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp, crypto_ch);
330 		}
331 	}
332 
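	/* Retry tasks that could not be (fully) submitted earlier because a qpair was full or the
	 * mbuf/op pools were exhausted. */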
333 	if (!TAILQ_EMPTY(&crypto_ch->queued_tasks)) {
334 		TAILQ_INIT(&queued_tasks_tmp);
335 
336 		TAILQ_FOREACH_SAFE(task, &crypto_ch->queued_tasks, link, task_tmp) {
337 			TAILQ_REMOVE(&crypto_ch->queued_tasks, task, link);
338 			rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
339 			if (spdk_unlikely(rc)) {
340 				if (rc == -ENOMEM) {
341 					TAILQ_INSERT_TAIL(&queued_tasks_tmp, task, link);
342 					/* Other queued tasks may belong to other qpairs,
343 					 * so process the whole list */
344 					continue;
345 				} else if (rc == -EALREADY) {
346 					/* -EALREADY means that a task is completed, but it might be unsafe to complete
347 					 * it if we are in the submission path. Since we are in the poller context, we can
348 					 * complete the task immediately */
349 					rc = 0;
350 				}
351 				spdk_accel_task_complete(&task->base, rc);
352 				num_completed_tasks++;
353 			} else {
354 				num_enqueued_ops++;
355 			}
356 		}
357 
358 		TAILQ_SWAP(&crypto_ch->queued_tasks, &queued_tasks_tmp, accel_dpdk_cryptodev_task, link);
359 	}
360 
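	/* Complete tasks that finished entirely in the submission path (-EALREADY); doing it here in
	 * poller context avoids growing the submitter's call stack. */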
361 	TAILQ_FOREACH_SAFE(task, &crypto_ch->completed_tasks, link, task_tmp) {
362 		TAILQ_REMOVE(&crypto_ch->completed_tasks, task, link);
363 		spdk_accel_task_complete(&task->base, 0);
364 		num_completed_tasks++;
365 	}
366 
367 	return !!(num_dequeued_ops + num_enqueued_ops + num_completed_tasks);
368 }
369 
370 /* Allocate a new mbuf of @remainder size with data pointed to by @addr and chain
371  * it to @orig_mbuf. */
372 static inline int
373 accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task,
374 		struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder)
375 {
376 	uint64_t phys_addr, phys_len, remainder = *_remainder;
377 	struct rte_mbuf *chain_mbuf;
378 	int rc;
379 
380 	phys_len = remainder;
381 	phys_addr = spdk_vtophys((void *)addr, &phys_len);
382 	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
383 		return -EFAULT;
384 	}
385 	remainder = spdk_min(remainder, phys_len);
386 	remainder = spdk_min(remainder, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
387 	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
388 	if (spdk_unlikely(rc)) {
389 		return -ENOMEM;
390 	}
391 	/* Store context in every mbuf as we don't know anything about completion order */
392 	*RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
393 	rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo);
394 	rte_pktmbuf_append(chain_mbuf, remainder);
395 
396 	/* The chained buffer is released by rte_pktmbuf_free_bulk() automatically. */
397 	rte_pktmbuf_chain(orig_mbuf, chain_mbuf);
398 	*_remainder = remainder;
399 
400 	return 0;
401 }
402 
403 /* Attach the data buffer pointed to by @addr to @mbuf. Return the utilized length of the
404  * contiguous space that was physically available. */
405 static inline uint64_t
406 accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf,
407 				     uint8_t *addr, uint32_t len)
408 {
409 	uint64_t phys_addr, phys_len;
410 
411 	/* Store context in every mbuf as we don't know anything about completion order */
412 	*RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
413 
414 	phys_len = len;
415 	phys_addr = spdk_vtophys((void *)addr, &phys_len);
416 	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) {
417 		return 0;
418 	}
419 	assert(phys_len <= len);
420 	phys_len = spdk_min(phys_len, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
421 
422 	/* Set the mbuf elements address and length. */
423 	rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
424 	rte_pktmbuf_append(mbuf, phys_len);
425 
426 	return phys_len;
427 }
428 
429 static inline struct accel_dpdk_cryptodev_key_handle *
430 accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
431 				      struct accel_dpdk_cryptodev_key_priv *key)
432 {
433 	struct accel_dpdk_cryptodev_key_handle *key_handle;
434 
435 	if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
436 		/* Crypto key is registered on all available devices while io_channel opens CQ/QP on a single device.
437 		 * We need to iterate over the list of key entries to find a suitable device */
438 		TAILQ_FOREACH(key_handle, &key->dev_keys, link) {
439 			if (key_handle->device->cdev_id ==
440 			    crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) {
441 				return key_handle;
442 			}
443 		}
444 		return NULL;
445 	} else {
446 		return TAILQ_FIRST(&key->dev_keys);
447 	}
448 }
449 
450 static inline int
451 accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
452 		struct rte_crypto_op **crypto_ops, int count)
453 {
454 	int rc;
455 
456 	/* Get the number of source mbufs that we need. These will always be 1:1 because we
457 	 * don't support chaining. The reason we don't is because of our decision to use
458 	 * LBA as IV; there can be no case where we'd need >1 mbuf per crypto op, or the
459 	 * op would span more than 1 LBA.
460 	 */
461 	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count);
462 	if (rc) {
463 		SPDK_ERRLOG("Failed to get src_mbufs!\n");
464 		return -ENOMEM;
465 	}
466 
467 	/* Get the same amount to describe the destination. If the crypto operation is inplace, we just skip this. */
468 	if (dst_mbufs) {
469 		rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count);
470 		if (rc) {
471 			SPDK_ERRLOG("Failed to get dst_mbufs!\n");
472 			goto err_free_src;
473 		}
474 	}
475 
476 #ifdef __clang_analyzer__
477 	/* silence scan-build false positive */
478 	SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE,
479 					      0x1000);
480 #endif
481 	/* Allocate crypto operations. */
482 	rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
483 				      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
484 				      crypto_ops, count);
485 	if (rc < count) {
486 		SPDK_ERRLOG("Failed to allocate crypto ops! rc %d\n", rc);
487 		goto err_free_ops;
488 	}
489 
490 	return 0;
491 
492 err_free_ops:
493 	if (rc > 0) {
494 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc);
495 	}
496 	if (dst_mbufs) {
497 		/* This also releases chained mbufs if any. */
498 		rte_pktmbuf_free_bulk(dst_mbufs, count);
499 	}
500 err_free_src:
501 	/* This also releases chained mbufs if any. */
502 	rte_pktmbuf_free_bulk(src_mbufs, count);
503 
504 	return -ENOMEM;
505 }
506 
507 static inline int
508 accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf,
509 		struct accel_dpdk_cryptodev_task *task)
510 {
511 	int rc;
512 	uint8_t *buf_addr;
513 	uint64_t phys_len;
514 	uint64_t remainder;
515 	uint64_t buf_len;
516 
517 	assert(sgl->iov->iov_len > sgl->iov_offset);
518 	buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset);
519 	buf_addr = sgl->iov->iov_base + sgl->iov_offset;
520 	phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len);
521 	if (spdk_unlikely(phys_len == 0)) {
522 		return -EFAULT;
523 	}
524 	buf_len = spdk_min(buf_len, phys_len);
525 	spdk_iov_sgl_advance(sgl, buf_len);
526 
527 	/* Handle the case where the block crosses an iovec or physical page boundary by chaining additional mbufs. */
528 	assert(task->base.block_size >= buf_len);
529 	remainder = task->base.block_size - buf_len;
530 	while (remainder) {
531 		buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset);
532 		buf_addr = sgl->iov->iov_base + sgl->iov_offset;
533 		rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len);
534 		if (spdk_unlikely(rc)) {
535 			return rc;
536 		}
537 		spdk_iov_sgl_advance(sgl, buf_len);
538 		remainder -= buf_len;
539 	}
540 
541 	return 0;
542 }
543 
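/* Each crypto op covers exactly one logical block and its IV is simply that block's LBA
 * (task->base.iv plus the block's index within the task), zero-padded to
 * ACCEL_DPDK_CRYPTODEV_IV_LENGTH bytes. */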
544 static inline void
545 accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv)
546 {
547 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
548 
549 	/* Set the IV - we use the LBA of the crypto_op */
550 	memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH);
551 	rte_memcpy(iv_ptr, &iv, sizeof(uint64_t));
552 }
553 
554 static inline void
555 accel_dpdk_cryptodev_update_resources_from_pools(struct rte_crypto_op **crypto_ops,
556 		struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
557 		uint32_t num_enqueued_ops, uint32_t cryop_cnt)
558 {
559 	memmove(crypto_ops, &crypto_ops[num_enqueued_ops], sizeof(crypto_ops[0]) * cryop_cnt);
560 	memmove(src_mbufs, &src_mbufs[num_enqueued_ops], sizeof(src_mbufs[0]) * cryop_cnt);
561 	if (dst_mbufs) {
562 		memmove(dst_mbufs, &dst_mbufs[num_enqueued_ops], sizeof(dst_mbufs[0]) * cryop_cnt);
563 	}
564 }
565 
566 static int
567 accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
568 				  struct accel_dpdk_cryptodev_task *task)
569 {
570 	uint16_t num_enqueued_ops;
571 	uint32_t cryop_cnt;
572 	uint32_t crypto_len = task->base.block_size;
573 	uint64_t dst_length, total_length;
574 	uint32_t sgl_offset;
575 	uint32_t qp_capacity;
576 	uint64_t iv_start;
577 	uint32_t i, crypto_index;
578 	struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
579 	struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
580 	struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
581 	void *session;
582 	struct accel_dpdk_cryptodev_key_priv *priv;
583 	struct accel_dpdk_cryptodev_key_handle *key_handle;
584 	struct accel_dpdk_cryptodev_qp *qp;
585 	struct accel_dpdk_cryptodev_device *dev;
586 	struct spdk_iov_sgl src, dst = {};
587 	int rc;
588 	bool inplace = task->inplace;
589 
590 	if (spdk_unlikely(!task->base.crypto_key ||
591 			  task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) {
592 		return -EINVAL;
593 	}
594 
595 	priv = task->base.crypto_key->priv;
596 	assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST);
597 
598 	if (task->cryop_completed) {
599 		/* We continue to process remaining blocks */
600 		assert(task->cryop_submitted == task->cryop_completed);
601 		assert(task->cryop_total > task->cryop_completed);
602 		cryop_cnt = task->cryop_total - task->cryop_completed;
603 		sgl_offset = task->cryop_completed * crypto_len;
604 		iv_start = task->base.iv + task->cryop_completed;
605 	} else {
606 		/* That is a new task */
607 		total_length = 0;
608 		for (i = 0; i < task->base.s.iovcnt; i++) {
609 			total_length += task->base.s.iovs[i].iov_len;
610 		}
611 		dst_length = 0;
612 		for (i = 0; i < task->base.d.iovcnt; i++) {
613 			dst_length += task->base.d.iovs[i].iov_len;
614 		}
615 
616 		if (spdk_unlikely(total_length != dst_length || !total_length)) {
617 			return -ERANGE;
618 		}
619 		if (spdk_unlikely(total_length % task->base.block_size != 0)) {
620 			return -EINVAL;
621 		}
622 
623 		cryop_cnt = total_length / task->base.block_size;
624 		task->cryop_total = cryop_cnt;
625 		sgl_offset = 0;
626 		iv_start = task->base.iv;
627 	}
628 
629 	/* Limit the number of crypto ops that we can process at once */
630 	cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
631 
632 	qp = crypto_ch->device_qp[priv->driver];
633 	assert(qp);
634 	dev = qp->device;
635 	assert(dev);
636 	assert(dev->qp_desc_nr >= qp->num_enqueued_ops);
637 
638 	qp_capacity = dev->qp_desc_nr - qp->num_enqueued_ops;
639 	cryop_cnt = spdk_min(cryop_cnt, qp_capacity);
640 	if (spdk_unlikely(cryop_cnt == 0)) {
641 		/* QP is full */
642 		return -ENOMEM;
643 	}
644 
645 	key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv);
646 	if (spdk_unlikely(!key_handle)) {
647 		SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver],
648 			    g_cipher_names[priv->cipher]);
649 		return -EINVAL;
650 	}
651 	/* mlx5_pci binds keys to a specific device, we can't use a key with any device */
652 	assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
653 
654 	if (task->base.op_code == SPDK_ACCEL_OPC_ENCRYPT) {
655 		session = key_handle->session_encrypt;
656 	} else if (task->base.op_code == SPDK_ACCEL_OPC_DECRYPT) {
657 		session = key_handle->session_decrypt;
658 	} else {
659 		return -EINVAL;
660 	}
661 
662 	rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs,
663 			crypto_ops, cryop_cnt);
664 	if (rc) {
665 		return rc;
666 	}
667 
668 	/* As we don't support chaining because of a decision to use LBA as IV, construction
669 	 * of crypto operations is straightforward. We build both the op, the mbuf and the
670 	 * dst_mbuf in our local arrays by looping through the length of the accel task and
671 	 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
672 	 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
673 	 * mbuf per crypto operation.
674 	 */
675 	spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0);
676 	spdk_iov_sgl_advance(&src, sgl_offset);
677 	if (!inplace) {
678 		spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0);
679 		spdk_iov_sgl_advance(&dst, sgl_offset);
680 	}
681 
682 	for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
683 		rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task);
684 		if (spdk_unlikely(rc)) {
685 			goto free_ops;
686 		}
687 		accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start);
688 		iv_start++;
689 
690 		/* Set the data to encrypt/decrypt length */
691 		crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
692 		crypto_ops[crypto_index]->sym->cipher.data.offset = 0;
693 		rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session);
694 
695 		/* link the mbuf to the crypto op. */
696 		crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
697 
698 		if (inplace) {
699 			crypto_ops[crypto_index]->sym->m_dst = NULL;
700 		} else {
701 #ifndef __clang_analyzer__
702 			/* scan-build thinks that dst_mbufs is not initialized */
703 			rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
704 			if (spdk_unlikely(rc)) {
705 				goto free_ops;
706 			}
707 			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
708 #endif
709 		}
710 	}
711 
712 	/* Enqueue everything we've got but limit by the max number of descriptors we
713 	 * configured the crypto device for.
714 	 */
715 	num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, cryop_cnt);
716 	/* This value is used in the completion callback to determine when the accel task is complete. */
717 	task->cryop_submitted += num_enqueued_ops;
718 	qp->num_enqueued_ops += num_enqueued_ops;
719 	/* If we were unable to enqueue everything, decide what to do based on the status
720 	 * of the first op that was not submitted.
721 	 */
722 	if (num_enqueued_ops < cryop_cnt) {
723 		switch (crypto_ops[num_enqueued_ops]->status) {
724 		case RTE_CRYPTO_OP_STATUS_SUCCESS:
725 			/* Crypto operation might be completed successfully but enqueuing to a completion ring might fail.
726 			 * That might happen with SW PMDs like openssl.
727 			 * We can't retry such an operation on the next turn since, if the crypto operation was inplace, we could
728 			 * encrypt/decrypt an already processed buffer. See github issue #2907 for more details.
729 			 * Handle this case as if the crypto op completed successfully - increment cryop_submitted and
730 			 * cryop_completed.
731 			 * We won't receive a completion for such an operation, so we need to clean up mbufs and crypto_ops */
732 			assert(task->cryop_total > task->cryop_completed);
733 			task->cryop_completed++;
734 			task->cryop_submitted++;
735 			if (task->cryop_completed == task->cryop_total) {
736 				assert(num_enqueued_ops == 0);
737 				/* All crypto ops are completed. We can't complete the task immediately since this function might be
738 				 * called in scope of spdk_accel_submit_* function and user's logic in the completion callback
739 				 * might lead to stack overflow */
740 				cryop_cnt -= num_enqueued_ops;
741 				accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs,
742 						num_enqueued_ops, cryop_cnt);
743 				rc = -EALREADY;
744 				goto free_ops;
745 			}
746 		/* fallthrough */
747 		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
748 			if (num_enqueued_ops == 0) {
749 				/* Nothing was submitted. Free crypto ops and mbufs, treat this case as NOMEM */
750 				rc = -ENOMEM;
751 				goto free_ops;
752 			}
753 			/* Some of the crypto operations were not submitted; release their mbufs and crypto ops.
754 			 * The remaining crypto ops will be submitted again once the current batch is completed */
755 			cryop_cnt -= num_enqueued_ops;
756 			accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs,
757 					num_enqueued_ops, cryop_cnt);
758 			rc = 0;
759 			goto free_ops;
760 		default:
761 			/* For all other statuses, mark task as failed so that the poller will pick
762 			 * the failure up for the overall task status.
763 			 */
764 			task->is_failed = true;
765 			if (num_enqueued_ops == 0) {
766 				/* If nothing was enqueued and the failure was not because the device was
767 				 * busy, fail the task now as the poller won't know anything about it.
768 				 */
769 				rc = -EINVAL;
770 				goto free_ops;
771 			}
772 			break;
773 		}
774 	}
775 
776 	return 0;
777 
778 	/* Error cleanup paths. */
779 free_ops:
780 	if (!inplace) {
781 		/* This also releases chained mbufs if any. */
782 		rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
783 	}
784 	rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt);
785 	/* This also releases chained mbufs if any. */
786 	rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
787 	return rc;
788 }
789 
790 static inline struct accel_dpdk_cryptodev_qp *
791 accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type)
792 {
793 	struct accel_dpdk_cryptodev_device *device, *device_tmp;
794 	struct accel_dpdk_cryptodev_qp *qpair;
795 
796 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) {
797 		if (device->type != type) {
798 			continue;
799 		}
800 		TAILQ_FOREACH(qpair, &device->qpairs, link) {
801 			if (!qpair->in_use) {
802 				qpair->in_use = true;
803 				return qpair;
804 			}
805 		}
806 	}
807 
808 	return NULL;
809 }
810 
811 /* Helper function for the channel creation callback.
812  * Returns the number of drivers assigned to the channel */
813 static uint32_t
814 accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch)
815 {
816 	struct accel_dpdk_cryptodev_device *device;
817 	struct accel_dpdk_cryptodev_qp *device_qp;
818 	uint32_t num_drivers = 0;
819 	bool qat_found = false;
820 
821 	pthread_mutex_lock(&g_device_lock);
822 
823 	TAILQ_FOREACH(device, &g_crypto_devices, link) {
824 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) {
825 			/* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
826 			 * workload out over the multiple virtual functions in the device. For the devices
827 			 * where this isn't the case, it doesn't hurt.
828 			 */
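			/* For example, with two QAT VFs exposing 32 qpairs each (g_qat_total_qp == 64), successive
			 * channels would roughly be handed qp indexes 0, 32, 1, 33, ... as g_next_qat_index advances
			 * by the VF spread modulo the total qp count. */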
829 			TAILQ_FOREACH(device_qp, &device->qpairs, link) {
830 				if (device_qp->index != g_next_qat_index) {
831 					continue;
832 				}
833 				if (device_qp->in_use == false) {
834 					assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL);
835 					crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp;
836 					device_qp->in_use = true;
837 					g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp;
838 					qat_found = true;
839 					num_drivers++;
840 					break;
841 				} else {
842 					/* if the preferred index is used, skip to the next one in this set. */
843 					g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
844 				}
845 			}
846 		}
847 	}
848 
849 	/* For ACCEL_DPDK_CRYPTODEV_AESNI_MB and MLX5_PCI select devices in round-robin manner */
850 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB);
851 	if (device_qp) {
852 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL);
853 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp;
854 		num_drivers++;
855 	}
856 
857 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
858 	if (device_qp) {
859 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL);
860 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp;
861 		num_drivers++;
862 	}
863 
864 	pthread_mutex_unlock(&g_device_lock);
865 
866 	return num_drivers;
867 }
868 
869 static void
870 _accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf)
871 {
872 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
873 			ctx_buf;
874 	int i;
875 
876 	pthread_mutex_lock(&g_device_lock);
877 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
878 		if (crypto_ch->device_qp[i]) {
879 			crypto_ch->device_qp[i]->in_use = false;
880 		}
881 	}
882 	pthread_mutex_unlock(&g_device_lock);
883 
884 	spdk_poller_unregister(&crypto_ch->poller);
885 }
886 
887 static int
888 _accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf)
889 {
890 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
891 			ctx_buf;
892 
893 	crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0);
894 	if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) {
895 		SPDK_ERRLOG("No crypto drivers assigned\n");
896 		spdk_poller_unregister(&crypto_ch->poller);
897 		return -EINVAL;
898 	}
899 
900 	/* We use this to queue tasks when the qpair is full or there are no resources in the pools */
901 	TAILQ_INIT(&crypto_ch->queued_tasks);
902 	TAILQ_INIT(&crypto_ch->completed_tasks);
903 
904 	return 0;
905 }
906 
907 static struct spdk_io_channel *
908 accel_dpdk_cryptodev_get_io_channel(void)
909 {
910 	return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module);
911 }
912 
913 static size_t
914 accel_dpdk_cryptodev_ctx_size(void)
915 {
916 	return sizeof(struct accel_dpdk_cryptodev_task);
917 }
918 
919 static bool
920 accel_dpdk_cryptodev_supports_opcode(enum spdk_accel_opcode opc)
921 {
922 	switch (opc) {
923 	case SPDK_ACCEL_OPC_ENCRYPT:
924 	case SPDK_ACCEL_OPC_DECRYPT:
925 		return true;
926 	default:
927 		return false;
928 	}
929 }
930 
931 static int
932 accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task)
933 {
934 	struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task,
935 			base);
936 	struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch);
937 	int rc;
938 
939 	task->cryop_completed = 0;
940 	task->cryop_submitted = 0;
941 	task->cryop_total = 0;
942 	task->inplace = true;
943 	task->is_failed = false;
944 
945 	/* Check if crypto operation is inplace: no destination or source == destination */
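	/* Note that the iovecs must match element by element (same base and length); the same memory
	 * described by a different iovec layout is treated as a separate destination. */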
946 	if (task->base.s.iovcnt == task->base.d.iovcnt) {
947 		if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) {
948 			task->inplace = false;
949 		}
950 	} else if (task->base.d.iovcnt != 0) {
951 		task->inplace = false;
952 	}
953 
954 	rc = accel_dpdk_cryptodev_process_task(ch, task);
955 	if (spdk_unlikely(rc)) {
956 		if (rc == -ENOMEM) {
957 			TAILQ_INSERT_TAIL(&ch->queued_tasks, task, link);
958 			rc = 0;
959 		} else if (rc == -EALREADY) {
960 			/* -EALREADY means that a task is completed, but it might be unsafe to complete
961 			 * it if we are in the submission path. Hence put it into a dedicated queue to and
962 			 * process it during polling */
963 			TAILQ_INSERT_TAIL(&ch->completed_tasks, task, link);
964 			rc = 0;
965 		}
966 	}
967 
968 	return rc;
969 }
970 
971 /* Dummy function used by DPDK to free externally attached buffers from mbufs; we free them
972  * ourselves, but this callback has to be here. */
973 static void
974 shinfo_free_cb(void *arg1, void *arg2)
975 {
976 }
977 
978 static int
979 accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores)
980 {
981 	struct rte_cryptodev_qp_conf qp_conf = {
982 		.mp_session = g_session_mp,
983 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
984 		.mp_session_private = g_session_mp_priv
985 #endif
986 	};
987 	/* Setup queue pairs. */
988 	struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_SOCKET_ID_ANY };
989 	struct accel_dpdk_cryptodev_device *device;
990 	uint8_t j, cdev_id, cdrv_id;
991 	struct accel_dpdk_cryptodev_qp *dev_qp, *dev_qp_tmp;
992 	int rc;
993 
994 	device = calloc(1, sizeof(*device));
995 	if (!device) {
996 		return -ENOMEM;
997 	}
998 
999 	/* Get details about this device. */
1000 	rte_cryptodev_info_get(index, &device->cdev_info);
1001 	cdrv_id = device->cdev_info.driver_id;
1002 	cdev_id = device->cdev_id = index;
1003 
1004 	if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
1005 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
1006 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
1007 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
1008 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
1009 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
1010 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
1011 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5;
1012 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
1013 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) {
1014 		/* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */
1015 		rc = 0;
1016 		goto err;
1017 	} else {
1018 		SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n",
1019 			    cdev_id, device->cdev_info.driver_name);
1020 		rc = -EINVAL;
1021 		goto err;
1022 	}
1023 
1024 	/* Before going any further, make sure we have enough resources for this
1025 	 * device type to function.  We need a unique queue pair per core accross each
1026 	 * device type to function. We need a unique queue pair per core across each
1027 	 */
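	/* For example, two QAT VFs with 32 queue pairs each can serve at most 64 lcores. */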
1028 	if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
1029 	     device->cdev_info.max_nb_queue_pairs) < num_lcores) {
1030 		SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
1031 			    device->cdev_info.driver_name);
1032 		SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
1033 		rc = -EINVAL;
1034 		goto err;
1035 	}
1036 
1037 	conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs;
1038 	rc = rte_cryptodev_configure(cdev_id, &conf);
1039 	if (rc < 0) {
1040 		SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n",
1041 			    cdev_id, rc);
1042 		rc = -EINVAL;
1043 		goto err;
1044 	}
1045 
1046 	/* Pre-setup all potential qpairs now and assign them in the channel
1047 	 * callback. If we were to create them there, we'd have to stop the
1048 	 * entire device affecting all other threads that might be using it
1049 	 * even on other queue pairs.
1050 	 */
1051 	qp_conf.nb_descriptors = device->qp_desc_nr;
1052 	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
1053 		rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY);
1054 		if (rc < 0) {
1055 			SPDK_ERRLOG("Failed to setup queue pair %u on "
1056 				    "cryptodev %u: error %d\n", j, cdev_id, rc);
1057 			rc = -EINVAL;
1058 			goto err_qp_setup;
1059 		}
1060 	}
1061 
1062 	rc = rte_cryptodev_start(cdev_id);
1063 	if (rc < 0) {
1064 		SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc);
1065 		rc = -EINVAL;
1066 		goto err_dev_start;
1067 	}
1068 
1069 	TAILQ_INIT(&device->qpairs);
1070 	/* Build up lists of device/qp combinations per PMD */
1071 	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
1072 		dev_qp = calloc(1, sizeof(*dev_qp));
1073 		if (!dev_qp) {
1074 			rc = -ENOMEM;
1075 			goto err_qp_alloc;
1076 		}
1077 		dev_qp->device = device;
1078 		dev_qp->qp = j;
1079 		dev_qp->in_use = false;
1080 		TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link);
1081 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1082 			dev_qp->index = g_qat_total_qp++;
1083 		}
1084 	}
1085 	/* Add to our list of available crypto devices. */
1086 	TAILQ_INSERT_TAIL(&g_crypto_devices, device, link);
1087 
1088 	return 0;
1089 
1090 err_qp_alloc:
1091 	TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, dev_qp_tmp) {
1092 		if (dev_qp->device->cdev_id != device->cdev_id) {
1093 			continue;
1094 		}
1095 		free(dev_qp);
1096 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1097 			assert(g_qat_total_qp);
1098 			g_qat_total_qp--;
1099 		}
1100 	}
1101 	rte_cryptodev_stop(cdev_id);
1102 err_dev_start:
1103 err_qp_setup:
1104 	rte_cryptodev_close(cdev_id);
1105 err:
1106 	free(device);
1107 
1108 	return rc;
1109 }
1110 
1111 static void
1112 accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device)
1113 {
1114 	struct accel_dpdk_cryptodev_qp *dev_qp, *tmp;
1115 
1116 	assert(device);
1117 
1118 	TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) {
1119 		free(dev_qp);
1120 	}
1121 	if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
1122 		assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs);
1123 		g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs;
1124 	}
1125 	rte_cryptodev_stop(device->cdev_id);
1126 	rte_cryptodev_close(device->cdev_id);
1127 	free(device);
1128 }
1129 
1130 static int
1131 accel_dpdk_cryptodev_init(void)
1132 {
1133 	uint8_t cdev_count;
1134 	uint8_t cdev_id;
1135 	int i, rc;
1136 	struct accel_dpdk_cryptodev_device *device, *tmp_dev;
1137 	unsigned int max_sess_size = 0, sess_size;
1138 	uint16_t num_lcores = rte_lcore_count();
1139 	char aesni_args[32];
1140 
1141 	/* Only the first call via module init should init the crypto drivers. */
1142 	if (g_session_mp != NULL) {
1143 		return 0;
1144 	}
1145 
1146 	/* We always init ACCEL_DPDK_CRYPTODEV_AESNI_MB */
1147 	snprintf(aesni_args, sizeof(aesni_args), "max_nb_queue_pairs=%d",
1148 		 ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP);
1149 	rc = rte_vdev_init(ACCEL_DPDK_CRYPTODEV_AESNI_MB, aesni_args);
1150 	if (rc) {
1151 		SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. "
1152 			       "Possibly %s is not supported by DPDK library. "
1153 			       "Keep going...\n", ACCEL_DPDK_CRYPTODEV_AESNI_MB, rc, ACCEL_DPDK_CRYPTODEV_AESNI_MB);
1154 	}
1155 
1156 	/* If we have no crypto devices, there's no reason to continue. */
1157 	cdev_count = rte_cryptodev_count();
1158 	SPDK_NOTICELOG("Found crypto devices: %d\n", (int)cdev_count);
1159 	if (cdev_count == 0) {
1160 		return 0;
1161 	}
1162 
1163 	g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
1164 	if (g_mbuf_offset < 0) {
1165 		SPDK_ERRLOG("error registering dynamic field with DPDK\n");
1166 		return -EINVAL;
1167 	}
1168 
1169 	/* Create global mempools, shared by all devices regardless of type */
1170 	/* First determine max session size, most pools are shared by all the devices,
1171 	 * so we need to find the global max session size. */
1172 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1173 		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
1174 		if (sess_size > max_sess_size) {
1175 			max_sess_size = sess_size;
1176 		}
1177 	}
1178 
1179 #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
1180 	g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv",
1181 					       ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1182 					       NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1183 	if (g_session_mp_priv == NULL) {
1184 		SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
1185 		return -ENOMEM;
1186 	}
1187 
1188 	/* When the session private data mempool is allocated, the element size for the session
1189 	 * mempool should be 0. */
1190 	max_sess_size = 0;
1191 #endif
1192 
1193 	g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp",
1194 			ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1195 			SOCKET_ID_ANY);
1196 	if (g_session_mp == NULL) {
1197 		SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
1198 		rc = -ENOMEM;
1199 		goto error_create_session_mp;
1200 	}
1201 
1202 	g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
1203 					    ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1204 					    0, 0, SPDK_ENV_SOCKET_ID_ANY);
1205 	if (g_mbuf_mp == NULL) {
1206 		SPDK_ERRLOG("Cannot create mbuf pool\n");
1207 		rc = -ENOMEM;
1208 		goto error_create_mbuf;
1209 	}
1210 
1211 	/* We use per-op private data, as suggested by DPDK, to store the IV and
1212 	 * our own struct for queueing ops. */
1213 	g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp",
1214 			 RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1215 			 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
1216 			 ACCEL_DPDK_CRYPTODEV_IV_LENGTH, rte_socket_id());
1217 	if (g_crypto_op_mp == NULL) {
1218 		SPDK_ERRLOG("Cannot create op pool\n");
1219 		rc = -ENOMEM;
1220 		goto error_create_op;
1221 	}
1222 
1223 	/* Init all devices */
1224 	for (i = 0; i < cdev_count; i++) {
1225 		rc = accel_dpdk_cryptodev_create(i, num_lcores);
1226 		if (rc) {
1227 			goto err;
1228 		}
1229 	}
1230 
1231 	g_shinfo.free_cb = shinfo_free_cb;
1232 
1233 	spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb,
1234 				_accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel),
1235 				"accel_dpdk_cryptodev");
1236 
1237 	return 0;
1238 
1239 	/* Error cleanup paths. */
1240 err:
1241 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) {
1242 		TAILQ_REMOVE(&g_crypto_devices, device, link);
1243 		accel_dpdk_cryptodev_release(device);
1244 	}
1245 	rte_mempool_free(g_crypto_op_mp);
1246 	g_crypto_op_mp = NULL;
1247 error_create_op:
1248 	rte_mempool_free(g_mbuf_mp);
1249 	g_mbuf_mp = NULL;
1250 error_create_mbuf:
1251 	rte_mempool_free(g_session_mp);
1252 	g_session_mp = NULL;
1253 error_create_session_mp:
1254 	if (g_session_mp_priv != NULL) {
1255 		rte_mempool_free(g_session_mp_priv);
1256 		g_session_mp_priv = NULL;
1257 	}
1258 	return rc;
1259 }
1260 
1261 static void
1262 accel_dpdk_cryptodev_fini_cb(void *io_device)
1263 {
1264 	struct accel_dpdk_cryptodev_device *device, *tmp;
1265 
1266 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) {
1267 		TAILQ_REMOVE(&g_crypto_devices, device, link);
1268 		accel_dpdk_cryptodev_release(device);
1269 	}
1270 	rte_vdev_uninit(ACCEL_DPDK_CRYPTODEV_AESNI_MB);
1271 
1272 	rte_mempool_free(g_crypto_op_mp);
1273 	rte_mempool_free(g_mbuf_mp);
1274 	rte_mempool_free(g_session_mp);
1275 	if (g_session_mp_priv != NULL) {
1276 		rte_mempool_free(g_session_mp_priv);
1277 	}
1278 
1279 	spdk_accel_module_finish();
1280 }
1281 
1282 /* Called when the entire module is being torn down. */
1283 static void
1284 accel_dpdk_cryptodev_fini(void *ctx)
1285 {
1286 	if (g_crypto_op_mp) {
1287 		spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb);
1288 	}
1289 }
1290 
1291 static void
1292 accel_dpdk_cryptodev_key_handle_session_free(struct accel_dpdk_cryptodev_device *device,
1293 		void *session)
1294 {
1295 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
1296 	assert(device != NULL);
1297 
1298 	rte_cryptodev_sym_session_free(device->cdev_id, session);
1299 #else
1300 	rte_cryptodev_sym_session_free(session);
1301 #endif
1302 }
1303 
1304 static void *
1305 accel_dpdk_cryptodev_key_handle_session_create(struct accel_dpdk_cryptodev_device *device,
1306 		struct rte_crypto_sym_xform *cipher_xform)
1307 {
1308 	void *session;
1309 
1310 #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
1311 	session = rte_cryptodev_sym_session_create(device->cdev_id, cipher_xform, g_session_mp);
1312 #else
1313 	session = rte_cryptodev_sym_session_create(g_session_mp);
1314 	if (!session) {
1315 		return NULL;
1316 	}
1317 
1318 	if (rte_cryptodev_sym_session_init(device->cdev_id, session, cipher_xform, g_session_mp_priv) < 0) {
1319 		accel_dpdk_cryptodev_key_handle_session_free(device, session);
1320 		return NULL;
1321 	}
1322 #endif
1323 
1324 	return session;
1325 }
1326 
1327 static int
1328 accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key,
1329 		struct accel_dpdk_cryptodev_key_handle *key_handle)
1330 {
1331 	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1332 
1333 	key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1334 	key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET;
1335 	key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
1336 
1337 	switch (priv->cipher) {
1338 	case SPDK_ACCEL_CIPHER_AES_CBC:
1339 		key_handle->cipher_xform.cipher.key.data = key->key;
1340 		key_handle->cipher_xform.cipher.key.length = key->key_size;
1341 		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
1342 		break;
1343 	case SPDK_ACCEL_CIPHER_AES_XTS:
1344 		key_handle->cipher_xform.cipher.key.data = priv->xts_key;
1345 		key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size;
1346 		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
1347 		break;
1348 	default:
1349 		SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher);
1350 		return -EINVAL;
1351 	}
1352 
1353 	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1354 	key_handle->session_encrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
1355 				      &key_handle->cipher_xform);
1356 	if (!key_handle->session_encrypt) {
1357 		SPDK_ERRLOG("Failed to init encrypt session\n");
1358 		return -EINVAL;
1359 	}
1360 
1361 	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
1362 	key_handle->session_decrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
1363 				      &key_handle->cipher_xform);
1364 	if (!key_handle->session_decrypt) {
1365 		SPDK_ERRLOG("Failed to init decrypt session:");
1366 		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
1367 		return -EINVAL;
1368 	}
1369 
1370 	return 0;
1371 }
1372 
1373 static void
1374 accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key)
1375 {
1376 	struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp;
1377 	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1378 
1379 	TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) {
1380 		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
1381 		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_decrypt);
1382 		TAILQ_REMOVE(&priv->dev_keys, key_handle, link);
1383 		spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle));
1384 		free(key_handle);
1385 	}
1386 
1387 	if (priv->xts_key) {
1388 		spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 0, key->key_size + key->key2_size);
1389 	}
1390 	free(priv->xts_key);
1391 	free(priv);
1392 }
1393 
1394 static bool
1395 accel_dpdk_cryptodev_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
1396 {
1397 	switch (g_dpdk_cryptodev_driver) {
1398 	case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
1399 	case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB:
1400 		switch (cipher) {
1401 		case SPDK_ACCEL_CIPHER_AES_XTS:
1402 			return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE;
1403 		case SPDK_ACCEL_CIPHER_AES_CBC:
1404 			return key_size == ACCEL_DPDK_CRYPTODEV_AES_CBC_KEY_LENGTH;
1405 		default:
1406 			return false;
1407 		}
1408 	case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI:
1409 		switch (cipher) {
1410 		case SPDK_ACCEL_CIPHER_AES_XTS:
1411 			return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
1412 		default:
1413 			return false;
1414 		}
1415 	default:
1416 		return false;
1417 	}
1418 }
1419 
1420 static int
1421 accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key)
1422 {
1423 	struct accel_dpdk_cryptodev_device *device;
1424 	struct accel_dpdk_cryptodev_key_priv *priv;
1425 	struct accel_dpdk_cryptodev_key_handle *key_handle;
1426 	enum accel_dpdk_cryptodev_driver_type driver;
1427 	int rc;
1428 
1429 	driver = g_dpdk_cryptodev_driver;
1430 
1431 	priv = calloc(1, sizeof(*priv));
1432 	if (!priv) {
1433 		SPDK_ERRLOG("Memory allocation failed\n");
1434 		return -ENOMEM;
1435 	}
1436 	key->priv = priv;
1437 	priv->driver = driver;
1438 	priv->cipher = key->cipher;
1439 	TAILQ_INIT(&priv->dev_keys);
1440 
1441 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
1442 		/* DPDK expects the keys to be concatenated together. */
1443 		priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char));
1444 		if (!priv->xts_key) {
1445 			SPDK_ERRLOG("Memory allocation failed\n");
1446 			accel_dpdk_cryptodev_key_deinit(key);
1447 			return -ENOMEM;
1448 		}
1449 		memcpy(priv->xts_key, key->key, key->key_size);
1450 		memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size);
1451 	}
1452 
1453 	pthread_mutex_lock(&g_device_lock);
1454 	TAILQ_FOREACH(device, &g_crypto_devices, link) {
1455 		if (device->type != driver) {
1456 			continue;
1457 		}
1458 		key_handle = calloc(1, sizeof(*key_handle));
1459 		if (!key_handle) {
1460 			pthread_mutex_unlock(&g_device_lock);
1461 			accel_dpdk_cryptodev_key_deinit(key);
1462 			return -ENOMEM;
1463 		}
1464 		key_handle->device = device;
1465 		TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link);
1466 		rc = accel_dpdk_cryptodev_key_handle_configure(key, key_handle);
1467 		if (rc) {
1468 			pthread_mutex_unlock(&g_device_lock);
1469 			accel_dpdk_cryptodev_key_deinit(key);
1470 			return rc;
1471 		}
1472 		if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
1473 			/* For MLX5_PCI we need to register a key on each device since the key is
1474 			 * bound to a specific Protection Domain; for other drivers a single device
1475 			 * is enough, so stop after the first one */
1476 			break;
1477 		}
1478 	}
1479 	pthread_mutex_unlock(&g_device_lock);
1480 
1481 	if (TAILQ_EMPTY(&priv->dev_keys)) {
1482 		free(priv);
1483 		return -ENODEV;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
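/* Writes this module's configuration as RPC calls, e.g. with the default driver:
 *   {"method": "dpdk_cryptodev_scan_accel_module"}
 *   {"method": "dpdk_cryptodev_set_driver", "params": {"driver_name": "crypto_aesni_mb"}}
 */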
1489 static void
1490 accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w)
1491 {
1492 	spdk_json_write_object_begin(w);
1493 	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module");
1494 	spdk_json_write_object_end(w);
1495 
1496 	spdk_json_write_object_begin(w);
1497 	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver");
1498 	spdk_json_write_named_object_begin(w, "params");
1499 	spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]);
1500 	spdk_json_write_object_end(w);
1501 	spdk_json_write_object_end(w);
1502 }
1503 
1504 static int
1505 accel_dpdk_cryptodev_get_operation_info(enum spdk_accel_opcode opcode,
1506 					const struct spdk_accel_operation_exec_ctx *ctx,
1507 					struct spdk_accel_opcode_info *info)
1508 {
1509 	if (!accel_dpdk_cryptodev_supports_opcode(opcode)) {
1510 		SPDK_ERRLOG("Received unexpected opcode: %d", opcode);
1511 		assert(false);
1512 		return -EINVAL;
1513 	}
1514 
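	/* For QAT, report the base-2 log of the block size as the required alignment
	 * (e.g. 12 for a 4096-byte block size); other drivers report no alignment requirement. */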
1515 	switch (g_dpdk_cryptodev_driver) {
1516 	case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
1517 		info->required_alignment = spdk_u32log2(ctx->block_size);
1518 		break;
1519 	default:
1520 		info->required_alignment = 0;
1521 		break;
1522 	}
1523 
1524 	return 0;
1525 }
1526 
1527 static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = {
1528 	.module_init		= accel_dpdk_cryptodev_init,
1529 	.module_fini		= accel_dpdk_cryptodev_fini,
1530 	.write_config_json	= accel_dpdk_cryptodev_write_config_json,
1531 	.get_ctx_size		= accel_dpdk_cryptodev_ctx_size,
1532 	.name			= "dpdk_cryptodev",
1533 	.supports_opcode	= accel_dpdk_cryptodev_supports_opcode,
1534 	.get_io_channel		= accel_dpdk_cryptodev_get_io_channel,
1535 	.submit_tasks		= accel_dpdk_cryptodev_submit_tasks,
1536 	.crypto_key_init	= accel_dpdk_cryptodev_key_init,
1537 	.crypto_key_deinit	= accel_dpdk_cryptodev_key_deinit,
1538 	.crypto_supports_cipher	= accel_dpdk_cryptodev_supports_cipher,
1539 	.get_operation_info	= accel_dpdk_cryptodev_get_operation_info,
1540 };
1541