xref: /spdk/module/accel/dpdk_cryptodev/accel_dpdk_cryptodev.c (revision 186b109dd3a723612e3df79bb3d97699173d39e3)
1f5d1a924SAlexey Marchuk /*   SPDX-License-Identifier: BSD-3-Clause
2f5d1a924SAlexey Marchuk  *   Copyright (C) 2018 Intel Corporation.
38f4d98bbSAlexey Marchuk  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4f5d1a924SAlexey Marchuk  *   All rights reserved.
5f5d1a924SAlexey Marchuk  */
6f5d1a924SAlexey Marchuk 
761fbb000SAlexey Marchuk #include "accel_dpdk_cryptodev.h"
8f5d1a924SAlexey Marchuk 
961fbb000SAlexey Marchuk #include "spdk/accel.h"
105d2d59beSKonrad Sztyber #include "spdk/accel_module.h"
11f5d1a924SAlexey Marchuk #include "spdk/env.h"
12f5d1a924SAlexey Marchuk #include "spdk/likely.h"
13f5d1a924SAlexey Marchuk #include "spdk/thread.h"
1461fbb000SAlexey Marchuk #include "spdk/util.h"
15f5d1a924SAlexey Marchuk #include "spdk/log.h"
1661fbb000SAlexey Marchuk #include "spdk/json.h"
1761fbb000SAlexey Marchuk #include "spdk_internal/sgl.h"
18f5d1a924SAlexey Marchuk 
19f5d1a924SAlexey Marchuk #include <rte_bus_vdev.h>
20f5d1a924SAlexey Marchuk #include <rte_crypto.h>
21f5d1a924SAlexey Marchuk #include <rte_cryptodev.h>
22f5d1a924SAlexey Marchuk #include <rte_mbuf_dyn.h>
2310dcf2dbSAlexey Marchuk #include <rte_version.h>
24f5d1a924SAlexey Marchuk 
25f5d1a924SAlexey Marchuk /* The VF spread is the number of queue pairs between virtual functions; we use this to
26f5d1a924SAlexey Marchuk  * load balance across the QAT device.
27f5d1a924SAlexey Marchuk  */
2861fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD		32
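/* Illustrative example of how the spread is applied (see
 * accel_dpdk_cryptodev_assign_device_qps()): with a spread of 32, successive IO channels
 * are assigned QAT qpair indexes 0, 32, 64, ... modulo the total number of QAT qpairs,
 * so each channel tends to land on a different virtual function. */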
29f5d1a924SAlexey Marchuk 
30f5d1a924SAlexey Marchuk /* This controls how many ops will be dequeued from the crypto driver in one run
31f5d1a924SAlexey Marchuk  * of the poller. It is mainly a performance knob as it effectively determines how
32f5d1a924SAlexey Marchuk  * much work the poller has to do.  However, even that can vary between crypto drivers,
3361fbb000SAlexey Marchuk  * as the ACCEL_DPDK_CRYPTODEV_AESNI_MB driver, for example, does all the crypto work on dequeue, whereas the
34f5d1a924SAlexey Marchuk  * QAT driver just dequeues what has already been completed.
35f5d1a924SAlexey Marchuk  */
3661fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE	64
37f5d1a924SAlexey Marchuk 
38bf8e0656SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE (128)
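/* This bounds the local crypto op/mbuf arrays in accel_dpdk_cryptodev_process_task(), so a
 * single call submits at most this many crypto ops; larger tasks are resumed from the poller
 * once the submitted batch completes. */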
39f5d1a924SAlexey Marchuk 
40f5d1a924SAlexey Marchuk /* The number of MBUFS we need must be a power of two, and to support other small IOs
41f5d1a924SAlexey Marchuk  * in addition to the limits mentioned above, we go to the next power of two. It is a
42f5d1a924SAlexey Marchuk  * big number because it is a single mempool for both source and destination mbufs. It may
43f5d1a924SAlexey Marchuk  * need to be bigger to support multiple crypto drivers at once.
44f5d1a924SAlexey Marchuk  */
4561fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_NUM_MBUFS			32768
4661fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE		256
4761fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES		128
4861fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS		(2 * ACCEL_DPDK_CRYPTODEV_MAX_CRYPTO_VOLUMES)
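/* Presumably two sessions per supported crypto volume: each key handle holds a separate
 * encrypt and decrypt session (see struct accel_dpdk_cryptodev_key_handle). */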
4961fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE	0
50f5d1a924SAlexey Marchuk 
51f5d1a924SAlexey Marchuk /* This is the max number of IOs we can supply to any crypto device QP at one time.
52f5d1a924SAlexey Marchuk  * It can vary between drivers.
53f5d1a924SAlexey Marchuk  */
5461fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS		2048
55f5d1a924SAlexey Marchuk 
56f5d1a924SAlexey Marchuk /* At the moment, DPDK descriptor allocation for mlx5 has some issues. We use 512
5761fbb000SAlexey Marchuk  * as a compromise between performance and the time spent on initialization. */
5861fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5	512
59f5d1a924SAlexey Marchuk 
6061fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP		64
61f5d1a924SAlexey Marchuk 
6234edd9f1SKamil Godzwon /* Common for supported devices. */
6361fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS		2
6461fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_IV_OFFSET (sizeof(struct rte_crypto_op) + \
65f5d1a924SAlexey Marchuk                 sizeof(struct rte_crypto_sym_op) + \
6661fbb000SAlexey Marchuk                 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * \
67f5d1a924SAlexey Marchuk                  sizeof(struct rte_crypto_sym_xform)))
6861fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_IV_LENGTH			16
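/* Layout implied by ACCEL_DPDK_CRYPTODEV_IV_OFFSET (illustrative): each op allocated from
 * g_crypto_op_mp is laid out as
 *   [rte_crypto_op][rte_crypto_sym_op][2 x rte_crypto_sym_xform][16-byte IV]
 * and accel_dpdk_cryptodev_op_set_iv() writes the per-block IV at that offset via
 * rte_crypto_op_ctod_offset(). */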
69f5d1a924SAlexey Marchuk 
7061fbb000SAlexey Marchuk /* Driver names */
7161fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_AESNI_MB	"crypto_aesni_mb"
7261fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_QAT	"crypto_qat"
7361fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_QAT_ASYM	"crypto_qat_asym"
7461fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_MLX5	"mlx5_pci"
7513603217SZhangfei Gao #define ACCEL_DPDK_CRYPTODEV_UADK	"crypto_uadk"
76f5d1a924SAlexey Marchuk 
7761fbb000SAlexey Marchuk /* Supported ciphers */
7861fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_AES_CBC	"AES_CBC" /* QAT and ACCEL_DPDK_CRYPTODEV_AESNI_MB */
7961fbb000SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_AES_XTS	"AES_XTS" /* QAT and MLX5 */
8061fbb000SAlexey Marchuk 
8161fbb000SAlexey Marchuk /* Specific to AES_CBC. */
82a25d1d33SDiwakar Sharma #define ACCEL_DPDK_CRYPTODEV_AES_CBC_128_KEY_SIZE			16
83a25d1d33SDiwakar Sharma #define ACCEL_DPDK_CRYPTODEV_AES_CBC_256_KEY_SIZE			32
8461fbb000SAlexey Marchuk 
85bf8e0656SAlexey Marchuk /* Limit on the max memory length attached to an mbuf - rte_pktmbuf_attach_extbuf has a uint16_t `buf_len`
86bf8e0656SAlexey Marchuk  * parameter, so we use the closest aligned value, 32768, for better performance */
87bf8e0656SAlexey Marchuk #define ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN			32768
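/* Blocks larger than this, or blocks that cross a physically non-contiguous boundary, are
 * handled by chaining extra mbufs; see accel_dpdk_cryptodev_mbuf_chain_remainder(). */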
88bf8e0656SAlexey Marchuk 
8961fbb000SAlexey Marchuk /* Used to store IO context in mbuf */
9061fbb000SAlexey Marchuk static const struct rte_mbuf_dynfield rte_mbuf_dynfield_io_context = {
9161fbb000SAlexey Marchuk 	.name = "context_accel_dpdk_cryptodev",
9261fbb000SAlexey Marchuk 	.size = sizeof(uint64_t),
9361fbb000SAlexey Marchuk 	.align = __alignof__(uint64_t),
9461fbb000SAlexey Marchuk 	.flags = 0,
95f5d1a924SAlexey Marchuk };
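/* The dynfield stores a pointer to the owning accel_dpdk_cryptodev_task in every mbuf, so
 * completions can be matched back to their task regardless of completion order. */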
96f5d1a924SAlexey Marchuk 
9761fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_device;
98f5d1a924SAlexey Marchuk 
9961fbb000SAlexey Marchuk enum accel_dpdk_cryptodev_driver_type {
10061fbb000SAlexey Marchuk 	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB = 0,
10161fbb000SAlexey Marchuk 	ACCEL_DPDK_CRYPTODEV_DRIVER_QAT,
10261fbb000SAlexey Marchuk 	ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI,
10313603217SZhangfei Gao 	ACCEL_DPDK_CRYPTODEV_DRIVER_UADK,
10461fbb000SAlexey Marchuk 	ACCEL_DPDK_CRYPTODEV_DRIVER_LAST
105f5d1a924SAlexey Marchuk };
106f5d1a924SAlexey Marchuk 
10761fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_qp {
10861fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device;	/* ptr to crypto device */
10961fbb000SAlexey Marchuk 	uint32_t num_enqueued_ops;	/* Used to decide whether to poll the qp or not */
11061fbb000SAlexey Marchuk 	uint8_t qp; /* queue identifier */
11161fbb000SAlexey Marchuk 	bool in_use; /* whether this node is in use or not */
11261fbb000SAlexey Marchuk 	uint8_t index; /* used by QAT to load balance placement of qpairs */
11361fbb000SAlexey Marchuk 	TAILQ_ENTRY(accel_dpdk_cryptodev_qp) link;
11461fbb000SAlexey Marchuk };
11561fbb000SAlexey Marchuk 
11661fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_device {
11761fbb000SAlexey Marchuk 	enum accel_dpdk_cryptodev_driver_type type;
11861fbb000SAlexey Marchuk 	struct rte_cryptodev_info cdev_info; /* includes DPDK device friendly name */
11961fbb000SAlexey Marchuk 	uint32_t qp_desc_nr; /* max number of qp descriptors to be enqueued in burst */
12061fbb000SAlexey Marchuk 	uint8_t cdev_id; /* identifier for the device */
12161fbb000SAlexey Marchuk 	TAILQ_HEAD(, accel_dpdk_cryptodev_qp) qpairs;
12261fbb000SAlexey Marchuk 	TAILQ_ENTRY(accel_dpdk_cryptodev_device) link;
12361fbb000SAlexey Marchuk };
12461fbb000SAlexey Marchuk 
12561fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_key_handle {
12661fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device;
12761fbb000SAlexey Marchuk 	TAILQ_ENTRY(accel_dpdk_cryptodev_key_handle) link;
12810dcf2dbSAlexey Marchuk 	void *session_encrypt;	/* encryption session for this key */
12910dcf2dbSAlexey Marchuk 	void *session_decrypt;	/* decryption session for this key */
13061fbb000SAlexey Marchuk 	struct rte_crypto_sym_xform cipher_xform;		/* crypto control struct for this key */
13161fbb000SAlexey Marchuk };
13261fbb000SAlexey Marchuk 
13361fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_key_priv {
13461fbb000SAlexey Marchuk 	enum accel_dpdk_cryptodev_driver_type driver;
1359cd94384SJacek Kalwas 	enum spdk_accel_cipher cipher;
13661fbb000SAlexey Marchuk 	char *xts_key;
13761fbb000SAlexey Marchuk 	TAILQ_HEAD(, accel_dpdk_cryptodev_key_handle) dev_keys;
13861fbb000SAlexey Marchuk };
13961fbb000SAlexey Marchuk 
14061fbb000SAlexey Marchuk /* The crypto channel struct. It is allocated and freed on our behalf by the io channel code.
14161fbb000SAlexey Marchuk  * We store things in here that are needed on a per-thread basis, like the qpairs assigned
14261fbb000SAlexey Marchuk  * to this thread and the poller for this thread.
143f5d1a924SAlexey Marchuk  */
14461fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_io_channel {
14561fbb000SAlexey Marchuk 	/* completion poller */
14661fbb000SAlexey Marchuk 	struct spdk_poller *poller;
14761fbb000SAlexey Marchuk 	/* Array of qpairs for each available device. The specific device will be selected depending on the crypto key */
14861fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_LAST];
1496b7cca15SAlexey Marchuk 	/* Used to queue tasks when the qpair is full or only part of the crypto ops were submitted to the PMD */
1508f36853aSAlexey Marchuk 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks;
1516b7cca15SAlexey Marchuk 	/* Used to queue tasks that were completed in the submission path - to avoid calling the completion
1526b7cca15SAlexey Marchuk 	 * callback there and possibly overflowing the call stack */
1536b7cca15SAlexey Marchuk 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) completed_tasks;
15461fbb000SAlexey Marchuk };
15561fbb000SAlexey Marchuk 
15661fbb000SAlexey Marchuk struct accel_dpdk_cryptodev_task {
15761fbb000SAlexey Marchuk 	struct spdk_accel_task base;
158bf8e0656SAlexey Marchuk 	uint32_t cryop_completed;	/* The number of crypto operations completed by HW */
159bf8e0656SAlexey Marchuk 	uint32_t cryop_submitted;	/* The number of crypto operations submitted to HW */
160bf8e0656SAlexey Marchuk 	uint32_t cryop_total;		/* Total number of crypto operations in this task */
16161fbb000SAlexey Marchuk 	bool is_failed;
162bf8e0656SAlexey Marchuk 	bool inplace;
16361fbb000SAlexey Marchuk 	TAILQ_ENTRY(accel_dpdk_cryptodev_task) link;
16461fbb000SAlexey Marchuk };
165f5d1a924SAlexey Marchuk 
166f5d1a924SAlexey Marchuk /* Shared mempools between all devices on this system */
167f5d1a924SAlexey Marchuk static struct rte_mempool *g_session_mp = NULL;
168f5d1a924SAlexey Marchuk static struct rte_mempool *g_session_mp_priv = NULL;
169f5d1a924SAlexey Marchuk static struct rte_mempool *g_mbuf_mp = NULL;            /* mbuf mempool */
17061fbb000SAlexey Marchuk static int g_mbuf_offset;
171f5d1a924SAlexey Marchuk static struct rte_mempool *g_crypto_op_mp = NULL;	/* crypto operations, must be rte* mempool */
172f5d1a924SAlexey Marchuk 
173f5d1a924SAlexey Marchuk static struct rte_mbuf_ext_shared_info g_shinfo = {};   /* used by DPDK mbuf macro */
174f5d1a924SAlexey Marchuk 
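/* State used to spread QAT qpair assignment across IO channels: the total number of QAT
 * qpairs and the next preferred index; see accel_dpdk_cryptodev_assign_device_qps(). */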
17561fbb000SAlexey Marchuk static uint8_t g_qat_total_qp = 0;
17661fbb000SAlexey Marchuk static uint8_t g_next_qat_index;
177f5d1a924SAlexey Marchuk 
17861fbb000SAlexey Marchuk static const char *g_driver_names[] = {
17961fbb000SAlexey Marchuk 	[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB]	= ACCEL_DPDK_CRYPTODEV_AESNI_MB,
18061fbb000SAlexey Marchuk 	[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT]	= ACCEL_DPDK_CRYPTODEV_QAT,
18113603217SZhangfei Gao 	[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]	= ACCEL_DPDK_CRYPTODEV_MLX5,
18213603217SZhangfei Gao 	[ACCEL_DPDK_CRYPTODEV_DRIVER_UADK]	= ACCEL_DPDK_CRYPTODEV_UADK
18361fbb000SAlexey Marchuk };
18461fbb000SAlexey Marchuk static const char *g_cipher_names[] = {
1859cd94384SJacek Kalwas 	[SPDK_ACCEL_CIPHER_AES_CBC]	= ACCEL_DPDK_CRYPTODEV_AES_CBC,
1869cd94384SJacek Kalwas 	[SPDK_ACCEL_CIPHER_AES_XTS]	= ACCEL_DPDK_CRYPTODEV_AES_XTS,
187f5d1a924SAlexey Marchuk };
188f5d1a924SAlexey Marchuk 
18961fbb000SAlexey Marchuk static enum accel_dpdk_cryptodev_driver_type g_dpdk_cryptodev_driver =
19061fbb000SAlexey Marchuk 	ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
191f5d1a924SAlexey Marchuk 
19261fbb000SAlexey Marchuk /* Global list of all crypto devices */
19361fbb000SAlexey Marchuk static TAILQ_HEAD(, accel_dpdk_cryptodev_device) g_crypto_devices = TAILQ_HEAD_INITIALIZER(
19461fbb000SAlexey Marchuk 			g_crypto_devices);
19561fbb000SAlexey Marchuk static pthread_mutex_t g_device_lock = PTHREAD_MUTEX_INITIALIZER;
196f5d1a924SAlexey Marchuk 
19761fbb000SAlexey Marchuk static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module;
19861fbb000SAlexey Marchuk 
199bf8e0656SAlexey Marchuk static int accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
200bf8e0656SAlexey Marchuk 		struct accel_dpdk_cryptodev_task *task);
201bf8e0656SAlexey Marchuk 
20261fbb000SAlexey Marchuk void
20361fbb000SAlexey Marchuk accel_dpdk_cryptodev_enable(void)
204f5d1a924SAlexey Marchuk {
20561fbb000SAlexey Marchuk 	spdk_accel_module_list_add(&g_accel_dpdk_cryptodev_module);
20661fbb000SAlexey Marchuk }
207f5d1a924SAlexey Marchuk 
20861fbb000SAlexey Marchuk int
20961fbb000SAlexey Marchuk accel_dpdk_cryptodev_set_driver(const char *driver_name)
21061fbb000SAlexey Marchuk {
21161fbb000SAlexey Marchuk 	if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
21261fbb000SAlexey Marchuk 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
21361fbb000SAlexey Marchuk 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
21461fbb000SAlexey Marchuk 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
21561fbb000SAlexey Marchuk 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
21661fbb000SAlexey Marchuk 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
21713603217SZhangfei Gao 	} else if (strcmp(driver_name, ACCEL_DPDK_CRYPTODEV_UADK) == 0) {
21813603217SZhangfei Gao 		g_dpdk_cryptodev_driver = ACCEL_DPDK_CRYPTODEV_DRIVER_UADK;
21961fbb000SAlexey Marchuk 	} else {
22061fbb000SAlexey Marchuk 		SPDK_ERRLOG("Unsupported driver %s\n", driver_name);
22161fbb000SAlexey Marchuk 		return -EINVAL;
22261fbb000SAlexey Marchuk 	}
22361fbb000SAlexey Marchuk 
22461fbb000SAlexey Marchuk 	SPDK_NOTICELOG("Using driver %s\n", driver_name);
22561fbb000SAlexey Marchuk 
22661fbb000SAlexey Marchuk 	return 0;
22761fbb000SAlexey Marchuk }
22861fbb000SAlexey Marchuk 
22961fbb000SAlexey Marchuk const char *
23061fbb000SAlexey Marchuk accel_dpdk_cryptodev_get_driver(void)
23161fbb000SAlexey Marchuk {
23261fbb000SAlexey Marchuk 	return g_driver_names[g_dpdk_cryptodev_driver];
23361fbb000SAlexey Marchuk }
23461fbb000SAlexey Marchuk 
23561fbb000SAlexey Marchuk static inline uint16_t
236bf8e0656SAlexey Marchuk accel_dpdk_cryptodev_poll_qp(struct accel_dpdk_cryptodev_qp *qp,
237bf8e0656SAlexey Marchuk 			     struct accel_dpdk_cryptodev_io_channel *crypto_ch)
23861fbb000SAlexey Marchuk {
23961fbb000SAlexey Marchuk 	struct rte_crypto_op *dequeued_ops[ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
24061fbb000SAlexey Marchuk 	struct rte_mbuf *mbufs_to_free[2 * ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE];
24161fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_task *task;
24261fbb000SAlexey Marchuk 	uint32_t num_mbufs = 0;
24361fbb000SAlexey Marchuk 	int i;
24461fbb000SAlexey Marchuk 	uint16_t num_dequeued_ops;
24561fbb000SAlexey Marchuk 
24661fbb000SAlexey Marchuk 	/* Each run of the poller will get just what the device has available
24761fbb000SAlexey Marchuk 	 * at the moment we call it; we don't check again after draining the
24861fbb000SAlexey Marchuk 	 * first batch.
24961fbb000SAlexey Marchuk 	 */
25061fbb000SAlexey Marchuk 	num_dequeued_ops = rte_cryptodev_dequeue_burst(qp->device->cdev_id, qp->qp,
25161fbb000SAlexey Marchuk 			   dequeued_ops, ACCEL_DPDK_CRYPTODEV_MAX_DEQUEUE_BURST_SIZE);
25261fbb000SAlexey Marchuk 	/* Check if operation was processed successfully */
25361fbb000SAlexey Marchuk 	for (i = 0; i < num_dequeued_ops; i++) {
25461fbb000SAlexey Marchuk 
25561fbb000SAlexey Marchuk 		/* We don't know the order or association of the crypto ops wrt any
25661fbb000SAlexey Marchuk 		 * particular task, so we need to look at each one and determine if it's
25761fbb000SAlexey Marchuk 		 * the last one for its task or not.
25861fbb000SAlexey Marchuk 		 */
25961fbb000SAlexey Marchuk 		task = (struct accel_dpdk_cryptodev_task *)*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src,
26061fbb000SAlexey Marchuk 				g_mbuf_offset, uint64_t *);
26161fbb000SAlexey Marchuk 		assert(task != NULL);
26261fbb000SAlexey Marchuk 
26361fbb000SAlexey Marchuk 		if (dequeued_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
26461fbb000SAlexey Marchuk 			SPDK_ERRLOG("error with op %d status %u\n", i, dequeued_ops[i]->status);
26561fbb000SAlexey Marchuk 			/* Update the task status to error, we'll still process the
26661fbb000SAlexey Marchuk 			 * rest of the crypto ops for this task though so they
26761fbb000SAlexey Marchuk 			 * aren't left hanging.
26861fbb000SAlexey Marchuk 			 */
26961fbb000SAlexey Marchuk 			task->is_failed = true;
27061fbb000SAlexey Marchuk 		}
27161fbb000SAlexey Marchuk 
27261fbb000SAlexey Marchuk 		/* Return the associated src and dst mbufs by collecting them into
27361fbb000SAlexey Marchuk 		 * an array that we can use the bulk API to free after the loop.
27461fbb000SAlexey Marchuk 		 */
27561fbb000SAlexey Marchuk 		*RTE_MBUF_DYNFIELD(dequeued_ops[i]->sym->m_src, g_mbuf_offset, uint64_t *) = 0;
27661fbb000SAlexey Marchuk 		mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_src;
27761fbb000SAlexey Marchuk 		if (dequeued_ops[i]->sym->m_dst) {
27861fbb000SAlexey Marchuk 			mbufs_to_free[num_mbufs++] = (void *)dequeued_ops[i]->sym->m_dst;
27961fbb000SAlexey Marchuk 		}
28061fbb000SAlexey Marchuk 
281bf8e0656SAlexey Marchuk 		task->cryop_completed++;
282bf8e0656SAlexey Marchuk 		if (task->cryop_completed == task->cryop_total) {
28361fbb000SAlexey Marchuk 			/* Complete the IO */
28461fbb000SAlexey Marchuk 			spdk_accel_task_complete(&task->base, task->is_failed ? -EINVAL : 0);
285bf8e0656SAlexey Marchuk 		} else if (task->cryop_completed == task->cryop_submitted) {
286bf8e0656SAlexey Marchuk 			/* submit remaining crypto ops */
287bf8e0656SAlexey Marchuk 			int rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
288bf8e0656SAlexey Marchuk 
289bf8e0656SAlexey Marchuk 			if (spdk_unlikely(rc)) {
2908f36853aSAlexey Marchuk 				if (rc == -ENOMEM) {
2918f36853aSAlexey Marchuk 					TAILQ_INSERT_TAIL(&crypto_ch->queued_tasks, task, link);
2928f36853aSAlexey Marchuk 					continue;
2936b7cca15SAlexey Marchuk 				} else if (rc == -EALREADY) {
2946b7cca15SAlexey Marchuk 					/* -EALREADY means that a task is completed, but it might be unsafe to complete
2956b7cca15SAlexey Marchuk 					 * it if we are in the submission path. Since we are in the poller context, we can
2966b7cca15SAlexey Marchuk 					 * complete the task immediately */
2976b7cca15SAlexey Marchuk 					rc = 0;
2988f36853aSAlexey Marchuk 				}
299bf8e0656SAlexey Marchuk 				spdk_accel_task_complete(&task->base, rc);
300bf8e0656SAlexey Marchuk 			}
30161fbb000SAlexey Marchuk 		}
30261fbb000SAlexey Marchuk 	}
30361fbb000SAlexey Marchuk 
30461fbb000SAlexey Marchuk 	/* Now bulk free both mbufs and crypto operations. */
30561fbb000SAlexey Marchuk 	if (num_dequeued_ops > 0) {
30661fbb000SAlexey Marchuk 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)dequeued_ops, num_dequeued_ops);
30761fbb000SAlexey Marchuk 		assert(num_mbufs > 0);
30861fbb000SAlexey Marchuk 		/* This also releases chained mbufs if any. */
30961fbb000SAlexey Marchuk 		rte_pktmbuf_free_bulk(mbufs_to_free, num_mbufs);
31061fbb000SAlexey Marchuk 	}
31161fbb000SAlexey Marchuk 
31261fbb000SAlexey Marchuk 	assert(qp->num_enqueued_ops >= num_dequeued_ops);
31361fbb000SAlexey Marchuk 	qp->num_enqueued_ops -= num_dequeued_ops;
31461fbb000SAlexey Marchuk 
31561fbb000SAlexey Marchuk 	return num_dequeued_ops;
31661fbb000SAlexey Marchuk }
31761fbb000SAlexey Marchuk 
31861fbb000SAlexey Marchuk /* This is the poller for the crypto module. It uses a single API to dequeue whatever is ready at
31961fbb000SAlexey Marchuk  * the device. Then we need to decide if what we've got so far (including previous poller
32061fbb000SAlexey Marchuk  * runs) totals up to one or more complete tasks */
32161fbb000SAlexey Marchuk static int
32261fbb000SAlexey Marchuk accel_dpdk_cryptodev_poller(void *args)
32361fbb000SAlexey Marchuk {
32461fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = args;
32561fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *qp;
3268f36853aSAlexey Marchuk 	struct accel_dpdk_cryptodev_task *task, *task_tmp;
3278f36853aSAlexey Marchuk 	TAILQ_HEAD(, accel_dpdk_cryptodev_task) queued_tasks_tmp;
3286b7cca15SAlexey Marchuk 	uint32_t num_dequeued_ops = 0, num_enqueued_ops = 0, num_completed_tasks = 0;
3298f36853aSAlexey Marchuk 	int i, rc;
33061fbb000SAlexey Marchuk 
33161fbb000SAlexey Marchuk 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
33261fbb000SAlexey Marchuk 		qp = crypto_ch->device_qp[i];
33361fbb000SAlexey Marchuk 		/* Avoid polling "idle" qps since it may affect performance */
33461fbb000SAlexey Marchuk 		if (qp && qp->num_enqueued_ops) {
335bf8e0656SAlexey Marchuk 			num_dequeued_ops += accel_dpdk_cryptodev_poll_qp(qp, crypto_ch);
33661fbb000SAlexey Marchuk 		}
33761fbb000SAlexey Marchuk 	}
33861fbb000SAlexey Marchuk 
3398f36853aSAlexey Marchuk 	if (!TAILQ_EMPTY(&crypto_ch->queued_tasks)) {
3408f36853aSAlexey Marchuk 		TAILQ_INIT(&queued_tasks_tmp);
3418f36853aSAlexey Marchuk 
3428f36853aSAlexey Marchuk 		TAILQ_FOREACH_SAFE(task, &crypto_ch->queued_tasks, link, task_tmp) {
3438f36853aSAlexey Marchuk 			TAILQ_REMOVE(&crypto_ch->queued_tasks, task, link);
3448f36853aSAlexey Marchuk 			rc = accel_dpdk_cryptodev_process_task(crypto_ch, task);
3458f36853aSAlexey Marchuk 			if (spdk_unlikely(rc)) {
3468f36853aSAlexey Marchuk 				if (rc == -ENOMEM) {
3478f36853aSAlexey Marchuk 					TAILQ_INSERT_TAIL(&queued_tasks_tmp, task, link);
3488f36853aSAlexey Marchuk 					/* Other queued tasks may belong to other qpairs,
3498f36853aSAlexey Marchuk 					 * so process the whole list */
3508f36853aSAlexey Marchuk 					continue;
3516b7cca15SAlexey Marchuk 				} else if (rc == -EALREADY) {
3526b7cca15SAlexey Marchuk 					/* -EALREADY means that a task is completed, but it might be unsafe to complete
3536b7cca15SAlexey Marchuk 					 * it if we are in the submission path. Since we are in the poller context, we can
3546b7cca15SAlexey Marchuk 					 * complete the task immediately */
3556b7cca15SAlexey Marchuk 					rc = 0;
3568f36853aSAlexey Marchuk 				}
3578f36853aSAlexey Marchuk 				spdk_accel_task_complete(&task->base, rc);
3586b7cca15SAlexey Marchuk 				num_completed_tasks++;
3598f36853aSAlexey Marchuk 			} else {
3608f36853aSAlexey Marchuk 				num_enqueued_ops++;
3618f36853aSAlexey Marchuk 			}
3628f36853aSAlexey Marchuk 		}
3638f36853aSAlexey Marchuk 
3648f36853aSAlexey Marchuk 		TAILQ_SWAP(&crypto_ch->queued_tasks, &queued_tasks_tmp, accel_dpdk_cryptodev_task, link);
3658f36853aSAlexey Marchuk 	}
3668f36853aSAlexey Marchuk 
3676b7cca15SAlexey Marchuk 	TAILQ_FOREACH_SAFE(task, &crypto_ch->completed_tasks, link, task_tmp) {
3686b7cca15SAlexey Marchuk 		TAILQ_REMOVE(&crypto_ch->completed_tasks, task, link);
369a347d3e7SAlexey Marchuk 		spdk_accel_task_complete(&task->base, 0);
3706b7cca15SAlexey Marchuk 		num_completed_tasks++;
3716b7cca15SAlexey Marchuk 	}
3726b7cca15SAlexey Marchuk 
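	/* Report "busy" to the poller framework if anything was dequeued, (re)submitted or
	 * completed during this iteration. */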
3736b7cca15SAlexey Marchuk 	return !!(num_dequeued_ops + num_enqueued_ops + num_completed_tasks);
37461fbb000SAlexey Marchuk }
37561fbb000SAlexey Marchuk 
37661fbb000SAlexey Marchuk /* Allocate a new mbuf of @remainder size with data pointed to by @addr and chain
37761fbb000SAlexey Marchuk  * it to @orig_mbuf. */
37861fbb000SAlexey Marchuk static inline int
37961fbb000SAlexey Marchuk accel_dpdk_cryptodev_mbuf_chain_remainder(struct accel_dpdk_cryptodev_task *task,
38061fbb000SAlexey Marchuk 		struct rte_mbuf *orig_mbuf, uint8_t *addr, uint64_t *_remainder)
38161fbb000SAlexey Marchuk {
38261fbb000SAlexey Marchuk 	uint64_t phys_addr, phys_len, remainder = *_remainder;
38361fbb000SAlexey Marchuk 	struct rte_mbuf *chain_mbuf;
38461fbb000SAlexey Marchuk 	int rc;
38561fbb000SAlexey Marchuk 
38661fbb000SAlexey Marchuk 	phys_len = remainder;
38761fbb000SAlexey Marchuk 	phys_addr = spdk_vtophys((void *)addr, &phys_len);
38861fbb000SAlexey Marchuk 	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
38961fbb000SAlexey Marchuk 		return -EFAULT;
39061fbb000SAlexey Marchuk 	}
39161fbb000SAlexey Marchuk 	remainder = spdk_min(remainder, phys_len);
392bf8e0656SAlexey Marchuk 	remainder = spdk_min(remainder, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
39361fbb000SAlexey Marchuk 	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, (struct rte_mbuf **)&chain_mbuf, 1);
39461fbb000SAlexey Marchuk 	if (spdk_unlikely(rc)) {
39561fbb000SAlexey Marchuk 		return -ENOMEM;
39661fbb000SAlexey Marchuk 	}
39761fbb000SAlexey Marchuk 	/* Store context in every mbuf as we don't know anything about completion order */
39861fbb000SAlexey Marchuk 	*RTE_MBUF_DYNFIELD(chain_mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
39961fbb000SAlexey Marchuk 	rte_pktmbuf_attach_extbuf(chain_mbuf, addr, phys_addr, remainder, &g_shinfo);
40061fbb000SAlexey Marchuk 	rte_pktmbuf_append(chain_mbuf, remainder);
40161fbb000SAlexey Marchuk 
40234edd9f1SKamil Godzwon 	/* Chained buffer is released by rte_pktmbuf_free_bulk() automagically. */
40361fbb000SAlexey Marchuk 	rte_pktmbuf_chain(orig_mbuf, chain_mbuf);
40461fbb000SAlexey Marchuk 	*_remainder = remainder;
40561fbb000SAlexey Marchuk 
40661fbb000SAlexey Marchuk 	return 0;
40761fbb000SAlexey Marchuk }
40861fbb000SAlexey Marchuk 
40961fbb000SAlexey Marchuk /* Attach the data buffer pointed to by @addr to @mbuf. Return the utilized length of the
41061fbb000SAlexey Marchuk  * contiguous space that was physically available. */
41161fbb000SAlexey Marchuk static inline uint64_t
41261fbb000SAlexey Marchuk accel_dpdk_cryptodev_mbuf_attach_buf(struct accel_dpdk_cryptodev_task *task, struct rte_mbuf *mbuf,
41361fbb000SAlexey Marchuk 				     uint8_t *addr, uint32_t len)
41461fbb000SAlexey Marchuk {
41561fbb000SAlexey Marchuk 	uint64_t phys_addr, phys_len;
41661fbb000SAlexey Marchuk 
41761fbb000SAlexey Marchuk 	/* Store context in every mbuf as we don't know anything about completion order */
41861fbb000SAlexey Marchuk 	*RTE_MBUF_DYNFIELD(mbuf, g_mbuf_offset, uint64_t *) = (uint64_t)task;
41961fbb000SAlexey Marchuk 
42061fbb000SAlexey Marchuk 	phys_len = len;
42161fbb000SAlexey Marchuk 	phys_addr = spdk_vtophys((void *)addr, &phys_len);
42261fbb000SAlexey Marchuk 	if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR || phys_len == 0)) {
42361fbb000SAlexey Marchuk 		return 0;
42461fbb000SAlexey Marchuk 	}
42561fbb000SAlexey Marchuk 	assert(phys_len <= len);
426bf8e0656SAlexey Marchuk 	phys_len = spdk_min(phys_len, ACCEL_DPDK_CRYPTODEV_MAX_MBUF_LEN);
42761fbb000SAlexey Marchuk 
42861fbb000SAlexey Marchuk 	/* Set the mbuf elements address and length. */
42961fbb000SAlexey Marchuk 	rte_pktmbuf_attach_extbuf(mbuf, addr, phys_addr, phys_len, &g_shinfo);
43061fbb000SAlexey Marchuk 	rte_pktmbuf_append(mbuf, phys_len);
43161fbb000SAlexey Marchuk 
43261fbb000SAlexey Marchuk 	return phys_len;
43361fbb000SAlexey Marchuk }
43461fbb000SAlexey Marchuk 
43561fbb000SAlexey Marchuk static inline struct accel_dpdk_cryptodev_key_handle *
43661fbb000SAlexey Marchuk accel_dpdk_find_key_handle_in_channel(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
43761fbb000SAlexey Marchuk 				      struct accel_dpdk_cryptodev_key_priv *key)
43861fbb000SAlexey Marchuk {
43961fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_handle *key_handle;
44061fbb000SAlexey Marchuk 
44161fbb000SAlexey Marchuk 	if (key->driver == ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
44261fbb000SAlexey Marchuk 		/* The crypto key is registered on all available devices, while the io_channel opens a CQ/QP on a single device.
44361fbb000SAlexey Marchuk 		 * We need to iterate the list of key entries to find the one matching this channel's device */
44461fbb000SAlexey Marchuk 		TAILQ_FOREACH(key_handle, &key->dev_keys, link) {
44561fbb000SAlexey Marchuk 			if (key_handle->device->cdev_id ==
44661fbb000SAlexey Marchuk 			    crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI]->device->cdev_id) {
44761fbb000SAlexey Marchuk 				return key_handle;
44861fbb000SAlexey Marchuk 			}
44961fbb000SAlexey Marchuk 		}
45061fbb000SAlexey Marchuk 		return NULL;
45161fbb000SAlexey Marchuk 	} else {
45261fbb000SAlexey Marchuk 		return TAILQ_FIRST(&key->dev_keys);
45361fbb000SAlexey Marchuk 	}
45461fbb000SAlexey Marchuk }
45561fbb000SAlexey Marchuk 
45661fbb000SAlexey Marchuk static inline int
45761fbb000SAlexey Marchuk accel_dpdk_cryptodev_task_alloc_resources(struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
45861fbb000SAlexey Marchuk 		struct rte_crypto_op **crypto_ops, int count)
45961fbb000SAlexey Marchuk {
46061fbb000SAlexey Marchuk 	int rc;
46161fbb000SAlexey Marchuk 
46261fbb000SAlexey Marchuk 	/* Get the number of source mbufs that we need. These will always be 1:1 because we
46361fbb000SAlexey Marchuk 	 * don't support chaining. The reason is our decision to use the LBA as the IV:
46461fbb000SAlexey Marchuk 	 * there can be no case where we'd need >1 mbuf per crypto op or the
46561fbb000SAlexey Marchuk 	 * op would be > 1 LBA.
46661fbb000SAlexey Marchuk 	 */
46761fbb000SAlexey Marchuk 	rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, src_mbufs, count);
46861fbb000SAlexey Marchuk 	if (rc) {
46961fbb000SAlexey Marchuk 		SPDK_ERRLOG("Failed to get src_mbufs!\n");
47061fbb000SAlexey Marchuk 		return -ENOMEM;
47161fbb000SAlexey Marchuk 	}
47261fbb000SAlexey Marchuk 
47361fbb000SAlexey Marchuk 	/* Get the same amount to describe the destination. If the crypto operation is inplace, we don't need them and just skip this */
47461fbb000SAlexey Marchuk 	if (dst_mbufs) {
47561fbb000SAlexey Marchuk 		rc = rte_pktmbuf_alloc_bulk(g_mbuf_mp, dst_mbufs, count);
47661fbb000SAlexey Marchuk 		if (rc) {
47761fbb000SAlexey Marchuk 			SPDK_ERRLOG("Failed to get dst_mbufs!\n");
47861fbb000SAlexey Marchuk 			goto err_free_src;
47961fbb000SAlexey Marchuk 		}
48061fbb000SAlexey Marchuk 	}
48161fbb000SAlexey Marchuk 
48261fbb000SAlexey Marchuk #ifdef __clang_analyzer__
48361fbb000SAlexey Marchuk 	/* silence scan-build false positive */
48461fbb000SAlexey Marchuk 	SPDK_CLANG_ANALYZER_PREINIT_PTR_ARRAY(crypto_ops, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE,
48561fbb000SAlexey Marchuk 					      0x1000);
48661fbb000SAlexey Marchuk #endif
48761fbb000SAlexey Marchuk 	/* Allocate crypto operations. */
48861fbb000SAlexey Marchuk 	rc = rte_crypto_op_bulk_alloc(g_crypto_op_mp,
48961fbb000SAlexey Marchuk 				      RTE_CRYPTO_OP_TYPE_SYMMETRIC,
49061fbb000SAlexey Marchuk 				      crypto_ops, count);
49161fbb000SAlexey Marchuk 	if (rc < count) {
492bf8e0656SAlexey Marchuk 		SPDK_ERRLOG("Failed to allocate crypto ops! rc %d\n", rc);
49361fbb000SAlexey Marchuk 		goto err_free_ops;
49461fbb000SAlexey Marchuk 	}
49561fbb000SAlexey Marchuk 
49661fbb000SAlexey Marchuk 	return 0;
49761fbb000SAlexey Marchuk 
49861fbb000SAlexey Marchuk err_free_ops:
49961fbb000SAlexey Marchuk 	if (rc > 0) {
50061fbb000SAlexey Marchuk 		rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, rc);
50161fbb000SAlexey Marchuk 	}
50261fbb000SAlexey Marchuk 	if (dst_mbufs) {
50361fbb000SAlexey Marchuk 		/* This also releases chained mbufs if any. */
50461fbb000SAlexey Marchuk 		rte_pktmbuf_free_bulk(dst_mbufs, count);
50561fbb000SAlexey Marchuk 	}
50661fbb000SAlexey Marchuk err_free_src:
50761fbb000SAlexey Marchuk 	/* This also releases chained mbufs if any. */
50861fbb000SAlexey Marchuk 	rte_pktmbuf_free_bulk(src_mbufs, count);
50961fbb000SAlexey Marchuk 
51061fbb000SAlexey Marchuk 	return -ENOMEM;
51161fbb000SAlexey Marchuk }
51261fbb000SAlexey Marchuk 
51361fbb000SAlexey Marchuk static inline int
51461fbb000SAlexey Marchuk accel_dpdk_cryptodev_mbuf_add_single_block(struct spdk_iov_sgl *sgl, struct rte_mbuf *mbuf,
51561fbb000SAlexey Marchuk 		struct accel_dpdk_cryptodev_task *task)
51661fbb000SAlexey Marchuk {
51761fbb000SAlexey Marchuk 	int rc;
51861fbb000SAlexey Marchuk 	uint8_t *buf_addr;
51961fbb000SAlexey Marchuk 	uint64_t phys_len;
52061fbb000SAlexey Marchuk 	uint64_t remainder;
5218f4d98bbSAlexey Marchuk 	uint64_t buf_len;
52261fbb000SAlexey Marchuk 
5238f4d98bbSAlexey Marchuk 	assert(sgl->iov->iov_len > sgl->iov_offset);
5248f4d98bbSAlexey Marchuk 	buf_len = spdk_min(task->base.block_size, sgl->iov->iov_len - sgl->iov_offset);
52561fbb000SAlexey Marchuk 	buf_addr = sgl->iov->iov_base + sgl->iov_offset;
52661fbb000SAlexey Marchuk 	phys_len = accel_dpdk_cryptodev_mbuf_attach_buf(task, mbuf, buf_addr, buf_len);
52761fbb000SAlexey Marchuk 	if (spdk_unlikely(phys_len == 0)) {
52861fbb000SAlexey Marchuk 		return -EFAULT;
52961fbb000SAlexey Marchuk 	}
53061fbb000SAlexey Marchuk 	buf_len = spdk_min(buf_len, phys_len);
53161fbb000SAlexey Marchuk 	spdk_iov_sgl_advance(sgl, buf_len);
53261fbb000SAlexey Marchuk 
53361fbb000SAlexey Marchuk 	/* Handle the case of page boundary. */
53413f97e67SAlexey Marchuk 	assert(task->base.block_size >= buf_len);
53561fbb000SAlexey Marchuk 	remainder = task->base.block_size - buf_len;
53661fbb000SAlexey Marchuk 	while (remainder) {
53761fbb000SAlexey Marchuk 		buf_len = spdk_min(remainder, sgl->iov->iov_len - sgl->iov_offset);
53861fbb000SAlexey Marchuk 		buf_addr = sgl->iov->iov_base + sgl->iov_offset;
53961fbb000SAlexey Marchuk 		rc = accel_dpdk_cryptodev_mbuf_chain_remainder(task, mbuf, buf_addr, &buf_len);
54061fbb000SAlexey Marchuk 		if (spdk_unlikely(rc)) {
54161fbb000SAlexey Marchuk 			return rc;
54261fbb000SAlexey Marchuk 		}
54361fbb000SAlexey Marchuk 		spdk_iov_sgl_advance(sgl, buf_len);
54461fbb000SAlexey Marchuk 		remainder -= buf_len;
54561fbb000SAlexey Marchuk 	}
54661fbb000SAlexey Marchuk 
54761fbb000SAlexey Marchuk 	return 0;
54861fbb000SAlexey Marchuk }
54961fbb000SAlexey Marchuk 
55061fbb000SAlexey Marchuk static inline void
55161fbb000SAlexey Marchuk accel_dpdk_cryptodev_op_set_iv(struct rte_crypto_op *crypto_op, uint64_t iv)
55261fbb000SAlexey Marchuk {
55361fbb000SAlexey Marchuk 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(crypto_op, uint8_t *, ACCEL_DPDK_CRYPTODEV_IV_OFFSET);
55461fbb000SAlexey Marchuk 
55561fbb000SAlexey Marchuk 	/* Set the IV - we use the LBA of the crypto_op */
55661fbb000SAlexey Marchuk 	memset(iv_ptr, 0, ACCEL_DPDK_CRYPTODEV_IV_LENGTH);
55761fbb000SAlexey Marchuk 	rte_memcpy(iv_ptr, &iv, sizeof(uint64_t));
55861fbb000SAlexey Marchuk }
55961fbb000SAlexey Marchuk 
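/* Compact the local arrays after a partial enqueue: move the crypto ops and mbufs that were
 * not submitted to the front, so that exactly @cryop_cnt remaining entries can be freed or
 * retried by the caller. */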
5606b7cca15SAlexey Marchuk static inline void
5616b7cca15SAlexey Marchuk accel_dpdk_cryptodev_update_resources_from_pools(struct rte_crypto_op **crypto_ops,
5626b7cca15SAlexey Marchuk 		struct rte_mbuf **src_mbufs, struct rte_mbuf **dst_mbufs,
5636b7cca15SAlexey Marchuk 		uint32_t num_enqueued_ops, uint32_t cryop_cnt)
5646b7cca15SAlexey Marchuk {
5656b7cca15SAlexey Marchuk 	memmove(crypto_ops, &crypto_ops[num_enqueued_ops], sizeof(crypto_ops[0]) * cryop_cnt);
5666b7cca15SAlexey Marchuk 	memmove(src_mbufs, &src_mbufs[num_enqueued_ops], sizeof(src_mbufs[0]) * cryop_cnt);
5676b7cca15SAlexey Marchuk 	if (dst_mbufs) {
5686b7cca15SAlexey Marchuk 		memmove(dst_mbufs, &dst_mbufs[num_enqueued_ops], sizeof(dst_mbufs[0]) * cryop_cnt);
5696b7cca15SAlexey Marchuk 	}
5706b7cca15SAlexey Marchuk }
5716b7cca15SAlexey Marchuk 
57261fbb000SAlexey Marchuk static int
57361fbb000SAlexey Marchuk accel_dpdk_cryptodev_process_task(struct accel_dpdk_cryptodev_io_channel *crypto_ch,
57461fbb000SAlexey Marchuk 				  struct accel_dpdk_cryptodev_task *task)
57561fbb000SAlexey Marchuk {
57661fbb000SAlexey Marchuk 	uint16_t num_enqueued_ops;
57761fbb000SAlexey Marchuk 	uint32_t cryop_cnt;
57861fbb000SAlexey Marchuk 	uint32_t crypto_len = task->base.block_size;
57995707610SKonrad Sztyber 	uint64_t dst_length, total_length;
580bf8e0656SAlexey Marchuk 	uint32_t sgl_offset;
5818f36853aSAlexey Marchuk 	uint32_t qp_capacity;
582bf8e0656SAlexey Marchuk 	uint64_t iv_start;
58395707610SKonrad Sztyber 	uint32_t i, crypto_index;
58461fbb000SAlexey Marchuk 	struct rte_crypto_op *crypto_ops[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
58561fbb000SAlexey Marchuk 	struct rte_mbuf *src_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
58661fbb000SAlexey Marchuk 	struct rte_mbuf *dst_mbufs[ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE];
58710dcf2dbSAlexey Marchuk 	void *session;
58861fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_priv *priv;
58961fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_handle *key_handle;
59061fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *qp;
59161fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *dev;
59261fbb000SAlexey Marchuk 	struct spdk_iov_sgl src, dst = {};
59361fbb000SAlexey Marchuk 	int rc;
5946b7cca15SAlexey Marchuk 	bool inplace = task->inplace;
59561fbb000SAlexey Marchuk 
59661fbb000SAlexey Marchuk 	if (spdk_unlikely(!task->base.crypto_key ||
59761fbb000SAlexey Marchuk 			  task->base.crypto_key->module_if != &g_accel_dpdk_cryptodev_module)) {
59861fbb000SAlexey Marchuk 		return -EINVAL;
59961fbb000SAlexey Marchuk 	}
60061fbb000SAlexey Marchuk 
601bf8e0656SAlexey Marchuk 	priv = task->base.crypto_key->priv;
602bf8e0656SAlexey Marchuk 	assert(priv->driver < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST);
603bf8e0656SAlexey Marchuk 
604bf8e0656SAlexey Marchuk 	if (task->cryop_completed) {
605bf8e0656SAlexey Marchuk 		/* We continue to process remaining blocks */
606bf8e0656SAlexey Marchuk 		assert(task->cryop_submitted == task->cryop_completed);
607bf8e0656SAlexey Marchuk 		assert(task->cryop_total > task->cryop_completed);
608bf8e0656SAlexey Marchuk 		cryop_cnt = task->cryop_total - task->cryop_completed;
609bf8e0656SAlexey Marchuk 		sgl_offset = task->cryop_completed * crypto_len;
610bf8e0656SAlexey Marchuk 		iv_start = task->base.iv + task->cryop_completed;
611bf8e0656SAlexey Marchuk 	} else {
612bf8e0656SAlexey Marchuk 		/* That is a new task */
61395707610SKonrad Sztyber 		total_length = 0;
61495707610SKonrad Sztyber 		for (i = 0; i < task->base.s.iovcnt; i++) {
61595707610SKonrad Sztyber 			total_length += task->base.s.iovs[i].iov_len;
61695707610SKonrad Sztyber 		}
61795707610SKonrad Sztyber 		dst_length = 0;
61895707610SKonrad Sztyber 		for (i = 0; i < task->base.d.iovcnt; i++) {
61995707610SKonrad Sztyber 			dst_length += task->base.d.iovs[i].iov_len;
62095707610SKonrad Sztyber 		}
62195707610SKonrad Sztyber 
62295707610SKonrad Sztyber 		if (spdk_unlikely(total_length != dst_length || !total_length)) {
62395707610SKonrad Sztyber 			return -ERANGE;
62495707610SKonrad Sztyber 		}
62595707610SKonrad Sztyber 		if (spdk_unlikely(total_length % task->base.block_size != 0)) {
62695707610SKonrad Sztyber 			return -EINVAL;
62795707610SKonrad Sztyber 		}
62895707610SKonrad Sztyber 
629bf8e0656SAlexey Marchuk 		cryop_cnt = total_length / task->base.block_size;
630bf8e0656SAlexey Marchuk 		task->cryop_total = cryop_cnt;
631bf8e0656SAlexey Marchuk 		sgl_offset = 0;
632bf8e0656SAlexey Marchuk 		iv_start = task->base.iv;
63361fbb000SAlexey Marchuk 	}
63461fbb000SAlexey Marchuk 
635bf8e0656SAlexey Marchuk 	/* Limit the number of crypto ops that we can process at once */
636bf8e0656SAlexey Marchuk 	cryop_cnt = spdk_min(cryop_cnt, ACCEL_DPDK_CRYPTODEV_MAX_ENQUEUE_ARRAY_SIZE);
637bf8e0656SAlexey Marchuk 
63861fbb000SAlexey Marchuk 	qp = crypto_ch->device_qp[priv->driver];
63961fbb000SAlexey Marchuk 	assert(qp);
64061fbb000SAlexey Marchuk 	dev = qp->device;
64161fbb000SAlexey Marchuk 	assert(dev);
6428f36853aSAlexey Marchuk 	assert(dev->qp_desc_nr >= qp->num_enqueued_ops);
6438f36853aSAlexey Marchuk 
6448f36853aSAlexey Marchuk 	qp_capacity = dev->qp_desc_nr - qp->num_enqueued_ops;
6458f36853aSAlexey Marchuk 	cryop_cnt = spdk_min(cryop_cnt, qp_capacity);
6468f36853aSAlexey Marchuk 	if (spdk_unlikely(cryop_cnt == 0)) {
6478f36853aSAlexey Marchuk 		/* QP is full */
6488f36853aSAlexey Marchuk 		return -ENOMEM;
6498f36853aSAlexey Marchuk 	}
65061fbb000SAlexey Marchuk 
65161fbb000SAlexey Marchuk 	key_handle = accel_dpdk_find_key_handle_in_channel(crypto_ch, priv);
65261fbb000SAlexey Marchuk 	if (spdk_unlikely(!key_handle)) {
65361fbb000SAlexey Marchuk 		SPDK_ERRLOG("Failed to find a key handle, driver %s, cipher %s\n", g_driver_names[priv->driver],
65461fbb000SAlexey Marchuk 			    g_cipher_names[priv->cipher]);
65561fbb000SAlexey Marchuk 		return -EINVAL;
65661fbb000SAlexey Marchuk 	}
65761fbb000SAlexey Marchuk 	/* mlx5_pci binds keys to a specific device, we can't use a key with any device */
65861fbb000SAlexey Marchuk 	assert(dev == key_handle->device || priv->driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
65961fbb000SAlexey Marchuk 
6605105dc5dSKonrad Sztyber 	if (task->base.op_code == SPDK_ACCEL_OPC_ENCRYPT) {
66161fbb000SAlexey Marchuk 		session = key_handle->session_encrypt;
6625105dc5dSKonrad Sztyber 	} else if (task->base.op_code == SPDK_ACCEL_OPC_DECRYPT) {
66361fbb000SAlexey Marchuk 		session = key_handle->session_decrypt;
66461fbb000SAlexey Marchuk 	} else {
66561fbb000SAlexey Marchuk 		return -EINVAL;
66661fbb000SAlexey Marchuk 	}
66761fbb000SAlexey Marchuk 
6686b7cca15SAlexey Marchuk 	rc = accel_dpdk_cryptodev_task_alloc_resources(src_mbufs, inplace ? NULL : dst_mbufs,
669bf8e0656SAlexey Marchuk 			crypto_ops, cryop_cnt);
67061fbb000SAlexey Marchuk 	if (rc) {
67161fbb000SAlexey Marchuk 		return rc;
67261fbb000SAlexey Marchuk 	}
67361fbb000SAlexey Marchuk 
67461fbb000SAlexey Marchuk 	/* As we don't support chaining because of a decision to use LBA as IV, construction
67561fbb000SAlexey Marchuk 	 * of crypto operations is straightforward. We build both the op, the mbuf and the
67661fbb000SAlexey Marchuk 	 * dst_mbuf in our local arrays by looping through the length of the accel task and
67761fbb000SAlexey Marchuk 	 * picking off LBA sized blocks of memory from the IOVs as we walk through them. Each
67861fbb000SAlexey Marchuk 	 * LBA sized chunk of memory will correspond 1:1 to a crypto operation and a single
67961fbb000SAlexey Marchuk 	 * mbuf per crypto operation.
68061fbb000SAlexey Marchuk 	 */
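	/* Illustrative example (numbers are hypothetical): a task with a total length of 16 KiB
	 * and a block_size of 4 KiB is split into 4 crypto ops; op N covers the N-th 4 KiB block
	 * and uses IV = task->base.iv + N (iv_start is incremented per op below). */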
6818f4d98bbSAlexey Marchuk 	spdk_iov_sgl_init(&src, task->base.s.iovs, task->base.s.iovcnt, 0);
6828f4d98bbSAlexey Marchuk 	spdk_iov_sgl_advance(&src, sgl_offset);
6836b7cca15SAlexey Marchuk 	if (!inplace) {
6848f4d98bbSAlexey Marchuk 		spdk_iov_sgl_init(&dst, task->base.d.iovs, task->base.d.iovcnt, 0);
6858f4d98bbSAlexey Marchuk 		spdk_iov_sgl_advance(&dst, sgl_offset);
68661fbb000SAlexey Marchuk 	}
68761fbb000SAlexey Marchuk 
68861fbb000SAlexey Marchuk 	for (crypto_index = 0; crypto_index < cryop_cnt; crypto_index++) {
68961fbb000SAlexey Marchuk 		rc = accel_dpdk_cryptodev_mbuf_add_single_block(&src, src_mbufs[crypto_index], task);
69061fbb000SAlexey Marchuk 		if (spdk_unlikely(rc)) {
6919c636a02SAlexey Marchuk 			goto free_ops;
69261fbb000SAlexey Marchuk 		}
69361fbb000SAlexey Marchuk 		accel_dpdk_cryptodev_op_set_iv(crypto_ops[crypto_index], iv_start);
69461fbb000SAlexey Marchuk 		iv_start++;
69561fbb000SAlexey Marchuk 
69661fbb000SAlexey Marchuk 		/* Set the data to encrypt/decrypt length */
69761fbb000SAlexey Marchuk 		crypto_ops[crypto_index]->sym->cipher.data.length = crypto_len;
69861fbb000SAlexey Marchuk 		crypto_ops[crypto_index]->sym->cipher.data.offset = 0;
69961fbb000SAlexey Marchuk 		rte_crypto_op_attach_sym_session(crypto_ops[crypto_index], session);
70061fbb000SAlexey Marchuk 
70161fbb000SAlexey Marchuk 		/* link the mbuf to the crypto op. */
70261fbb000SAlexey Marchuk 		crypto_ops[crypto_index]->sym->m_src = src_mbufs[crypto_index];
70361fbb000SAlexey Marchuk 
7046b7cca15SAlexey Marchuk 		if (inplace) {
70561fbb000SAlexey Marchuk 			crypto_ops[crypto_index]->sym->m_dst = NULL;
70661fbb000SAlexey Marchuk 		} else {
707bf8e0656SAlexey Marchuk #ifndef __clang_analyzer__
708bf8e0656SAlexey Marchuk 			/* scan-build thinks that dst_mbufs is not initialized */
70961fbb000SAlexey Marchuk 			rc = accel_dpdk_cryptodev_mbuf_add_single_block(&dst, dst_mbufs[crypto_index], task);
71061fbb000SAlexey Marchuk 			if (spdk_unlikely(rc)) {
7119c636a02SAlexey Marchuk 				goto free_ops;
71261fbb000SAlexey Marchuk 			}
71361fbb000SAlexey Marchuk 			crypto_ops[crypto_index]->sym->m_dst = dst_mbufs[crypto_index];
714bf8e0656SAlexey Marchuk #endif
71561fbb000SAlexey Marchuk 		}
71661fbb000SAlexey Marchuk 	}
71761fbb000SAlexey Marchuk 
71861fbb000SAlexey Marchuk 	/* Enqueue everything we've got but limit by the max number of descriptors we
71961fbb000SAlexey Marchuk 	 * configured the crypto device for.
72061fbb000SAlexey Marchuk 	 */
7218f36853aSAlexey Marchuk 	num_enqueued_ops = rte_cryptodev_enqueue_burst(dev->cdev_id, qp->qp, crypto_ops, cryop_cnt);
7229c636a02SAlexey Marchuk 	/* This value is used in the completion callback to determine when the accel task is complete. */
7239c636a02SAlexey Marchuk 	task->cryop_submitted += num_enqueued_ops;
72461fbb000SAlexey Marchuk 	qp->num_enqueued_ops += num_enqueued_ops;
72561fbb000SAlexey Marchuk 	/* If we were unable to enqueue everything, we need to decide what to do based
72661fbb000SAlexey Marchuk 	 * on the status of the first op that was not enqueued.
72761fbb000SAlexey Marchuk 	 */
72861fbb000SAlexey Marchuk 	if (num_enqueued_ops < cryop_cnt) {
72961fbb000SAlexey Marchuk 		switch (crypto_ops[num_enqueued_ops]->status) {
7306b7cca15SAlexey Marchuk 		case RTE_CRYPTO_OP_STATUS_SUCCESS:
7316b7cca15SAlexey Marchuk 			/* Crypto operation might be completed successfully but enqueuing to a completion ring might fail.
7326b7cca15SAlexey Marchuk 			 * That might happen with SW PMDs like openssl.
7336b7cca15SAlexey Marchuk 			 * We can't retry such an operation on the next turn since, if the crypto operation was inplace, we could encrypt/
7346b7cca15SAlexey Marchuk 			 * decrypt an already processed buffer. See github issue #2907 for more details.
7356b7cca15SAlexey Marchuk 			 * Handle this case as if the crypto op was completed successfully - increment cryop_submitted and
7366b7cca15SAlexey Marchuk 			 * cryop_completed.
7376b7cca15SAlexey Marchuk 			 * We won't receive a completion for such an operation, so we need to clean up the mbufs and crypto ops */
7386b7cca15SAlexey Marchuk 			assert(task->cryop_total > task->cryop_completed);
7396b7cca15SAlexey Marchuk 			task->cryop_completed++;
7406b7cca15SAlexey Marchuk 			task->cryop_submitted++;
7416b7cca15SAlexey Marchuk 			if (task->cryop_completed == task->cryop_total) {
7426b7cca15SAlexey Marchuk 				assert(num_enqueued_ops == 0);
7436b7cca15SAlexey Marchuk 				/* All crypto ops are completed. We can't complete the task immediately since this function might be
7446b7cca15SAlexey Marchuk 				 * called in scope of spdk_accel_submit_* function and user's logic in the completion callback
7456b7cca15SAlexey Marchuk 				 * might lead to stack overflow */
7466b7cca15SAlexey Marchuk 				cryop_cnt -= num_enqueued_ops;
7476b7cca15SAlexey Marchuk 				accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs,
7486b7cca15SAlexey Marchuk 						num_enqueued_ops, cryop_cnt);
7496b7cca15SAlexey Marchuk 				rc = -EALREADY;
7506b7cca15SAlexey Marchuk 				goto free_ops;
7516b7cca15SAlexey Marchuk 			}
7526b7cca15SAlexey Marchuk 		/* fallthrough */
75361fbb000SAlexey Marchuk 		case RTE_CRYPTO_OP_STATUS_NOT_PROCESSED:
7549c636a02SAlexey Marchuk 			if (num_enqueued_ops == 0) {
7559c636a02SAlexey Marchuk 				/* Nothing was submitted. Free crypto ops and mbufs, treat this case as NOMEM */
7569c636a02SAlexey Marchuk 				rc = -ENOMEM;
7579c636a02SAlexey Marchuk 				goto free_ops;
75861fbb000SAlexey Marchuk 			}
7599c636a02SAlexey Marchuk 			/* Some of the crypto operations were not submitted, release their mbufs and crypto ops.
7609c636a02SAlexey Marchuk 			 * The remaining crypto ops will be submitted again once the current batch is completed */
7619c636a02SAlexey Marchuk 			cryop_cnt -= num_enqueued_ops;
7626b7cca15SAlexey Marchuk 			accel_dpdk_cryptodev_update_resources_from_pools(crypto_ops, src_mbufs, inplace ? NULL : dst_mbufs,
7636b7cca15SAlexey Marchuk 					num_enqueued_ops, cryop_cnt);
7649c636a02SAlexey Marchuk 			rc = 0;
7659c636a02SAlexey Marchuk 			goto free_ops;
76661fbb000SAlexey Marchuk 		default:
76761fbb000SAlexey Marchuk 			/* For all other statuses, mark task as failed so that the poller will pick
76861fbb000SAlexey Marchuk 			 * the failure up for the overall task status.
76961fbb000SAlexey Marchuk 			 */
77061fbb000SAlexey Marchuk 			task->is_failed = true;
77161fbb000SAlexey Marchuk 			if (num_enqueued_ops == 0) {
77261fbb000SAlexey Marchuk 				/* If nothing was enqueued and the failure wasn't because the device
77361fbb000SAlexey Marchuk 				 * was busy, fail the task now as the poller won't know anything about it.
77461fbb000SAlexey Marchuk 				 */
77561fbb000SAlexey Marchuk 				rc = -EINVAL;
7769c636a02SAlexey Marchuk 				goto free_ops;
77761fbb000SAlexey Marchuk 			}
77861fbb000SAlexey Marchuk 			break;
77961fbb000SAlexey Marchuk 		}
78061fbb000SAlexey Marchuk 	}
78161fbb000SAlexey Marchuk 
78261fbb000SAlexey Marchuk 	return 0;
78361fbb000SAlexey Marchuk 
78461fbb000SAlexey Marchuk 	/* Error cleanup paths. */
7859c636a02SAlexey Marchuk free_ops:
7866b7cca15SAlexey Marchuk 	if (!inplace) {
78761fbb000SAlexey Marchuk 		/* This also releases chained mbufs if any. */
78861fbb000SAlexey Marchuk 		rte_pktmbuf_free_bulk(dst_mbufs, cryop_cnt);
78961fbb000SAlexey Marchuk 	}
79061fbb000SAlexey Marchuk 	rte_mempool_put_bulk(g_crypto_op_mp, (void **)crypto_ops, cryop_cnt);
79161fbb000SAlexey Marchuk 	/* This also releases chained mbufs if any. */
79261fbb000SAlexey Marchuk 	rte_pktmbuf_free_bulk(src_mbufs, cryop_cnt);
79361fbb000SAlexey Marchuk 	return rc;
79461fbb000SAlexey Marchuk }
79561fbb000SAlexey Marchuk 
79661fbb000SAlexey Marchuk static inline struct accel_dpdk_cryptodev_qp *
79761fbb000SAlexey Marchuk accel_dpdk_cryptodev_get_next_device_qpair(enum accel_dpdk_cryptodev_driver_type type)
79861fbb000SAlexey Marchuk {
79961fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device, *device_tmp;
80061fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *qpair;
80161fbb000SAlexey Marchuk 
80261fbb000SAlexey Marchuk 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, device_tmp) {
80361fbb000SAlexey Marchuk 		if (device->type != type) {
80461fbb000SAlexey Marchuk 			continue;
80561fbb000SAlexey Marchuk 		}
80661fbb000SAlexey Marchuk 		TAILQ_FOREACH(qpair, &device->qpairs, link) {
80761fbb000SAlexey Marchuk 			if (!qpair->in_use) {
80861fbb000SAlexey Marchuk 				qpair->in_use = true;
80961fbb000SAlexey Marchuk 				return qpair;
81061fbb000SAlexey Marchuk 			}
81161fbb000SAlexey Marchuk 		}
81261fbb000SAlexey Marchuk 	}
81361fbb000SAlexey Marchuk 
81461fbb000SAlexey Marchuk 	return NULL;
81561fbb000SAlexey Marchuk }
81661fbb000SAlexey Marchuk 
81761fbb000SAlexey Marchuk /* Helper function for the channel creation callback.
81861fbb000SAlexey Marchuk  * Returns the number of drivers assigned to the channel */
81961fbb000SAlexey Marchuk static uint32_t
82061fbb000SAlexey Marchuk accel_dpdk_cryptodev_assign_device_qps(struct accel_dpdk_cryptodev_io_channel *crypto_ch)
82161fbb000SAlexey Marchuk {
82261fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device;
82361fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *device_qp;
82461fbb000SAlexey Marchuk 	uint32_t num_drivers = 0;
82561fbb000SAlexey Marchuk 	bool qat_found = false;
82661fbb000SAlexey Marchuk 
82761fbb000SAlexey Marchuk 	pthread_mutex_lock(&g_device_lock);
82861fbb000SAlexey Marchuk 
82961fbb000SAlexey Marchuk 	TAILQ_FOREACH(device, &g_crypto_devices, link) {
83061fbb000SAlexey Marchuk 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT && !qat_found) {
83161fbb000SAlexey Marchuk 			/* For some QAT devices, the optimal qp to use is every 32nd as this spreads the
83261fbb000SAlexey Marchuk 			 * workload out over the multiple virtual functions in the device. For the devices
83361fbb000SAlexey Marchuk 			 * where this isn't the case, it doesn't hurt.
83461fbb000SAlexey Marchuk 			 */
83561fbb000SAlexey Marchuk 			TAILQ_FOREACH(device_qp, &device->qpairs, link) {
83661fbb000SAlexey Marchuk 				if (device_qp->index != g_next_qat_index) {
83761fbb000SAlexey Marchuk 					continue;
83861fbb000SAlexey Marchuk 				}
83961fbb000SAlexey Marchuk 				if (device_qp->in_use == false) {
84061fbb000SAlexey Marchuk 					assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] == NULL);
84161fbb000SAlexey Marchuk 					crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_QAT] = device_qp;
84261fbb000SAlexey Marchuk 					device_qp->in_use = true;
84361fbb000SAlexey Marchuk 					g_next_qat_index = (g_next_qat_index + ACCEL_DPDK_CRYPTODEV_QAT_VF_SPREAD) % g_qat_total_qp;
84461fbb000SAlexey Marchuk 					qat_found = true;
84561fbb000SAlexey Marchuk 					num_drivers++;
84661fbb000SAlexey Marchuk 					break;
84761fbb000SAlexey Marchuk 				} else {
84861fbb000SAlexey Marchuk 					/* if the preferred index is used, skip to the next one in this set. */
84961fbb000SAlexey Marchuk 					g_next_qat_index = (g_next_qat_index + 1) % g_qat_total_qp;
85061fbb000SAlexey Marchuk 				}
85161fbb000SAlexey Marchuk 			}
85261fbb000SAlexey Marchuk 		}
85361fbb000SAlexey Marchuk 	}
85461fbb000SAlexey Marchuk 
85561fbb000SAlexey Marchuk 	/* For ACCEL_DPDK_CRYPTODEV_AESNI_MB and MLX5_PCI select devices in round-robin manner */
85661fbb000SAlexey Marchuk 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB);
85761fbb000SAlexey Marchuk 	if (device_qp) {
85861fbb000SAlexey Marchuk 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] == NULL);
85961fbb000SAlexey Marchuk 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB] = device_qp;
86061fbb000SAlexey Marchuk 		num_drivers++;
86161fbb000SAlexey Marchuk 	}
86261fbb000SAlexey Marchuk 
86361fbb000SAlexey Marchuk 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI);
86461fbb000SAlexey Marchuk 	if (device_qp) {
86561fbb000SAlexey Marchuk 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] == NULL);
86661fbb000SAlexey Marchuk 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI] = device_qp;
86761fbb000SAlexey Marchuk 		num_drivers++;
86861fbb000SAlexey Marchuk 	}
86961fbb000SAlexey Marchuk 
87013603217SZhangfei Gao 	device_qp = accel_dpdk_cryptodev_get_next_device_qpair(ACCEL_DPDK_CRYPTODEV_DRIVER_UADK);
87113603217SZhangfei Gao 	if (device_qp) {
87213603217SZhangfei Gao 		assert(crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_UADK] == NULL);
87313603217SZhangfei Gao 		crypto_ch->device_qp[ACCEL_DPDK_CRYPTODEV_DRIVER_UADK] = device_qp;
87413603217SZhangfei Gao 		num_drivers++;
87513603217SZhangfei Gao 	}
87661fbb000SAlexey Marchuk 	pthread_mutex_unlock(&g_device_lock);
87761fbb000SAlexey Marchuk 
87861fbb000SAlexey Marchuk 	return num_drivers;
87961fbb000SAlexey Marchuk }
88061fbb000SAlexey Marchuk 
88161fbb000SAlexey Marchuk static void
88261fbb000SAlexey Marchuk _accel_dpdk_cryptodev_destroy_cb(void *io_device, void *ctx_buf)
88361fbb000SAlexey Marchuk {
88461fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
88561fbb000SAlexey Marchuk 			ctx_buf;
88661fbb000SAlexey Marchuk 	int i;
88761fbb000SAlexey Marchuk 
88861fbb000SAlexey Marchuk 	pthread_mutex_lock(&g_device_lock);
88961fbb000SAlexey Marchuk 	for (i = 0; i < ACCEL_DPDK_CRYPTODEV_DRIVER_LAST; i++) {
89061fbb000SAlexey Marchuk 		if (crypto_ch->device_qp[i]) {
89161fbb000SAlexey Marchuk 			crypto_ch->device_qp[i]->in_use = false;
89261fbb000SAlexey Marchuk 		}
89361fbb000SAlexey Marchuk 	}
89461fbb000SAlexey Marchuk 	pthread_mutex_unlock(&g_device_lock);
89561fbb000SAlexey Marchuk 
89661fbb000SAlexey Marchuk 	spdk_poller_unregister(&crypto_ch->poller);
89761fbb000SAlexey Marchuk }
89861fbb000SAlexey Marchuk 
89961fbb000SAlexey Marchuk static int
90061fbb000SAlexey Marchuk _accel_dpdk_cryptodev_create_cb(void *io_device, void *ctx_buf)
90161fbb000SAlexey Marchuk {
90261fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_io_channel *crypto_ch = (struct accel_dpdk_cryptodev_io_channel *)
90361fbb000SAlexey Marchuk 			ctx_buf;
90461fbb000SAlexey Marchuk 
90561fbb000SAlexey Marchuk 	crypto_ch->poller = SPDK_POLLER_REGISTER(accel_dpdk_cryptodev_poller, crypto_ch, 0);
90661fbb000SAlexey Marchuk 	if (!accel_dpdk_cryptodev_assign_device_qps(crypto_ch)) {
90761fbb000SAlexey Marchuk 		SPDK_ERRLOG("No crypto drivers assigned\n");
90861fbb000SAlexey Marchuk 		spdk_poller_unregister(&crypto_ch->poller);
90961fbb000SAlexey Marchuk 		return -EINVAL;
91061fbb000SAlexey Marchuk 	}
91161fbb000SAlexey Marchuk 
9128f36853aSAlexey Marchuk 	/* We use these to queue tasks when the qpair is full or the mempools are exhausted, and to defer completions that are unsafe to finish in the submission path */
9138f36853aSAlexey Marchuk 	TAILQ_INIT(&crypto_ch->queued_tasks);
9146b7cca15SAlexey Marchuk 	TAILQ_INIT(&crypto_ch->completed_tasks);
91561fbb000SAlexey Marchuk 
91661fbb000SAlexey Marchuk 	return 0;
91761fbb000SAlexey Marchuk }
91861fbb000SAlexey Marchuk 
91961fbb000SAlexey Marchuk static struct spdk_io_channel *
92061fbb000SAlexey Marchuk accel_dpdk_cryptodev_get_io_channel(void)
92161fbb000SAlexey Marchuk {
92261fbb000SAlexey Marchuk 	return spdk_get_io_channel(&g_accel_dpdk_cryptodev_module);
92361fbb000SAlexey Marchuk }
92461fbb000SAlexey Marchuk 
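/* The accel framework reserves this many bytes of per-task context, which is what lets
 * submit_tasks() below recover the module's struct accel_dpdk_cryptodev_task (which embeds
 * the generic spdk_accel_task as its "base" member) via SPDK_CONTAINEROF(). */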
92561fbb000SAlexey Marchuk static size_t
92661fbb000SAlexey Marchuk accel_dpdk_cryptodev_ctx_size(void)
92761fbb000SAlexey Marchuk {
92861fbb000SAlexey Marchuk 	return sizeof(struct accel_dpdk_cryptodev_task);
92961fbb000SAlexey Marchuk }
93061fbb000SAlexey Marchuk 
93161fbb000SAlexey Marchuk static bool
9325105dc5dSKonrad Sztyber accel_dpdk_cryptodev_supports_opcode(enum spdk_accel_opcode opc)
93361fbb000SAlexey Marchuk {
93461fbb000SAlexey Marchuk 	switch (opc) {
9355105dc5dSKonrad Sztyber 	case SPDK_ACCEL_OPC_ENCRYPT:
9365105dc5dSKonrad Sztyber 	case SPDK_ACCEL_OPC_DECRYPT:
93761fbb000SAlexey Marchuk 		return true;
93861fbb000SAlexey Marchuk 	default:
93961fbb000SAlexey Marchuk 		return false;
94061fbb000SAlexey Marchuk 	}
94161fbb000SAlexey Marchuk }
94261fbb000SAlexey Marchuk 
94361fbb000SAlexey Marchuk static int
94461fbb000SAlexey Marchuk accel_dpdk_cryptodev_submit_tasks(struct spdk_io_channel *_ch, struct spdk_accel_task *_task)
94561fbb000SAlexey Marchuk {
94661fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_task *task = SPDK_CONTAINEROF(_task, struct accel_dpdk_cryptodev_task,
94761fbb000SAlexey Marchuk 			base);
94861fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_io_channel *ch = spdk_io_channel_get_ctx(_ch);
9498f36853aSAlexey Marchuk 	int rc;
95061fbb000SAlexey Marchuk 
951bf8e0656SAlexey Marchuk 	task->cryop_completed = 0;
952bf8e0656SAlexey Marchuk 	task->cryop_submitted = 0;
953bf8e0656SAlexey Marchuk 	task->cryop_total = 0;
954bf8e0656SAlexey Marchuk 	task->inplace = true;
955bf8e0656SAlexey Marchuk 	task->is_failed = false;
956bf8e0656SAlexey Marchuk 
957bf8e0656SAlexey Marchuk 	/* Check if crypto operation is inplace: no destination or source == destination */
958bf8e0656SAlexey Marchuk 	if (task->base.s.iovcnt == task->base.d.iovcnt) {
959bf8e0656SAlexey Marchuk 		if (memcmp(task->base.s.iovs, task->base.d.iovs, sizeof(struct iovec) * task->base.s.iovcnt) != 0) {
960bf8e0656SAlexey Marchuk 			task->inplace = false;
961bf8e0656SAlexey Marchuk 		}
962bf8e0656SAlexey Marchuk 	} else if (task->base.d.iovcnt != 0) {
963bf8e0656SAlexey Marchuk 		task->inplace = false;
964bf8e0656SAlexey Marchuk 	}
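	/* For example, an operation submitted with no destination iovecs (d.iovcnt == 0), or with a
	 * destination iovec array byte-identical to the source one, is handled as in-place. */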
965bf8e0656SAlexey Marchuk 
9668f36853aSAlexey Marchuk 	rc = accel_dpdk_cryptodev_process_task(ch, task);
9676b7cca15SAlexey Marchuk 	if (spdk_unlikely(rc)) {
9686b7cca15SAlexey Marchuk 		if (rc == -ENOMEM) {
9698f36853aSAlexey Marchuk 			TAILQ_INSERT_TAIL(&ch->queued_tasks, task, link);
9708f36853aSAlexey Marchuk 			rc = 0;
9716b7cca15SAlexey Marchuk 		} else if (rc == -EALREADY) {
9726b7cca15SAlexey Marchuk 			/* -EALREADY means that a task is completed, but it might be unsafe to complete
9736b7cca15SAlexey Marchuk 			 * it if we are in the submission path. Hence put it into a dedicated queue and
9746b7cca15SAlexey Marchuk 			 * process it during polling */
9756b7cca15SAlexey Marchuk 			TAILQ_INSERT_TAIL(&ch->completed_tasks, task, link);
9766b7cca15SAlexey Marchuk 			rc = 0;
9776b7cca15SAlexey Marchuk 		}
9788f36853aSAlexey Marchuk 	}
9798f36853aSAlexey Marchuk 
9808f36853aSAlexey Marchuk 	return rc;
98161fbb000SAlexey Marchuk }
98261fbb000SAlexey Marchuk 
98361fbb000SAlexey Marchuk /* Dummy function used by DPDK to free externally attached buffers on mbufs; we free them ourselves, but
98461fbb000SAlexey Marchuk  * this callback has to be here. */
98561fbb000SAlexey Marchuk static void
98661fbb000SAlexey Marchuk shinfo_free_cb(void *arg1, void *arg2)
98761fbb000SAlexey Marchuk {
98861fbb000SAlexey Marchuk }
98961fbb000SAlexey Marchuk 
99061fbb000SAlexey Marchuk static int
99161fbb000SAlexey Marchuk accel_dpdk_cryptodev_create(uint8_t index, uint16_t num_lcores)
99261fbb000SAlexey Marchuk {
99310dcf2dbSAlexey Marchuk 	struct rte_cryptodev_qp_conf qp_conf = {
99410dcf2dbSAlexey Marchuk 		.mp_session = g_session_mp,
99510dcf2dbSAlexey Marchuk #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
99610dcf2dbSAlexey Marchuk 		.mp_session_private = g_session_mp_priv
99710dcf2dbSAlexey Marchuk #endif
99810dcf2dbSAlexey Marchuk 	};
99961fbb000SAlexey Marchuk 	/* Setup queue pairs. */
1000*186b109dSJim Harris 	struct rte_cryptodev_config conf = { .socket_id = SPDK_ENV_NUMA_ID_ANY };
100161fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device;
100261fbb000SAlexey Marchuk 	uint8_t j, cdev_id, cdrv_id;
100361fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *dev_qp, *tmp_qp;
100461fbb000SAlexey Marchuk 	int rc;
100561fbb000SAlexey Marchuk 
100661fbb000SAlexey Marchuk 	device = calloc(1, sizeof(*device));
1007f5d1a924SAlexey Marchuk 	if (!device) {
1008f5d1a924SAlexey Marchuk 		return -ENOMEM;
1009f5d1a924SAlexey Marchuk 	}
1010f5d1a924SAlexey Marchuk 
1011f5d1a924SAlexey Marchuk 	/* Get details about this device. */
1012f5d1a924SAlexey Marchuk 	rte_cryptodev_info_get(index, &device->cdev_info);
1013f5d1a924SAlexey Marchuk 	cdrv_id = device->cdev_info.driver_id;
1014f5d1a924SAlexey Marchuk 	cdev_id = device->cdev_id = index;
1015f5d1a924SAlexey Marchuk 
101661fbb000SAlexey Marchuk 	if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT) == 0) {
101761fbb000SAlexey Marchuk 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
101861fbb000SAlexey Marchuk 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_QAT;
101961fbb000SAlexey Marchuk 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_AESNI_MB) == 0) {
102061fbb000SAlexey Marchuk 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
102161fbb000SAlexey Marchuk 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB;
102261fbb000SAlexey Marchuk 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_MLX5) == 0) {
102361fbb000SAlexey Marchuk 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS_MLX5;
102461fbb000SAlexey Marchuk 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI;
102561fbb000SAlexey Marchuk 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_QAT_ASYM) == 0) {
102661fbb000SAlexey Marchuk 		/* ACCEL_DPDK_CRYPTODEV_QAT_ASYM devices are not supported at this time. */
102761fbb000SAlexey Marchuk 		rc = 0;
102861fbb000SAlexey Marchuk 		goto err;
102913603217SZhangfei Gao 	} else if (strcmp(device->cdev_info.driver_name, ACCEL_DPDK_CRYPTODEV_UADK) == 0) {
103013603217SZhangfei Gao 		device->qp_desc_nr = ACCEL_DPDK_CRYPTODEV_QP_DESCRIPTORS;
103113603217SZhangfei Gao 		device->type = ACCEL_DPDK_CRYPTODEV_DRIVER_UADK;
103261fbb000SAlexey Marchuk 	} else {
103361fbb000SAlexey Marchuk 		SPDK_ERRLOG("Failed to start device %u. Invalid driver name \"%s\"\n",
103461fbb000SAlexey Marchuk 			    cdev_id, device->cdev_info.driver_name);
103561fbb000SAlexey Marchuk 		rc = -EINVAL;
103661fbb000SAlexey Marchuk 		goto err;
1037f5d1a924SAlexey Marchuk 	}
1038f5d1a924SAlexey Marchuk 
1039f5d1a924SAlexey Marchuk 	/* Before going any further, make sure we have enough resources for this
104034edd9f1SKamil Godzwon 	 * device type to function.  We need a unique queue pair per core across each
1041f5d1a924SAlexey Marchuk 	 * device type to remain lockless.
1042f5d1a924SAlexey Marchuk 	 */
1043f5d1a924SAlexey Marchuk 	if ((rte_cryptodev_device_count_by_driver(cdrv_id) *
1044f5d1a924SAlexey Marchuk 	     device->cdev_info.max_nb_queue_pairs) < num_lcores) {
1045f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Insufficient unique queue pairs available for %s\n",
1046f5d1a924SAlexey Marchuk 			    device->cdev_info.driver_name);
1047f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Either add more crypto devices or decrease core count\n");
1048f5d1a924SAlexey Marchuk 		rc = -EINVAL;
1049f5d1a924SAlexey Marchuk 		goto err;
1050f5d1a924SAlexey Marchuk 	}
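	/* E.g. with 2 devices of this driver type, each exposing 32 queue pairs, up to
	 * 2 * 32 = 64 lcores can each own a unique queue pair without locking. */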
1051f5d1a924SAlexey Marchuk 
105261fbb000SAlexey Marchuk 	conf.nb_queue_pairs = device->cdev_info.max_nb_queue_pairs;
1053f5d1a924SAlexey Marchuk 	rc = rte_cryptodev_configure(cdev_id, &conf);
1054f5d1a924SAlexey Marchuk 	if (rc < 0) {
1055f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Failed to configure cryptodev %u: error %d\n",
1056f5d1a924SAlexey Marchuk 			    cdev_id, rc);
1057f5d1a924SAlexey Marchuk 		rc = -EINVAL;
1058f5d1a924SAlexey Marchuk 		goto err;
1059f5d1a924SAlexey Marchuk 	}
1060f5d1a924SAlexey Marchuk 
1061f5d1a924SAlexey Marchuk 	/* Pre-setup all potential qpairs now and assign them in the channel
1062f5d1a924SAlexey Marchuk 	 * callback. If we were to create them there, we'd have to stop the
1063f5d1a924SAlexey Marchuk 	 * entire device affecting all other threads that might be using it
1064f5d1a924SAlexey Marchuk 	 * even on other queue pairs.
1065f5d1a924SAlexey Marchuk 	 */
106661fbb000SAlexey Marchuk 	qp_conf.nb_descriptors = device->qp_desc_nr;
1067f5d1a924SAlexey Marchuk 	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
1068f5d1a924SAlexey Marchuk 		rc = rte_cryptodev_queue_pair_setup(cdev_id, j, &qp_conf, SOCKET_ID_ANY);
1069f5d1a924SAlexey Marchuk 		if (rc < 0) {
1070f5d1a924SAlexey Marchuk 			SPDK_ERRLOG("Failed to setup queue pair %u on "
1071f5d1a924SAlexey Marchuk 				    "cryptodev %u: error %d\n", j, cdev_id, rc);
1072f5d1a924SAlexey Marchuk 			rc = -EINVAL;
1073f5d1a924SAlexey Marchuk 			goto err_qp_setup;
1074f5d1a924SAlexey Marchuk 		}
1075f5d1a924SAlexey Marchuk 	}
1076f5d1a924SAlexey Marchuk 
1077f5d1a924SAlexey Marchuk 	rc = rte_cryptodev_start(cdev_id);
1078f5d1a924SAlexey Marchuk 	if (rc < 0) {
107961fbb000SAlexey Marchuk 		SPDK_ERRLOG("Failed to start device %u: error %d\n", cdev_id, rc);
1080f5d1a924SAlexey Marchuk 		rc = -EINVAL;
1081f5d1a924SAlexey Marchuk 		goto err_dev_start;
1082f5d1a924SAlexey Marchuk 	}
1083f5d1a924SAlexey Marchuk 
108461fbb000SAlexey Marchuk 	TAILQ_INIT(&device->qpairs);
1085f5d1a924SAlexey Marchuk 	/* Build up lists of device/qp combinations per PMD */
1086f5d1a924SAlexey Marchuk 	for (j = 0; j < device->cdev_info.max_nb_queue_pairs; j++) {
108761fbb000SAlexey Marchuk 		dev_qp = calloc(1, sizeof(*dev_qp));
1088f5d1a924SAlexey Marchuk 		if (!dev_qp) {
1089f5d1a924SAlexey Marchuk 			rc = -ENOMEM;
1090f5d1a924SAlexey Marchuk 			goto err_qp_alloc;
1091f5d1a924SAlexey Marchuk 		}
1092f5d1a924SAlexey Marchuk 		dev_qp->device = device;
1093f5d1a924SAlexey Marchuk 		dev_qp->qp = j;
1094f5d1a924SAlexey Marchuk 		dev_qp->in_use = false;
109561fbb000SAlexey Marchuk 		TAILQ_INSERT_TAIL(&device->qpairs, dev_qp, link);
109661fbb000SAlexey Marchuk 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
109761fbb000SAlexey Marchuk 			dev_qp->index = g_qat_total_qp++;
1098f5d1a924SAlexey Marchuk 		}
1099f5d1a924SAlexey Marchuk 	}
1100f5d1a924SAlexey Marchuk 	/* Add to our list of available crypto devices. */
110161fbb000SAlexey Marchuk 	TAILQ_INSERT_TAIL(&g_crypto_devices, device, link);
1102f5d1a924SAlexey Marchuk 
1103f5d1a924SAlexey Marchuk 	return 0;
110461fbb000SAlexey Marchuk 
1105f5d1a924SAlexey Marchuk err_qp_alloc:
110661fbb000SAlexey Marchuk 	TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp_qp) {
1107f5d1a924SAlexey Marchuk 		if (dev_qp->device->cdev_id != device->cdev_id) {
1108f5d1a924SAlexey Marchuk 			continue;
1109f5d1a924SAlexey Marchuk 		}
111061fbb000SAlexey Marchuk 		free(dev_qp);
111161fbb000SAlexey Marchuk 		if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
111261fbb000SAlexey Marchuk 			assert(g_qat_total_qp);
1113f5d1a924SAlexey Marchuk 			g_qat_total_qp--;
1114f5d1a924SAlexey Marchuk 		}
1115f5d1a924SAlexey Marchuk 	}
1116f5d1a924SAlexey Marchuk 	rte_cryptodev_stop(cdev_id);
1117f5d1a924SAlexey Marchuk err_dev_start:
1118f5d1a924SAlexey Marchuk err_qp_setup:
1119f5d1a924SAlexey Marchuk 	rte_cryptodev_close(cdev_id);
1120f5d1a924SAlexey Marchuk err:
1121f5d1a924SAlexey Marchuk 	free(device);
1122f5d1a924SAlexey Marchuk 
1123f5d1a924SAlexey Marchuk 	return rc;
1124f5d1a924SAlexey Marchuk }
1125f5d1a924SAlexey Marchuk 
1126f5d1a924SAlexey Marchuk static void
112761fbb000SAlexey Marchuk accel_dpdk_cryptodev_release(struct accel_dpdk_cryptodev_device *device)
1128f5d1a924SAlexey Marchuk {
112961fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_qp *dev_qp, *tmp;
1130f5d1a924SAlexey Marchuk 
1131f5d1a924SAlexey Marchuk 	assert(device);
1132f5d1a924SAlexey Marchuk 
113361fbb000SAlexey Marchuk 	TAILQ_FOREACH_SAFE(dev_qp, &device->qpairs, link, tmp) {
1134f5d1a924SAlexey Marchuk 		free(dev_qp);
1135f5d1a924SAlexey Marchuk 	}
113661fbb000SAlexey Marchuk 	if (device->type == ACCEL_DPDK_CRYPTODEV_DRIVER_QAT) {
113761fbb000SAlexey Marchuk 		assert(g_qat_total_qp >= device->cdev_info.max_nb_queue_pairs);
113861fbb000SAlexey Marchuk 		g_qat_total_qp -= device->cdev_info.max_nb_queue_pairs;
1139f5d1a924SAlexey Marchuk 	}
1140f5d1a924SAlexey Marchuk 	rte_cryptodev_stop(device->cdev_id);
1141f5d1a924SAlexey Marchuk 	rte_cryptodev_close(device->cdev_id);
1142f5d1a924SAlexey Marchuk 	free(device);
1143f5d1a924SAlexey Marchuk }
1144f5d1a924SAlexey Marchuk 
1145f5d1a924SAlexey Marchuk static int
114661fbb000SAlexey Marchuk accel_dpdk_cryptodev_init(void)
1147f5d1a924SAlexey Marchuk {
1148f5d1a924SAlexey Marchuk 	uint8_t cdev_count;
1149f5d1a924SAlexey Marchuk 	uint8_t cdev_id;
1150f5d1a924SAlexey Marchuk 	int i, rc;
1151c618fc30SZhangfei Gao 	const char *driver_name = g_driver_names[g_dpdk_cryptodev_driver];
115261fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device, *tmp_dev;
1153f5d1a924SAlexey Marchuk 	unsigned int max_sess_size = 0, sess_size;
1154f5d1a924SAlexey Marchuk 	uint16_t num_lcores = rte_lcore_count();
1155c618fc30SZhangfei Gao 	char init_args[32];
1156f5d1a924SAlexey Marchuk 
115761fbb000SAlexey Marchuk 	/* Only the first call via module init should init the crypto drivers. */
1158f5d1a924SAlexey Marchuk 	if (g_session_mp != NULL) {
1159f5d1a924SAlexey Marchuk 		return 0;
1160f5d1a924SAlexey Marchuk 	}
1161f5d1a924SAlexey Marchuk 
116213603217SZhangfei Gao 	if (g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB ||
116313603217SZhangfei Gao 	    g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_UADK) {
1164c618fc30SZhangfei Gao 		snprintf(init_args, sizeof(init_args), "max_nb_queue_pairs=%d",
116561fbb000SAlexey Marchuk 			 ACCEL_DPDK_CRYPTODEV_AESNI_MB_NUM_QP);
1166c618fc30SZhangfei Gao 		rc = rte_vdev_init(driver_name, init_args);
1167f5d1a924SAlexey Marchuk 		if (rc) {
1168f5d1a924SAlexey Marchuk 			SPDK_NOTICELOG("Failed to create virtual PMD %s: error %d. "
1169f5d1a924SAlexey Marchuk 				       "Possibly %s is not supported by DPDK library. "
1170c618fc30SZhangfei Gao 				       "Keep going...\n", driver_name, rc, driver_name);
1171c618fc30SZhangfei Gao 		}
1172f5d1a924SAlexey Marchuk 	}
1173f5d1a924SAlexey Marchuk 
11741f88c365STomasz Zawadzki 	/* If we have no crypto devices, report an error so accel can fall back on other modules. */
1175f5d1a924SAlexey Marchuk 	cdev_count = rte_cryptodev_count();
1176f5d1a924SAlexey Marchuk 	if (cdev_count == 0) {
11771f88c365STomasz Zawadzki 		return -ENODEV;
1178f5d1a924SAlexey Marchuk 	}
11791f88c365STomasz Zawadzki 	SPDK_NOTICELOG("Found crypto devices: %d\n", (int)cdev_count);
1180f5d1a924SAlexey Marchuk 
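	/* The dynamic mbuf field registered below is used by this module to carry a per-mbuf
	 * pointer back to the owning task (its I/O context), so crypto ops dequeued by the
	 * poller can be matched to their accel tasks. */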
1181f5d1a924SAlexey Marchuk 	g_mbuf_offset = rte_mbuf_dynfield_register(&rte_mbuf_dynfield_io_context);
1182f5d1a924SAlexey Marchuk 	if (g_mbuf_offset < 0) {
1183f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("error registering dynamic field with DPDK\n");
1184f5d1a924SAlexey Marchuk 		return -EINVAL;
1185f5d1a924SAlexey Marchuk 	}
1186f5d1a924SAlexey Marchuk 
118761fbb000SAlexey Marchuk 	/* Create global mempools, shared by all devices regardless of type */
1188f5d1a924SAlexey Marchuk 	/* First determine the max session size; most pools are shared by all the devices,
118961fbb000SAlexey Marchuk 	 * so we need to find the global max session size. */
1190f5d1a924SAlexey Marchuk 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1191f5d1a924SAlexey Marchuk 		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
1192f5d1a924SAlexey Marchuk 		if (sess_size > max_sess_size) {
1193f5d1a924SAlexey Marchuk 			max_sess_size = sess_size;
1194f5d1a924SAlexey Marchuk 		}
1195f5d1a924SAlexey Marchuk 	}
1196f5d1a924SAlexey Marchuk 
119710dcf2dbSAlexey Marchuk #if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
119861fbb000SAlexey Marchuk 	g_session_mp_priv = rte_mempool_create("dpdk_crypto_ses_mp_priv",
119961fbb000SAlexey Marchuk 					       ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
120061fbb000SAlexey Marchuk 					       NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1201f5d1a924SAlexey Marchuk 	if (g_session_mp_priv == NULL) {
1202f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Cannot create private session pool max size 0x%x\n", max_sess_size);
1203f5d1a924SAlexey Marchuk 		return -ENOMEM;
1204f5d1a924SAlexey Marchuk 	}
1205f5d1a924SAlexey Marchuk 
120610dcf2dbSAlexey Marchuk 	/* When a separate session private data mempool is allocated, the element size for the session mempool
120710dcf2dbSAlexey Marchuk 	 * should be 0. */
120810dcf2dbSAlexey Marchuk 	max_sess_size = 0;
120910dcf2dbSAlexey Marchuk #endif
121010dcf2dbSAlexey Marchuk 
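	/* With DPDK >= 22.11 the symmetric session mempool carries the driver-private session
	 * data itself, so only the single pool below is created and max_sess_size is passed to
	 * it directly. */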
121161fbb000SAlexey Marchuk 	g_session_mp = rte_cryptodev_sym_session_pool_create("dpdk_crypto_ses_mp",
121210dcf2dbSAlexey Marchuk 			ACCEL_DPDK_CRYPTODEV_NUM_SESSIONS, max_sess_size, ACCEL_DPDK_CRYPTODEV_SESS_MEMPOOL_CACHE_SIZE, 0,
1213f5d1a924SAlexey Marchuk 			SOCKET_ID_ANY);
1214f5d1a924SAlexey Marchuk 	if (g_session_mp == NULL) {
1215f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Cannot create session pool max size 0x%x\n", max_sess_size);
1216f5d1a924SAlexey Marchuk 		rc = -ENOMEM;
1217f5d1a924SAlexey Marchuk 		goto error_create_session_mp;
1218f5d1a924SAlexey Marchuk 	}
1219f5d1a924SAlexey Marchuk 
122061fbb000SAlexey Marchuk 	g_mbuf_mp = rte_pktmbuf_pool_create("dpdk_crypto_mbuf_mp", ACCEL_DPDK_CRYPTODEV_NUM_MBUFS,
122161fbb000SAlexey Marchuk 					    ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
1222*186b109dSJim Harris 					    0, 0, SPDK_ENV_NUMA_ID_ANY);
1223f5d1a924SAlexey Marchuk 	if (g_mbuf_mp == NULL) {
1224f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Cannot create mbuf pool\n");
1225f5d1a924SAlexey Marchuk 		rc = -ENOMEM;
1226f5d1a924SAlexey Marchuk 		goto error_create_mbuf;
1227f5d1a924SAlexey Marchuk 	}
1228f5d1a924SAlexey Marchuk 
1229f5d1a924SAlexey Marchuk 	/* We use per-op private data, as suggested by DPDK, to store the IV and
123061fbb000SAlexey Marchuk 	 * our own struct for queueing ops. */
123161fbb000SAlexey Marchuk 	g_crypto_op_mp = rte_crypto_op_pool_create("dpdk_crypto_op_mp",
123261fbb000SAlexey Marchuk 			 RTE_CRYPTO_OP_TYPE_SYMMETRIC, ACCEL_DPDK_CRYPTODEV_NUM_MBUFS, ACCEL_DPDK_CRYPTODEV_POOL_CACHE_SIZE,
123361fbb000SAlexey Marchuk 			 (ACCEL_DPDK_CRYPTODEV_DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_sym_xform)) +
12349c636a02SAlexey Marchuk 			 ACCEL_DPDK_CRYPTODEV_IV_LENGTH, rte_socket_id());
1235f5d1a924SAlexey Marchuk 	if (g_crypto_op_mp == NULL) {
1236f5d1a924SAlexey Marchuk 		SPDK_ERRLOG("Cannot create op pool\n");
1237f5d1a924SAlexey Marchuk 		rc = -ENOMEM;
1238f5d1a924SAlexey Marchuk 		goto error_create_op;
1239f5d1a924SAlexey Marchuk 	}
1240f5d1a924SAlexey Marchuk 
1241f5d1a924SAlexey Marchuk 	/* Init all devices */
1242f5d1a924SAlexey Marchuk 	for (i = 0; i < cdev_count; i++) {
124361fbb000SAlexey Marchuk 		rc = accel_dpdk_cryptodev_create(i, num_lcores);
1244f5d1a924SAlexey Marchuk 		if (rc) {
1245f5d1a924SAlexey Marchuk 			goto err;
1246f5d1a924SAlexey Marchuk 		}
1247f5d1a924SAlexey Marchuk 	}
1248f5d1a924SAlexey Marchuk 
1249f5d1a924SAlexey Marchuk 	g_shinfo.free_cb = shinfo_free_cb;
125061fbb000SAlexey Marchuk 
125161fbb000SAlexey Marchuk 	spdk_io_device_register(&g_accel_dpdk_cryptodev_module, _accel_dpdk_cryptodev_create_cb,
125261fbb000SAlexey Marchuk 				_accel_dpdk_cryptodev_destroy_cb, sizeof(struct accel_dpdk_cryptodev_io_channel),
125361fbb000SAlexey Marchuk 				"accel_dpdk_cryptodev");
125461fbb000SAlexey Marchuk 
1255f5d1a924SAlexey Marchuk 	return 0;
1256f5d1a924SAlexey Marchuk 
1257f5d1a924SAlexey Marchuk 	/* Error cleanup paths. */
1258f5d1a924SAlexey Marchuk err:
125961fbb000SAlexey Marchuk 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp_dev) {
126061fbb000SAlexey Marchuk 		TAILQ_REMOVE(&g_crypto_devices, device, link);
126161fbb000SAlexey Marchuk 		accel_dpdk_cryptodev_release(device);
1262f5d1a924SAlexey Marchuk 	}
1263f5d1a924SAlexey Marchuk 	rte_mempool_free(g_crypto_op_mp);
1264f5d1a924SAlexey Marchuk 	g_crypto_op_mp = NULL;
1265f5d1a924SAlexey Marchuk error_create_op:
1266f5d1a924SAlexey Marchuk 	rte_mempool_free(g_mbuf_mp);
1267f5d1a924SAlexey Marchuk 	g_mbuf_mp = NULL;
1268f5d1a924SAlexey Marchuk error_create_mbuf:
1269f5d1a924SAlexey Marchuk 	rte_mempool_free(g_session_mp);
1270f5d1a924SAlexey Marchuk 	g_session_mp = NULL;
1271f5d1a924SAlexey Marchuk error_create_session_mp:
1272f5d1a924SAlexey Marchuk 	if (g_session_mp_priv != NULL) {
1273f5d1a924SAlexey Marchuk 		rte_mempool_free(g_session_mp_priv);
1274f5d1a924SAlexey Marchuk 		g_session_mp_priv = NULL;
1275f5d1a924SAlexey Marchuk 	}
1276f5d1a924SAlexey Marchuk 	return rc;
1277f5d1a924SAlexey Marchuk }
1278f5d1a924SAlexey Marchuk 
1279f5d1a924SAlexey Marchuk static void
128061fbb000SAlexey Marchuk accel_dpdk_cryptodev_fini_cb(void *io_device)
1281f5d1a924SAlexey Marchuk {
128261fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device, *tmp;
1283f5d1a924SAlexey Marchuk 
128461fbb000SAlexey Marchuk 	TAILQ_FOREACH_SAFE(device, &g_crypto_devices, link, tmp) {
128561fbb000SAlexey Marchuk 		TAILQ_REMOVE(&g_crypto_devices, device, link);
128661fbb000SAlexey Marchuk 		accel_dpdk_cryptodev_release(device);
1287f5d1a924SAlexey Marchuk 	}
1288c618fc30SZhangfei Gao 
128913603217SZhangfei Gao 	if (g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB ||
129013603217SZhangfei Gao 	    g_dpdk_cryptodev_driver == ACCEL_DPDK_CRYPTODEV_DRIVER_UADK) {
1291c618fc30SZhangfei Gao 		rte_vdev_uninit(g_driver_names[g_dpdk_cryptodev_driver]);
1292c618fc30SZhangfei Gao 	}
1293f5d1a924SAlexey Marchuk 
1294f5d1a924SAlexey Marchuk 	rte_mempool_free(g_crypto_op_mp);
1295f5d1a924SAlexey Marchuk 	rte_mempool_free(g_mbuf_mp);
1296f5d1a924SAlexey Marchuk 	rte_mempool_free(g_session_mp);
1297f5d1a924SAlexey Marchuk 	if (g_session_mp_priv != NULL) {
1298f5d1a924SAlexey Marchuk 		rte_mempool_free(g_session_mp_priv);
1299f5d1a924SAlexey Marchuk 	}
130061fbb000SAlexey Marchuk 
130161fbb000SAlexey Marchuk 	spdk_accel_module_finish();
1302f5d1a924SAlexey Marchuk }
1303f5d1a924SAlexey Marchuk 
130461fbb000SAlexey Marchuk /* Called when the entire module is being torn down. */
1305f5d1a924SAlexey Marchuk static void
130661fbb000SAlexey Marchuk accel_dpdk_cryptodev_fini(void *ctx)
1307f5d1a924SAlexey Marchuk {
13086da98786SAlexey Marchuk 	if (g_crypto_op_mp) {
130961fbb000SAlexey Marchuk 		spdk_io_device_unregister(&g_accel_dpdk_cryptodev_module, accel_dpdk_cryptodev_fini_cb);
1310f5d1a924SAlexey Marchuk 	}
13116da98786SAlexey Marchuk }
1312f5d1a924SAlexey Marchuk 
131310dcf2dbSAlexey Marchuk static void
131410dcf2dbSAlexey Marchuk accel_dpdk_cryptodev_key_handle_session_free(struct accel_dpdk_cryptodev_device *device,
131510dcf2dbSAlexey Marchuk 		void *session)
131610dcf2dbSAlexey Marchuk {
131710dcf2dbSAlexey Marchuk #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
131810dcf2dbSAlexey Marchuk 	assert(device != NULL);
131910dcf2dbSAlexey Marchuk 
132010dcf2dbSAlexey Marchuk 	rte_cryptodev_sym_session_free(device->cdev_id, session);
132110dcf2dbSAlexey Marchuk #else
132210dcf2dbSAlexey Marchuk 	rte_cryptodev_sym_session_free(session);
132310dcf2dbSAlexey Marchuk #endif
132410dcf2dbSAlexey Marchuk }
132510dcf2dbSAlexey Marchuk 
132610dcf2dbSAlexey Marchuk static void *
132710dcf2dbSAlexey Marchuk accel_dpdk_cryptodev_key_handle_session_create(struct accel_dpdk_cryptodev_device *device,
132810dcf2dbSAlexey Marchuk 		struct rte_crypto_sym_xform *cipher_xform)
132910dcf2dbSAlexey Marchuk {
133010dcf2dbSAlexey Marchuk 	void *session;
133110dcf2dbSAlexey Marchuk 
133210dcf2dbSAlexey Marchuk #if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
133310dcf2dbSAlexey Marchuk 	session = rte_cryptodev_sym_session_create(device->cdev_id, cipher_xform, g_session_mp);
133410dcf2dbSAlexey Marchuk #else
133510dcf2dbSAlexey Marchuk 	session = rte_cryptodev_sym_session_create(g_session_mp);
133610dcf2dbSAlexey Marchuk 	if (!session) {
133710dcf2dbSAlexey Marchuk 		return NULL;
133810dcf2dbSAlexey Marchuk 	}
133910dcf2dbSAlexey Marchuk 
134010dcf2dbSAlexey Marchuk 	if (rte_cryptodev_sym_session_init(device->cdev_id, session, cipher_xform, g_session_mp_priv) < 0) {
134110dcf2dbSAlexey Marchuk 		accel_dpdk_cryptodev_key_handle_session_free(device, session);
134210dcf2dbSAlexey Marchuk 		return NULL;
134310dcf2dbSAlexey Marchuk 	}
134410dcf2dbSAlexey Marchuk #endif
134510dcf2dbSAlexey Marchuk 
134610dcf2dbSAlexey Marchuk 	return session;
134710dcf2dbSAlexey Marchuk }
134810dcf2dbSAlexey Marchuk 
1349f5d1a924SAlexey Marchuk static int
135061fbb000SAlexey Marchuk accel_dpdk_cryptodev_key_handle_configure(struct spdk_accel_crypto_key *key,
135161fbb000SAlexey Marchuk 		struct accel_dpdk_cryptodev_key_handle *key_handle)
1352f5d1a924SAlexey Marchuk {
135361fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
1354f5d1a924SAlexey Marchuk 
135561fbb000SAlexey Marchuk 	key_handle->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
135661fbb000SAlexey Marchuk 	key_handle->cipher_xform.cipher.iv.offset = ACCEL_DPDK_CRYPTODEV_IV_OFFSET;
135761fbb000SAlexey Marchuk 	key_handle->cipher_xform.cipher.iv.length = ACCEL_DPDK_CRYPTODEV_IV_LENGTH;
135861fbb000SAlexey Marchuk 
135961fbb000SAlexey Marchuk 	switch (priv->cipher) {
13609cd94384SJacek Kalwas 	case SPDK_ACCEL_CIPHER_AES_CBC:
136161fbb000SAlexey Marchuk 		key_handle->cipher_xform.cipher.key.data = key->key;
136261fbb000SAlexey Marchuk 		key_handle->cipher_xform.cipher.key.length = key->key_size;
136361fbb000SAlexey Marchuk 		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
136461fbb000SAlexey Marchuk 		break;
13659cd94384SJacek Kalwas 	case SPDK_ACCEL_CIPHER_AES_XTS:
136661fbb000SAlexey Marchuk 		key_handle->cipher_xform.cipher.key.data = priv->xts_key;
136761fbb000SAlexey Marchuk 		key_handle->cipher_xform.cipher.key.length = key->key_size + key->key2_size;
136861fbb000SAlexey Marchuk 		key_handle->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
136961fbb000SAlexey Marchuk 		break;
137061fbb000SAlexey Marchuk 	default:
137161fbb000SAlexey Marchuk 		SPDK_ERRLOG("Invalid cipher name %s.\n", key->param.cipher);
137210dcf2dbSAlexey Marchuk 		return -EINVAL;
137361fbb000SAlexey Marchuk 	}
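	/* For AES_XTS the DPDK key is the two accel keys concatenated, e.g. two 16-byte keys
	 * yield cipher.key.length == 32; for AES_CBC the single accel key is used as-is. */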
137461fbb000SAlexey Marchuk 
137561fbb000SAlexey Marchuk 	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
137610dcf2dbSAlexey Marchuk 	key_handle->session_encrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
137710dcf2dbSAlexey Marchuk 				      &key_handle->cipher_xform);
137810dcf2dbSAlexey Marchuk 	if (!key_handle->session_encrypt) {
137910dcf2dbSAlexey Marchuk 		SPDK_ERRLOG("Failed to init encrypt session\n");
138010dcf2dbSAlexey Marchuk 		return -EINVAL;
138161fbb000SAlexey Marchuk 	}
138261fbb000SAlexey Marchuk 
138361fbb000SAlexey Marchuk 	key_handle->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
138410dcf2dbSAlexey Marchuk 	key_handle->session_decrypt = accel_dpdk_cryptodev_key_handle_session_create(key_handle->device,
138510dcf2dbSAlexey Marchuk 				      &key_handle->cipher_xform);
138610dcf2dbSAlexey Marchuk 	if (!key_handle->session_decrypt) {
138710dcf2dbSAlexey Marchuk 		SPDK_ERRLOG("Failed to init decrypt session\n");
138810dcf2dbSAlexey Marchuk 		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
138910dcf2dbSAlexey Marchuk 		return -EINVAL;
139061fbb000SAlexey Marchuk 	}
139161fbb000SAlexey Marchuk 
139261fbb000SAlexey Marchuk 	return 0;
139361fbb000SAlexey Marchuk }
139461fbb000SAlexey Marchuk 
139561fbb000SAlexey Marchuk static void
139661fbb000SAlexey Marchuk accel_dpdk_cryptodev_key_deinit(struct spdk_accel_crypto_key *key)
139761fbb000SAlexey Marchuk {
139861fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_handle *key_handle, *key_handle_tmp;
139961fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_priv *priv = key->priv;
140061fbb000SAlexey Marchuk 
140161fbb000SAlexey Marchuk 	TAILQ_FOREACH_SAFE(key_handle, &priv->dev_keys, link, key_handle_tmp) {
140210dcf2dbSAlexey Marchuk 		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_encrypt);
140310dcf2dbSAlexey Marchuk 		accel_dpdk_cryptodev_key_handle_session_free(key_handle->device, key_handle->session_decrypt);
140461fbb000SAlexey Marchuk 		TAILQ_REMOVE(&priv->dev_keys, key_handle, link);
140561fbb000SAlexey Marchuk 		spdk_memset_s(key_handle, sizeof(*key_handle), 0, sizeof(*key_handle));
140661fbb000SAlexey Marchuk 		free(key_handle);
140761fbb000SAlexey Marchuk 	}
140861fbb000SAlexey Marchuk 
140961fbb000SAlexey Marchuk 	if (priv->xts_key) {
141061fbb000SAlexey Marchuk 		spdk_memset_s(priv->xts_key, key->key_size + key->key2_size, 0, key->key_size + key->key2_size);
141161fbb000SAlexey Marchuk 	}
141261fbb000SAlexey Marchuk 	free(priv->xts_key);
141361fbb000SAlexey Marchuk 	free(priv);
141461fbb000SAlexey Marchuk }
141561fbb000SAlexey Marchuk 
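/* Summarizes which cipher/key-size combinations each driver accepts: QAT, AESNI_MB and UADK
 * take AES-XTS in its 128-bit variant plus AES-CBC with 128- or 256-bit keys, while MLX5_PCI
 * takes AES-XTS in its 128- or 256-bit variants only. */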
14160db4d79dSJacek Kalwas static bool
141747f8f398SJacek Kalwas accel_dpdk_cryptodev_supports_cipher(enum spdk_accel_cipher cipher, size_t key_size)
14180db4d79dSJacek Kalwas {
14190db4d79dSJacek Kalwas 	switch (g_dpdk_cryptodev_driver) {
14200db4d79dSJacek Kalwas 	case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
142113603217SZhangfei Gao 	case ACCEL_DPDK_CRYPTODEV_DRIVER_UADK:
14220db4d79dSJacek Kalwas 	case ACCEL_DPDK_CRYPTODEV_DRIVER_AESNI_MB:
142347f8f398SJacek Kalwas 		switch (cipher) {
142447f8f398SJacek Kalwas 		case SPDK_ACCEL_CIPHER_AES_XTS:
142547f8f398SJacek Kalwas 			return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE;
142647f8f398SJacek Kalwas 		case SPDK_ACCEL_CIPHER_AES_CBC:
1427a25d1d33SDiwakar Sharma 			return key_size == ACCEL_DPDK_CRYPTODEV_AES_CBC_128_KEY_SIZE ||
1428a25d1d33SDiwakar Sharma 			       key_size == ACCEL_DPDK_CRYPTODEV_AES_CBC_256_KEY_SIZE;
142947f8f398SJacek Kalwas 		default:
143047f8f398SJacek Kalwas 			return false;
143147f8f398SJacek Kalwas 		}
14320db4d79dSJacek Kalwas 	case ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI:
143347f8f398SJacek Kalwas 		switch (cipher) {
143447f8f398SJacek Kalwas 		case SPDK_ACCEL_CIPHER_AES_XTS:
143547f8f398SJacek Kalwas 			return key_size == SPDK_ACCEL_AES_XTS_128_KEY_SIZE || key_size == SPDK_ACCEL_AES_XTS_256_KEY_SIZE;
143647f8f398SJacek Kalwas 		default:
143747f8f398SJacek Kalwas 			return false;
143847f8f398SJacek Kalwas 		}
14390db4d79dSJacek Kalwas 	default:
14400db4d79dSJacek Kalwas 		return false;
14410db4d79dSJacek Kalwas 	}
14420db4d79dSJacek Kalwas }
14430db4d79dSJacek Kalwas 
144461fbb000SAlexey Marchuk static int
144561fbb000SAlexey Marchuk accel_dpdk_cryptodev_key_init(struct spdk_accel_crypto_key *key)
144661fbb000SAlexey Marchuk {
144761fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_device *device;
144861fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_priv *priv;
144961fbb000SAlexey Marchuk 	struct accel_dpdk_cryptodev_key_handle *key_handle;
145061fbb000SAlexey Marchuk 	enum accel_dpdk_cryptodev_driver_type driver;
145110dcf2dbSAlexey Marchuk 	int rc;
145261fbb000SAlexey Marchuk 
145361fbb000SAlexey Marchuk 	driver = g_dpdk_cryptodev_driver;
145461fbb000SAlexey Marchuk 
145561fbb000SAlexey Marchuk 	priv = calloc(1, sizeof(*priv));
145661fbb000SAlexey Marchuk 	if (!priv) {
145761fbb000SAlexey Marchuk 		SPDK_ERRLOG("Memory allocation failed\n");
145861fbb000SAlexey Marchuk 		return -ENOMEM;
145961fbb000SAlexey Marchuk 	}
146061fbb000SAlexey Marchuk 	key->priv = priv;
146161fbb000SAlexey Marchuk 	priv->driver = driver;
14629cd94384SJacek Kalwas 	priv->cipher = key->cipher;
146361fbb000SAlexey Marchuk 	TAILQ_INIT(&priv->dev_keys);
146461fbb000SAlexey Marchuk 
14659cd94384SJacek Kalwas 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
146661fbb000SAlexey Marchuk 		/* DPDK expects the keys to be concatenated together. */
146761fbb000SAlexey Marchuk 		priv->xts_key = calloc(key->key_size + key->key2_size + 1, sizeof(char));
146861fbb000SAlexey Marchuk 		if (!priv->xts_key) {
146961fbb000SAlexey Marchuk 			SPDK_ERRLOG("Memory allocation failed\n");
147061fbb000SAlexey Marchuk 			accel_dpdk_cryptodev_key_deinit(key);
147161fbb000SAlexey Marchuk 			return -ENOMEM;
147261fbb000SAlexey Marchuk 		}
147361fbb000SAlexey Marchuk 		memcpy(priv->xts_key, key->key, key->key_size);
147461fbb000SAlexey Marchuk 		memcpy(priv->xts_key + key->key_size, key->key2, key->key2_size);
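		/* E.g. for AES-XTS-128 the two 16-byte accel keys are copied back to back into one
		 * 32-byte DPDK key; the +1 in the calloc() above leaves the buffer NUL-terminated. */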
147561fbb000SAlexey Marchuk 	}
147661fbb000SAlexey Marchuk 
147761fbb000SAlexey Marchuk 	pthread_mutex_lock(&g_device_lock);
147861fbb000SAlexey Marchuk 	TAILQ_FOREACH(device, &g_crypto_devices, link) {
147961fbb000SAlexey Marchuk 		if (device->type != driver) {
148061fbb000SAlexey Marchuk 			continue;
148161fbb000SAlexey Marchuk 		}
148261fbb000SAlexey Marchuk 		key_handle = calloc(1, sizeof(*key_handle));
148361fbb000SAlexey Marchuk 		if (!key_handle) {
148461fbb000SAlexey Marchuk 			pthread_mutex_unlock(&g_device_lock);
148561fbb000SAlexey Marchuk 			accel_dpdk_cryptodev_key_deinit(key);
148661fbb000SAlexey Marchuk 			return -ENOMEM;
148761fbb000SAlexey Marchuk 		}
148861fbb000SAlexey Marchuk 		key_handle->device = device;
148961fbb000SAlexey Marchuk 		TAILQ_INSERT_TAIL(&priv->dev_keys, key_handle, link);
149010dcf2dbSAlexey Marchuk 		rc = accel_dpdk_cryptodev_key_handle_configure(key, key_handle);
149110dcf2dbSAlexey Marchuk 		if (rc) {
149261fbb000SAlexey Marchuk 			pthread_mutex_unlock(&g_device_lock);
149361fbb000SAlexey Marchuk 			accel_dpdk_cryptodev_key_deinit(key);
149410dcf2dbSAlexey Marchuk 			return rc;
149561fbb000SAlexey Marchuk 		}
149661fbb000SAlexey Marchuk 		if (driver != ACCEL_DPDK_CRYPTODEV_DRIVER_MLX5_PCI) {
149761fbb000SAlexey Marchuk 			/* For drivers other than MLX5_PCI a single key handle is enough, so stop at the
149861fbb000SAlexey Marchuk 			 * first matching device. MLX5_PCI binds the key to a device's Protection Domain,
149961fbb000SAlexey Marchuk 			 * so it needs a key handle registered on every device and must not break here. */
1500f5d1a924SAlexey Marchuk 			break;
1501f5d1a924SAlexey Marchuk 		}
1502f5d1a924SAlexey Marchuk 	}
150361fbb000SAlexey Marchuk 	pthread_mutex_unlock(&g_device_lock);
150461fbb000SAlexey Marchuk 
150561fbb000SAlexey Marchuk 	if (TAILQ_EMPTY(&priv->dev_keys)) {
150661fbb000SAlexey Marchuk 		free(priv);
150761fbb000SAlexey Marchuk 		return -ENODEV;
1508f5d1a924SAlexey Marchuk 	}
1509f5d1a924SAlexey Marchuk 
151061fbb000SAlexey Marchuk 	return 0;
151161fbb000SAlexey Marchuk }
151261fbb000SAlexey Marchuk 
1513f5d1a924SAlexey Marchuk static void
151461fbb000SAlexey Marchuk accel_dpdk_cryptodev_write_config_json(struct spdk_json_write_ctx *w)
1515f5d1a924SAlexey Marchuk {
151661fbb000SAlexey Marchuk 	spdk_json_write_object_begin(w);
151761fbb000SAlexey Marchuk 	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_scan_accel_module");
151861fbb000SAlexey Marchuk 	spdk_json_write_object_end(w);
151961fbb000SAlexey Marchuk 
152061fbb000SAlexey Marchuk 	spdk_json_write_object_begin(w);
152161fbb000SAlexey Marchuk 	spdk_json_write_named_string(w, "method", "dpdk_cryptodev_set_driver");
152261fbb000SAlexey Marchuk 	spdk_json_write_named_object_begin(w, "params");
152361fbb000SAlexey Marchuk 	spdk_json_write_named_string(w, "driver_name", g_driver_names[g_dpdk_cryptodev_driver]);
152461fbb000SAlexey Marchuk 	spdk_json_write_object_end(w);
152561fbb000SAlexey Marchuk 	spdk_json_write_object_end(w);
1526f5d1a924SAlexey Marchuk }
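/* The generated startup configuration then contains entries roughly like:
 *   { "method": "dpdk_cryptodev_scan_accel_module" }
 *   { "method": "dpdk_cryptodev_set_driver", "params": { "driver_name": "crypto_qat" } }
 * where "crypto_qat" is illustrative and stands for whatever string g_driver_names[] maps the
 * active driver to. */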
1527f5d1a924SAlexey Marchuk 
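/* For QAT the required_alignment reported to the framework is spdk_u32log2(ctx->block_size),
 * i.e. an alignment expressed as a power-of-two exponent (512-byte blocks give 9); the other
 * drivers report 0, meaning no extra alignment requirement. */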
15289f17bf3cSKonrad Sztyber static int
15299f17bf3cSKonrad Sztyber accel_dpdk_cryptodev_get_operation_info(enum spdk_accel_opcode opcode,
15309f17bf3cSKonrad Sztyber 					const struct spdk_accel_operation_exec_ctx *ctx,
15319f17bf3cSKonrad Sztyber 					struct spdk_accel_opcode_info *info)
15329f17bf3cSKonrad Sztyber {
15339f17bf3cSKonrad Sztyber 	if (!accel_dpdk_cryptodev_supports_opcode(opcode)) {
15349f17bf3cSKonrad Sztyber 		SPDK_ERRLOG("Received unexpected opcode: %d", opcode);
15359f17bf3cSKonrad Sztyber 		assert(false);
15369f17bf3cSKonrad Sztyber 		return -EINVAL;
15379f17bf3cSKonrad Sztyber 	}
15389f17bf3cSKonrad Sztyber 
15399f17bf3cSKonrad Sztyber 	switch (g_dpdk_cryptodev_driver) {
15409f17bf3cSKonrad Sztyber 	case ACCEL_DPDK_CRYPTODEV_DRIVER_QAT:
15419f17bf3cSKonrad Sztyber 		info->required_alignment = spdk_u32log2(ctx->block_size);
15429f17bf3cSKonrad Sztyber 		break;
15439f17bf3cSKonrad Sztyber 	default:
15449f17bf3cSKonrad Sztyber 		info->required_alignment = 0;
15459f17bf3cSKonrad Sztyber 		break;
15469f17bf3cSKonrad Sztyber 	}
15479f17bf3cSKonrad Sztyber 
15489f17bf3cSKonrad Sztyber 	return 0;
15499f17bf3cSKonrad Sztyber }
15509f17bf3cSKonrad Sztyber 
155161fbb000SAlexey Marchuk static struct spdk_accel_module_if g_accel_dpdk_cryptodev_module = {
155261fbb000SAlexey Marchuk 	.module_init		= accel_dpdk_cryptodev_init,
155361fbb000SAlexey Marchuk 	.module_fini		= accel_dpdk_cryptodev_fini,
155461fbb000SAlexey Marchuk 	.write_config_json	= accel_dpdk_cryptodev_write_config_json,
155561fbb000SAlexey Marchuk 	.get_ctx_size		= accel_dpdk_cryptodev_ctx_size,
155661fbb000SAlexey Marchuk 	.name			= "dpdk_cryptodev",
155761fbb000SAlexey Marchuk 	.supports_opcode	= accel_dpdk_cryptodev_supports_opcode,
155861fbb000SAlexey Marchuk 	.get_io_channel		= accel_dpdk_cryptodev_get_io_channel,
155961fbb000SAlexey Marchuk 	.submit_tasks		= accel_dpdk_cryptodev_submit_tasks,
156061fbb000SAlexey Marchuk 	.crypto_key_init	= accel_dpdk_cryptodev_key_init,
156161fbb000SAlexey Marchuk 	.crypto_key_deinit	= accel_dpdk_cryptodev_key_deinit,
15620db4d79dSJacek Kalwas 	.crypto_supports_cipher	= accel_dpdk_cryptodev_supports_cipher,
15639f17bf3cSKonrad Sztyber 	.get_operation_info	= accel_dpdk_cryptodev_get_operation_info,
156461fbb000SAlexey Marchuk };
1565