/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2023 NVIDIA Corporation & Affiliates
 */

#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <bus_pci_driver.h>
#include <rte_memory.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>

#include "mlx5_crypto_utils.h"
#include "mlx5_crypto.h"

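/*
 * Advertised capabilities: AES-XTS with 32 B or 64 B keys (an AES-128 or
 * AES-256 key pair), a 16 B tweak, and 512 B, 4 KB or 1 MB data units.
 */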
const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {
	{		/* AES XTS */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_XTS,
				.block_size = 16,
				.key_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.dataunit_set =
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES |
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES |
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES,
			}, }
		}, }
	},
};

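/*
 * Configure the driver-private session: prepare a DEK from the cipher key
 * and pre-compute the crypto BSF control words (block size, encryption
 * order and standard, data-unit block size resolution, DEK ID).
 */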
static int
mlx5_crypto_xts_sym_session_configure(struct rte_cryptodev *dev,
				      struct rte_crypto_sym_xform *xform,
				      struct rte_cryptodev_sym_session *session)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *sess_private_data =
		CRYPTODEV_GET_SYM_SESS_PRIV(session);
	struct rte_crypto_cipher_xform *cipher;
	uint8_t encryption_order;

	if (unlikely(xform->next != NULL)) {
		DRV_LOG(ERR, "Xform next is not supported.");
		return -ENOTSUP;
	}
	if (unlikely((xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) ||
		     (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_XTS))) {
		DRV_LOG(ERR, "Only AES-XTS algorithm is supported.");
		return -ENOTSUP;
	}
	cipher = &xform->cipher;
	sess_private_data->dek = mlx5_crypto_dek_prepare(priv, cipher);
	if (sess_private_data->dek == NULL) {
		DRV_LOG(ERR, "Failed to prepare dek.");
		return -ENOMEM;
	}
	if (cipher->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;
	else
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;
	sess_private_data->bs_bpt_eo_es = rte_cpu_to_be_32
			(MLX5_BSF_SIZE_64B << MLX5_BSF_SIZE_OFFSET |
			 MLX5_BSF_P_TYPE_CRYPTO << MLX5_BSF_P_TYPE_OFFSET |
			 encryption_order << MLX5_ENCRYPTION_ORDER_OFFSET |
			 MLX5_ENCRYPTION_STANDARD_AES_XTS);
	switch (xform->cipher.dataunit_len) {
	case 0:
		sess_private_data->bsp_res = 0;
		break;
	case 512:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_512B <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	case 4096:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_4096B <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	case 1048576:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_1MB <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	default:
		DRV_LOG(ERR, "Cipher data unit length is not supported.");
		return -ENOTSUP;
	}
	sess_private_data->iv_offset = cipher->iv.offset;
	sess_private_data->dek_id =
			rte_cpu_to_be_32(sess_private_data->dek->obj->id &
					 0xffffff);
	DRV_LOG(DEBUG, "Session %p was configured.", sess_private_data);
	return 0;
}

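/* Destroy the DevX QP and CQ, free the MR B-tree cache and the QP memory. */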
static void
mlx5_crypto_xts_qp_release(struct mlx5_crypto_qp *qp)
{
	if (qp == NULL)
		return;
	mlx5_devx_qp_destroy(&qp->qp_obj);
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	mlx5_devx_cq_destroy(&qp->cq_obj);
	rte_free(qp);
}

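/* Release the indirect mkeys of a queue pair and then the QP itself. */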
static int
mlx5_crypto_xts_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

	mlx5_crypto_indirect_mkeys_release(qp, qp->entries_n);
	mlx5_crypto_xts_qp_release(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

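/*
 * Derive the BSF block size field from the op's cipher data length when
 * the session has no fixed data-unit length. Returns UINT32_MAX for an
 * unsupported length.
 */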
static __rte_noinline uint32_t
mlx5_crypto_xts_get_block_size(struct rte_crypto_op *op)
{
	uint32_t bl = op->sym->cipher.data.length;

	switch (bl) {
	case (1 << 20):
		return RTE_BE32(MLX5_BLOCK_SIZE_1MB << MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 12):
		return RTE_BE32(MLX5_BLOCK_SIZE_4096B <<
				MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 9):
		return RTE_BE32(MLX5_BLOCK_SIZE_512B << MLX5_BLOCK_SIZE_OFFSET);
	default:
		DRV_LOG(ERR, "Unknown block size: %u.", bl);
		return UINT32_MAX;
	}
}

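/*
 * Fill one KLM entry from an mbuf segment, consuming up to *remain bytes.
 * Returns the resolved lkey, which is UINT32_MAX on MR lookup failure.
 */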
static __rte_always_inline uint32_t
mlx5_crypto_xts_klm_set(struct mlx5_crypto_qp *qp, struct rte_mbuf *mbuf,
			struct mlx5_wqe_dseg *klm, uint32_t offset,
			uint32_t *remain)
{
	uint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	if (data_len > *remain)
		data_len = *remain;
	*remain -= data_len;
	klm->bcount = rte_cpu_to_be_32(data_len);
	klm->pbuf = rte_cpu_to_be_64(addr);
	klm->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
	return klm->lkey;
}

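/*
 * Build the KLM list covering the cipher data region of a possibly
 * multi-segment mbuf chain. Returns the number of KLM entries filled,
 * or 0 on error with op->status set accordingly.
 */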
static __rte_always_inline uint32_t
mlx5_crypto_xts_klms_set(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op,
			 struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm)
{
	uint32_t remain_len = op->sym->cipher.data.length;
	uint32_t nb_segs = mbuf->nb_segs;
	uint32_t klm_n = 1u;

	/* First mbuf needs to take the cipher offset. */
	if (unlikely(mlx5_crypto_xts_klm_set(qp, mbuf, klm,
		     op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return 0;
	}
	while (remain_len) {
		nb_segs--;
		mbuf = mbuf->next;
		if (unlikely(mbuf == NULL || nb_segs == 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
		if (unlikely(mlx5_crypto_xts_klm_set(qp, mbuf, ++klm, 0,
						&remain_len) == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return 0;
		}
		klm_n++;
	}
	return klm_n;
}

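/*
 * Build one WQE set for an op: a UMR WQE registering the data buffer with
 * the crypto BSF attached, an RDMA_WRITE WQE moving the data through the
 * crypto mkey, and a padding NOP WQE when the RDMA_WRITE is shorter than
 * the fixed stride. Returns 1 on success, 0 on failure.
 */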
static __rte_always_inline int
mlx5_crypto_xts_wqe_set(struct mlx5_crypto_priv *priv,
			 struct mlx5_crypto_qp *qp,
			 struct rte_crypto_op *op,
			 struct mlx5_umr_wqe *umr)
{
	struct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
	struct mlx5_wqe_cseg *cseg = &umr->ctr;
	struct mlx5_wqe_mkey_cseg *mkc = &umr->mkc;
	struct mlx5_wqe_dseg *klms = &umr->kseg[0];
	struct mlx5_wqe_umr_bsf_seg *bsf = ((struct mlx5_wqe_umr_bsf_seg *)
				      RTE_PTR_ADD(umr, priv->umr_wqe_size)) - 1;
	uint32_t ds;
	bool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src;
	/* Set UMR WQE. */
	uint32_t klm_n = mlx5_crypto_xts_klms_set(qp, op,
				   ipl ? op->sym->m_src : op->sym->m_dst, klms);

	if (unlikely(klm_n == 0))
		return 0;
	bsf->bs_bpt_eo_es = sess->bs_bpt_eo_es;
	if (unlikely(!sess->bsp_res)) {
		bsf->bsp_res = mlx5_crypto_xts_get_block_size(op);
		if (unlikely(bsf->bsp_res == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	} else {
		bsf->bsp_res = sess->bsp_res;
	}
	bsf->raw_data_size = rte_cpu_to_be_32(op->sym->cipher.data.length);
	memcpy(bsf->xts_initial_tweak,
	       rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), 16);
	bsf->res_dp = sess->dek_id;
	mkc->len = rte_cpu_to_be_64(op->sym->cipher.data.length);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_UMR);
	qp->db_pi += priv->umr_wqe_stride;
	/* Set RDMA_WRITE WQE. */
	cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
	klms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe));
	if (!ipl) {
		klm_n = mlx5_crypto_xts_klms_set(qp, op, op->sym->m_src, klms);
		if (unlikely(klm_n == 0))
			return 0;
	} else {
		memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
	}
	ds = 2 + klm_n;
	cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
							MLX5_OPCODE_RDMA_WRITE);
	ds = RTE_ALIGN(ds, 4);
	qp->db_pi += ds >> 2;
	/* Set NOP WQE if needed. */
	if (priv->max_rdmar_ds > ds) {
		cseg += ds;
		ds = priv->max_rdmar_ds - ds;
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
		cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
							       MLX5_OPCODE_NOP);
		qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
	}
	qp->wqe = (uint8_t *)cseg;
	return 1;
}

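/*
 * Enqueue up to nb_ops operations: build one WQE set per op in the SQ
 * buffer and ring the doorbell once for the whole burst.
 */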
static uint16_t
mlx5_crypto_xts_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
			      uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	struct mlx5_crypto_priv *priv = qp->priv;
	struct mlx5_umr_wqe *umr;
	struct rte_crypto_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint32_t idx;

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		op = *ops++;
		umr = RTE_PTR_ADD(qp->qp_obj.umem_buf,
			priv->wqe_set_size * idx);
		if (unlikely(mlx5_crypto_xts_wqe_set(priv, qp, op, umr) == 0)) {
			qp->stats.enqueue_err_count++;
			if (remain != nb_ops) {
				qp->stats.enqueued_count -= remain;
				break;
			}
			return 0;
		}
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	mlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,
			   qp->db_pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],
			   !priv->uar.dbnc);
	return nb_ops;
}

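/* Mark the op as failed and log the CQE error syndrome. */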
static __rte_noinline void
mlx5_crypto_xts_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							&qp->cq_obj.cqes[idx];

	op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	qp->stats.dequeue_err_count++;
	DRV_LOG(ERR, "CQE ERR:%x.", rte_be_to_cpu_32(cqe->syndrome));
}

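/*
 * Dequeue completed operations: poll for software-owned CQEs, set per-op
 * status, and update the CQ doorbell record once at the end of the burst.
 */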
static uint16_t
mlx5_crypto_xts_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	volatile struct mlx5_cqe *restrict cqe;
	struct rte_crypto_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		op = qp->ops[idx];
		cqe = &qp->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))
				mlx5_crypto_xts_cqe_err_handle(qp, op);
			break;
		}
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

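/*
 * Pre-build the constant parts of every WQE set in the SQ: the UMR
 * control, mkey context and BSF key tag, and the RDMA_WRITE control and
 * remote-address segments. Only per-op fields are written at enqueue time.
 */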
static void
mlx5_crypto_xts_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
{
	uint32_t i;

	for (i = 0; i < qp->entries_n; i++) {
		struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf,
			i * priv->wqe_set_size);
		struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
								     (cseg + 1);
		struct mlx5_wqe_umr_bsf_seg *bsf =
			(struct mlx5_wqe_umr_bsf_seg *)(RTE_PTR_ADD(cseg,
						       priv->umr_wqe_size)) - 1;
		struct mlx5_wqe_rseg *rseg;

		/* Init UMR WQE. */
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |
					 (priv->umr_wqe_size / MLX5_WSEG_SIZE));
		cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
				       MLX5_COMP_MODE_OFFSET);
		cseg->misc = rte_cpu_to_be_32(qp->mkey[i]->id);
		ucseg->if_cf_toe_cq_res = RTE_BE32(1u << MLX5_UMRC_IF_OFFSET);
		ucseg->mkey_mask = RTE_BE64(1u << 0); /* Mkey length bit. */
		ucseg->ko_to_bs = rte_cpu_to_be_32
			((MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size) <<
			 MLX5_UMRC_KO_OFFSET) | (4 << MLX5_UMRC_TO_BS_OFFSET));
		bsf->keytag = priv->keytag;
		/* Init RDMA WRITE WQE. */
		cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
		cseg->flags = RTE_BE32((MLX5_COMP_ALWAYS <<
				      MLX5_COMP_MODE_OFFSET) |
				      MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);
		rseg = (struct mlx5_wqe_rseg *)(cseg + 1);
		rseg->rkey = rte_cpu_to_be_32(qp->mkey[i]->id);
	}
}

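/*
 * KLM-update callback for mlx5_crypto_indirect_mkeys_prepare(): returns
 * the base address of the WQE set holding the KLM list of entry idx.
 */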
static void *
mlx5_crypto_gcm_mkey_klm_update(struct mlx5_crypto_priv *priv,
				struct mlx5_crypto_qp *qp,
				uint32_t idx)
{
	return RTE_PTR_ADD(qp->qp_obj.umem_buf, priv->wqe_set_size * idx);
}

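/*
 * Create a queue pair: allocate the QP structure with trailing mkey and
 * op arrays, create the CQ and QP DevX objects, move the QP to RTS in
 * self-loopback mode and prepare one indirect crypto mkey per descriptor.
 */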
static int
mlx5_crypto_xts_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
				 const struct rte_cryptodev_qp_conf *qp_conf,
				 int socket_id)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_devx_qp_attr attr = {0};
	struct mlx5_crypto_qp *qp;
	uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
	uint32_t ret;
	uint32_t alloc_size = sizeof(*qp);
	uint32_t log_wqbb_n;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
	};
	struct mlx5_devx_mkey_attr mkey_attr = {
		.pd = priv->cdev->pdn,
		.umr_en = 1,
		.crypto_en = 1,
		.set_remote_rw = 1,
		.klm_num = MLX5_CRYPTO_KLM_SEGS_NUM(priv->umr_wqe_size),
	};

	if (dev->data->queue_pairs[qp_id] != NULL)
		mlx5_crypto_xts_queue_pair_release(dev, qp_id);
	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += (sizeof(struct rte_crypto_op *) +
		       sizeof(struct mlx5_devx_obj *)) *
		       RTE_BIT32(log_nb_desc);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate QP memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc,
				&cq_attr, socket_id) != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	log_wqbb_n = rte_log2_u32(RTE_BIT32(log_nb_desc) *
				(priv->wqe_set_size / MLX5_SEND_WQE_BB));
	attr.pd = priv->cdev->pdn;
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
	attr.cqn = qp->cq_obj.cq->id;
	attr.num_of_receive_wqes = 0;
	attr.num_of_send_wqbbs = RTE_BIT32(log_wqbb_n);
	attr.ts_format =
		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj,
					attr.num_of_send_wqbbs * MLX5_WQE_SIZE,
					&attr, socket_id);
	if (ret) {
		DRV_LOG(ERR, "Failed to create QP.");
		goto error;
	}
	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
			      priv->dev_config.socket_id) != 0) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/*
	 * To configure self loopback, the remote QP ID passed to the devx
	 * qp2rts call is the ID of this same QP.
	 */
	if (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))
		goto error;
	qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
							   RTE_CACHE_LINE_SIZE);
	qp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));
	qp->entries_n = 1 << log_nb_desc;
	if (mlx5_crypto_indirect_mkeys_prepare(priv, qp, &mkey_attr,
					       mlx5_crypto_gcm_mkey_klm_update)) {
		DRV_LOG(ERR, "Cannot allocate indirect memory regions.");
		rte_errno = ENOMEM;
		goto error;
	}
	mlx5_crypto_xts_qp_init(priv, qp);
	qp->priv = priv;
	dev->data->queue_pairs[qp_id] = qp;
	return 0;
error:
	mlx5_crypto_xts_qp_release(qp);
	return -1;
}

/*
 * Calculate the UMR WQE size and the RDMA Write WQE size with the
 * following limitations:
 *	- Each WQE size is a multiple of 64.
 *	- The sum of the UMR WQE size and the RDMA_W WQE size is a power of 2.
 *	- The number of entries in the UMR WQE's KLM list is a multiple of 4.
 */
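/*
 * Worked example (the figures are illustrative assumptions, not values
 * taken from the PRM): with 16 B KLM entries and 64 B WQBBs, segs_num = 5
 * is first padded to 8 KLM entries; if the WQBB-aligned UMR and RDMA_WRITE
 * sizes then sum to, say, 448 B, the 64 B gap up to the next power of 2
 * (512 B) is absorbed into the UMR WQE size.
 */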
static void
mlx5_crypto_xts_get_wqe_sizes(uint32_t segs_num, uint32_t *umr_size,
			      uint32_t *rdmaw_size)
{
	uint32_t diff, wqe_set_size;

	*umr_size = MLX5_CRYPTO_UMR_WQE_STATIC_SIZE +
			RTE_ALIGN(segs_num, 4) *
			sizeof(struct mlx5_wqe_dseg);
	/* Make sure UMR WQE size is multiple of WQBB. */
	*umr_size = RTE_ALIGN(*umr_size, MLX5_SEND_WQE_BB);
	*rdmaw_size = sizeof(struct mlx5_rdma_write_wqe) +
			sizeof(struct mlx5_wqe_dseg) *
			(segs_num <= 2 ? 2 : 2 +
			RTE_ALIGN(segs_num - 2, 4));
	/* Make sure RDMA_WRITE WQE size is multiple of WQBB. */
	*rdmaw_size = RTE_ALIGN(*rdmaw_size, MLX5_SEND_WQE_BB);
	wqe_set_size = *rdmaw_size + *umr_size;
	diff = rte_align32pow2(wqe_set_size) - wqe_set_size;
	/* Make sure wqe_set size is power of 2. */
	if (diff)
		*umr_size += diff;
}

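/*
 * Find the largest segment count whose UMR and RDMA_WRITE WQEs both fit
 * in the device's maximum WQE size, stepping down in units of 4 KLMs.
 */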
static uint8_t
mlx5_crypto_xts_max_segs_num(uint16_t max_wqe_size)
{
	int klms_sizes = max_wqe_size - MLX5_CRYPTO_UMR_WQE_STATIC_SIZE;
	uint32_t max_segs_cap = RTE_ALIGN_FLOOR(klms_sizes, MLX5_SEND_WQE_BB) /
			sizeof(struct mlx5_wqe_dseg);

	MLX5_ASSERT(klms_sizes >= MLX5_SEND_WQE_BB);
	while (max_segs_cap) {
		uint32_t umr_wqe_size, rdmw_wqe_size;

		mlx5_crypto_xts_get_wqe_sizes(max_segs_cap, &umr_wqe_size,
						&rdmw_wqe_size);
		if (umr_wqe_size <= max_wqe_size &&
				rdmw_wqe_size <= max_wqe_size)
			break;
		max_segs_cap -= 4;
	}
	return max_segs_cap;
}

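/*
 * Compute and store the per-queue WQE layout (WQE set size, UMR WQE size
 * and stride, RDMA_WRITE DS count) for the requested max_segs_num,
 * validating it against the device's maximum WQE size.
 */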
static int
mlx5_crypto_xts_configure_wqe_size(struct mlx5_crypto_priv *priv,
				   uint16_t max_wqe_size, uint32_t max_segs_num)
{
	uint32_t rdmw_wqe_size, umr_wqe_size;

	mlx5_crypto_xts_get_wqe_sizes(max_segs_num, &umr_wqe_size,
			&rdmw_wqe_size);
	priv->wqe_set_size = rdmw_wqe_size + umr_wqe_size;
	if (umr_wqe_size > max_wqe_size ||
				rdmw_wqe_size > max_wqe_size) {
		DRV_LOG(ERR, "Invalid max_segs_num: %u. should be %u or lower.",
			max_segs_num,
			mlx5_crypto_xts_max_segs_num(max_wqe_size));
		rte_errno = EINVAL;
		return -EINVAL;
	}
	priv->umr_wqe_size = (uint16_t)umr_wqe_size;
	priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
	priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
	return 0;
}

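/*
 * AES-XTS engine entry point: configure the WQE layout and plug the
 * XTS-specific session, queue pair and burst handlers into the cryptodev.
 */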
int
mlx5_crypto_xts_init(struct mlx5_crypto_priv *priv)
{
	struct mlx5_common_device *cdev = priv->cdev;
	struct rte_cryptodev *crypto_dev = priv->crypto_dev;
	struct rte_cryptodev_ops *dev_ops = crypto_dev->dev_ops;
	int ret;

	ret = mlx5_crypto_xts_configure_wqe_size(priv,
		cdev->config.hca_attr.max_wqe_sz_sq, priv->max_segs_num);
	if (ret)
		return -EINVAL;
	/* Override the ops with the AES-XTS specific ones. */
	dev_ops->sym_session_configure = mlx5_crypto_xts_sym_session_configure;
	dev_ops->queue_pair_setup = mlx5_crypto_xts_queue_pair_setup;
	dev_ops->queue_pair_release = mlx5_crypto_xts_queue_pair_release;
	crypto_dev->dequeue_burst = mlx5_crypto_xts_dequeue_burst;
	crypto_dev->enqueue_burst = mlx5_crypto_xts_enqueue_burst;
	priv->caps = mlx5_crypto_caps;
	return 0;
}