xref: /dpdk/drivers/crypto/mlx5/mlx5_crypto_gcm.c (revision e77506397fc8005c5129e22e9e2d15d5876790fd)
104da07e6SSuanming Mou /* SPDX-License-Identifier: BSD-3-Clause
204da07e6SSuanming Mou  * Copyright (c) 2023 NVIDIA Corporation & Affiliates
304da07e6SSuanming Mou  */
404da07e6SSuanming Mou 
504da07e6SSuanming Mou #include <rte_malloc.h>
604da07e6SSuanming Mou #include <rte_mempool.h>
704da07e6SSuanming Mou #include <rte_eal_paging.h>
804da07e6SSuanming Mou #include <rte_errno.h>
904da07e6SSuanming Mou #include <rte_log.h>
1004da07e6SSuanming Mou #include <bus_pci_driver.h>
1104da07e6SSuanming Mou #include <rte_memory.h>
12b0109583SSuanming Mou #include <rte_io.h>
1304da07e6SSuanming Mou 
1404da07e6SSuanming Mou #include <mlx5_glue.h>
1504da07e6SSuanming Mou #include <mlx5_common.h>
1604da07e6SSuanming Mou #include <mlx5_devx_cmds.h>
1704da07e6SSuanming Mou #include <mlx5_common_os.h>
1804da07e6SSuanming Mou 
1904da07e6SSuanming Mou #include "mlx5_crypto_utils.h"
2004da07e6SSuanming Mou #include "mlx5_crypto.h"
2104da07e6SSuanming Mou 
/*
 * AES-GCM uses indirect KLM mode. The UMR WQE comprises of WQE control +
 * UMR control + mkey context + indirect KLM. The WQE size is aligned to
 * be 3 WQEBBS.
 */
#define MLX5_UMR_GCM_WQE_SIZE \
	(RTE_ALIGN(sizeof(struct mlx5_umr_wqe) + sizeof(struct mlx5_wqe_dseg), \
			MLX5_SEND_WQE_BB))

/*
 * One UMR WQE set = the UMR WQE itself plus the SEND_EN WQE that rings
 * the follower (crypto) QP, each rounded up to a whole WQEBB.
 */
#define MLX5_UMR_GCM_WQE_SET_SIZE \
	(MLX5_UMR_GCM_WQE_SIZE + \
	 RTE_ALIGN(sizeof(struct mlx5_wqe_send_en_wqe), \
	 MLX5_SEND_WQE_BB))

/* Number of WQEBBs one UMR WQE occupies in the UMR SQ. */
#define MLX5_UMR_GCM_WQE_STRIDE \
	(MLX5_UMR_GCM_WQE_SIZE / MLX5_SEND_WQE_BB)

/* Control-segment opcode word for a crypto MMO WQE (opcode | opc_mod). */
#define MLX5_MMO_CRYPTO_OPC (MLX5_OPCODE_MMO | \
	(MLX5_OPC_MOD_MMO_CRYPTO << WQE_CSEG_OPC_MOD_OFFSET))

/*
 * The status default value is RTE_CRYPTO_OP_STATUS_SUCCESS.
 * Copy tag should fill different value to status.
 */
#define MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY (RTE_CRYPTO_OP_STATUS_SUCCESS + 1)
47b0109583SSuanming Mou 
/* Classification of one crypto op, filled by mlx5_crypto_gcm_get_op_info(). */
struct mlx5_crypto_gcm_op_info {
	bool need_umr;	/* Buffers are scattered/offset; a UMR WQE is required. */
	bool is_oop;	/* Out-of-place op: m_dst is set and differs from m_src. */
	bool is_enc;	/* Session op type is MLX5_CRYPTO_OP_TYPE_ENCRYPTION. */
	void *digest;	/* Tag location to copy to after completion, or NULL. */
	void *src_addr;	/* Effective source start (the AAD address). */
};
55b0109583SSuanming Mou 
/* Resolved source/destination buffers and mkeys for one GCM data-path WQE. */
struct mlx5_crypto_gcm_data {
	void *src_addr;		/* Source buffer start. */
	uint32_t src_bytes;	/* Source length in bytes. */
	void *dst_addr;		/* Destination buffer start. */
	uint32_t dst_bytes;	/* Destination length in bytes. */
	uint32_t src_mkey;	/* Memory key covering the source. */
	uint32_t dst_mkey;	/* Memory key covering the destination. */
};
64b0109583SSuanming Mou 
/*
 * Deferred tag-copy request recorded per op; consumed at completion time
 * when op status is MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY.
 */
struct __rte_packed_begin mlx5_crypto_gcm_tag_cpy_info {
	void *digest;	/* User-provided digest address to copy the tag to. */
	uint8_t tag_len;	/* Tag length in bytes (from the session). */
} __rte_packed_end;
69b0109583SSuanming Mou 
/*
 * Capability table template: the first entry is populated at probe time by
 * mlx5_crypto_generate_gcm_cap() according to HCA attributes; the trailing
 * RTE_CRYPTO_OP_TYPE_UNDEFINED entry terminates the list.
 */
static struct rte_cryptodev_capabilities mlx5_crypto_gcm_caps[] = {
	{
		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
	},
	{
		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
	}
};
7804da07e6SSuanming Mou 
7904da07e6SSuanming Mou int
8098fb4bb0SSuanming Mou mlx5_crypto_dek_fill_gcm_attr(struct mlx5_crypto_dek *dek,
8198fb4bb0SSuanming Mou 			      struct mlx5_devx_dek_attr *dek_attr,
8298fb4bb0SSuanming Mou 			      void *cb_ctx)
8398fb4bb0SSuanming Mou {
8498fb4bb0SSuanming Mou 	uint32_t offset = 0;
8598fb4bb0SSuanming Mou 	struct mlx5_crypto_dek_ctx *ctx = cb_ctx;
8698fb4bb0SSuanming Mou 	struct rte_crypto_aead_xform *aead_ctx = &ctx->xform->aead;
8798fb4bb0SSuanming Mou 
8898fb4bb0SSuanming Mou 	if (aead_ctx->algo != RTE_CRYPTO_AEAD_AES_GCM) {
8998fb4bb0SSuanming Mou 		DRV_LOG(ERR, "Only AES-GCM algo supported.");
9098fb4bb0SSuanming Mou 		return -EINVAL;
9198fb4bb0SSuanming Mou 	}
9298fb4bb0SSuanming Mou 	dek_attr->key_purpose = MLX5_CRYPTO_KEY_PURPOSE_GCM;
9398fb4bb0SSuanming Mou 	switch (aead_ctx->key.length) {
9498fb4bb0SSuanming Mou 	case 16:
9598fb4bb0SSuanming Mou 		offset = 16;
9698fb4bb0SSuanming Mou 		dek->size = 16;
9798fb4bb0SSuanming Mou 		dek_attr->key_size = MLX5_CRYPTO_KEY_SIZE_128b;
9898fb4bb0SSuanming Mou 		break;
9998fb4bb0SSuanming Mou 	case 32:
10098fb4bb0SSuanming Mou 		dek->size = 32;
10198fb4bb0SSuanming Mou 		dek_attr->key_size = MLX5_CRYPTO_KEY_SIZE_256b;
10298fb4bb0SSuanming Mou 		break;
10398fb4bb0SSuanming Mou 	default:
10498fb4bb0SSuanming Mou 		DRV_LOG(ERR, "Wrapped key size not supported.");
10598fb4bb0SSuanming Mou 		return -EINVAL;
10698fb4bb0SSuanming Mou 	}
10798fb4bb0SSuanming Mou 	memcpy(&dek_attr->key[offset], aead_ctx->key.data, aead_ctx->key.length);
10898fb4bb0SSuanming Mou 	memcpy(&dek->data, aead_ctx->key.data, aead_ctx->key.length);
10998fb4bb0SSuanming Mou 	return 0;
11098fb4bb0SSuanming Mou }
11198fb4bb0SSuanming Mou 
1127f8eb434SSuanming Mou static int
1136c948396SSuanming Mou mlx5_crypto_generate_gcm_cap(struct mlx5_hca_crypto_mmo_attr *mmo_attr,
1146c948396SSuanming Mou 			     struct rte_cryptodev_capabilities *cap)
1156c948396SSuanming Mou {
1166c948396SSuanming Mou 	/* Init key size. */
1176c948396SSuanming Mou 	if (mmo_attr->gcm_128_encrypt && mmo_attr->gcm_128_decrypt &&
1186c948396SSuanming Mou 		mmo_attr->gcm_256_encrypt && mmo_attr->gcm_256_decrypt) {
1196c948396SSuanming Mou 		cap->sym.aead.key_size.min = 16;
1206c948396SSuanming Mou 		cap->sym.aead.key_size.max = 32;
1216c948396SSuanming Mou 		cap->sym.aead.key_size.increment = 16;
1226c948396SSuanming Mou 	} else if (mmo_attr->gcm_256_encrypt && mmo_attr->gcm_256_decrypt) {
1236c948396SSuanming Mou 		cap->sym.aead.key_size.min = 32;
1246c948396SSuanming Mou 		cap->sym.aead.key_size.max = 32;
1256c948396SSuanming Mou 		cap->sym.aead.key_size.increment = 0;
1266c948396SSuanming Mou 	} else if (mmo_attr->gcm_128_encrypt && mmo_attr->gcm_128_decrypt) {
1276c948396SSuanming Mou 		cap->sym.aead.key_size.min = 16;
1286c948396SSuanming Mou 		cap->sym.aead.key_size.max = 16;
1296c948396SSuanming Mou 		cap->sym.aead.key_size.increment = 0;
1306c948396SSuanming Mou 	} else {
1316c948396SSuanming Mou 		DRV_LOG(ERR, "No available AES-GCM encryption/decryption supported.");
1326c948396SSuanming Mou 		return -1;
1336c948396SSuanming Mou 	}
1346c948396SSuanming Mou 	/* Init tag size. */
1356c948396SSuanming Mou 	if (mmo_attr->gcm_auth_tag_128 && mmo_attr->gcm_auth_tag_96) {
1366c948396SSuanming Mou 		cap->sym.aead.digest_size.min = 12;
1376c948396SSuanming Mou 		cap->sym.aead.digest_size.max = 16;
1386c948396SSuanming Mou 		cap->sym.aead.digest_size.increment = 4;
1396c948396SSuanming Mou 	} else if (mmo_attr->gcm_auth_tag_96) {
1406c948396SSuanming Mou 		cap->sym.aead.digest_size.min = 12;
1416c948396SSuanming Mou 		cap->sym.aead.digest_size.max = 12;
1426c948396SSuanming Mou 		cap->sym.aead.digest_size.increment = 0;
1436c948396SSuanming Mou 	} else if (mmo_attr->gcm_auth_tag_128) {
1446c948396SSuanming Mou 		cap->sym.aead.digest_size.min = 16;
1456c948396SSuanming Mou 		cap->sym.aead.digest_size.max = 16;
1466c948396SSuanming Mou 		cap->sym.aead.digest_size.increment = 0;
1476c948396SSuanming Mou 	} else {
1486c948396SSuanming Mou 		DRV_LOG(ERR, "No available AES-GCM tag size supported.");
1496c948396SSuanming Mou 		return -1;
1506c948396SSuanming Mou 	}
1516c948396SSuanming Mou 	/* Init AAD size. */
1526c948396SSuanming Mou 	cap->sym.aead.aad_size.min = 0;
1536c948396SSuanming Mou 	cap->sym.aead.aad_size.max = UINT16_MAX;
1546c948396SSuanming Mou 	cap->sym.aead.aad_size.increment = 1;
1556c948396SSuanming Mou 	/* Init IV size. */
1566c948396SSuanming Mou 	cap->sym.aead.iv_size.min = 12;
1576c948396SSuanming Mou 	cap->sym.aead.iv_size.max = 12;
1586c948396SSuanming Mou 	cap->sym.aead.iv_size.increment = 0;
1596c948396SSuanming Mou 	/* Init left items. */
1606c948396SSuanming Mou 	cap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1616c948396SSuanming Mou 	cap->sym.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
1626c948396SSuanming Mou 	cap->sym.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
1636c948396SSuanming Mou 	return 0;
1646c948396SSuanming Mou }
1656c948396SSuanming Mou 
1666c948396SSuanming Mou static int
1677f8eb434SSuanming Mou mlx5_crypto_sym_gcm_session_configure(struct rte_cryptodev *dev,
1687f8eb434SSuanming Mou 				  struct rte_crypto_sym_xform *xform,
1697f8eb434SSuanming Mou 				  struct rte_cryptodev_sym_session *session)
1707f8eb434SSuanming Mou {
1717f8eb434SSuanming Mou 	struct mlx5_crypto_priv *priv = dev->data->dev_private;
1727f8eb434SSuanming Mou 	struct mlx5_crypto_session *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(session);
1737f8eb434SSuanming Mou 	struct rte_crypto_aead_xform *aead = &xform->aead;
1747f8eb434SSuanming Mou 	uint32_t op_type;
1757f8eb434SSuanming Mou 
1767f8eb434SSuanming Mou 	if (unlikely(xform->next != NULL)) {
1777f8eb434SSuanming Mou 		DRV_LOG(ERR, "Xform next is not supported.");
1787f8eb434SSuanming Mou 		return -ENOTSUP;
1797f8eb434SSuanming Mou 	}
1807f8eb434SSuanming Mou 	if (aead->algo != RTE_CRYPTO_AEAD_AES_GCM) {
1817f8eb434SSuanming Mou 		DRV_LOG(ERR, "Only AES-GCM algorithm is supported.");
1827f8eb434SSuanming Mou 		return -ENOTSUP;
1837f8eb434SSuanming Mou 	}
1840750c8b1SSuanming Mou 
1857f8eb434SSuanming Mou 	if (aead->op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
1867f8eb434SSuanming Mou 		op_type = MLX5_CRYPTO_OP_TYPE_ENCRYPTION;
1877f8eb434SSuanming Mou 	else
1887f8eb434SSuanming Mou 		op_type = MLX5_CRYPTO_OP_TYPE_DECRYPTION;
1897f8eb434SSuanming Mou 	sess_private_data->op_type = op_type;
1907f8eb434SSuanming Mou 	sess_private_data->mmo_ctrl = rte_cpu_to_be_32
1917f8eb434SSuanming Mou 			(op_type << MLX5_CRYPTO_MMO_OP_OFFSET |
1927f8eb434SSuanming Mou 			 MLX5_ENCRYPTION_TYPE_AES_GCM << MLX5_CRYPTO_MMO_TYPE_OFFSET);
193b32dbedbSSuanming Mou 	sess_private_data->wqe_aad_len = rte_cpu_to_be_32((uint32_t)aead->aad_length);
194b32dbedbSSuanming Mou 	sess_private_data->wqe_tag_len = rte_cpu_to_be_32((uint32_t)aead->digest_length);
1957f8eb434SSuanming Mou 	sess_private_data->aad_len = aead->aad_length;
1967f8eb434SSuanming Mou 	sess_private_data->tag_len = aead->digest_length;
1977f8eb434SSuanming Mou 	sess_private_data->iv_offset = aead->iv.offset;
1987f8eb434SSuanming Mou 	sess_private_data->iv_len = aead->iv.length;
1997f8eb434SSuanming Mou 	sess_private_data->dek = mlx5_crypto_dek_prepare(priv, xform);
2007f8eb434SSuanming Mou 	if (sess_private_data->dek == NULL) {
2017f8eb434SSuanming Mou 		DRV_LOG(ERR, "Failed to prepare dek.");
2027f8eb434SSuanming Mou 		return -ENOMEM;
2037f8eb434SSuanming Mou 	}
2047f8eb434SSuanming Mou 	sess_private_data->dek_id =
2057f8eb434SSuanming Mou 			rte_cpu_to_be_32(sess_private_data->dek->obj->id &
2067f8eb434SSuanming Mou 					 0xffffff);
2077f8eb434SSuanming Mou 	DRV_LOG(DEBUG, "Session %p was configured.", sess_private_data);
2087f8eb434SSuanming Mou 	return 0;
2097f8eb434SSuanming Mou }
2107f8eb434SSuanming Mou 
211b32dbedbSSuanming Mou static void *
212b32dbedbSSuanming Mou mlx5_crypto_gcm_mkey_klm_update(struct mlx5_crypto_priv *priv,
213b32dbedbSSuanming Mou 				struct mlx5_crypto_qp *qp __rte_unused,
214b32dbedbSSuanming Mou 				uint32_t idx)
215b32dbedbSSuanming Mou {
216b32dbedbSSuanming Mou 	return &qp->klm_array[idx * priv->max_klm_num];
217b32dbedbSSuanming Mou }
218b32dbedbSSuanming Mou 
219b32dbedbSSuanming Mou static int
220b32dbedbSSuanming Mou mlx5_crypto_gcm_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
221b32dbedbSSuanming Mou {
222b32dbedbSSuanming Mou 	struct mlx5_crypto_priv *priv = dev->data->dev_private;
223b32dbedbSSuanming Mou 	struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];
224b32dbedbSSuanming Mou 
225b32dbedbSSuanming Mou 	if (qp->umr_qp_obj.qp != NULL)
226b32dbedbSSuanming Mou 		mlx5_devx_qp_destroy(&qp->umr_qp_obj);
227b32dbedbSSuanming Mou 	if (qp->qp_obj.qp != NULL)
228b32dbedbSSuanming Mou 		mlx5_devx_qp_destroy(&qp->qp_obj);
229b32dbedbSSuanming Mou 	if (qp->cq_obj.cq != NULL)
230b32dbedbSSuanming Mou 		mlx5_devx_cq_destroy(&qp->cq_obj);
231b32dbedbSSuanming Mou 	if (qp->mr.obj != NULL) {
232b32dbedbSSuanming Mou 		void *opaq = qp->mr.addr;
233b32dbedbSSuanming Mou 
234b32dbedbSSuanming Mou 		priv->dereg_mr_cb(&qp->mr);
235b32dbedbSSuanming Mou 		rte_free(opaq);
236b32dbedbSSuanming Mou 	}
237b32dbedbSSuanming Mou 	mlx5_crypto_indirect_mkeys_release(qp, qp->entries_n);
238b32dbedbSSuanming Mou 	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
2390750c8b1SSuanming Mou 	rte_free(qp->ipsec_mem);
240b32dbedbSSuanming Mou 	rte_free(qp);
241b32dbedbSSuanming Mou 	dev->data->queue_pairs[qp_id] = NULL;
242b32dbedbSSuanming Mou 	return 0;
243b32dbedbSSuanming Mou }
244b32dbedbSSuanming Mou 
245b32dbedbSSuanming Mou static void
246b32dbedbSSuanming Mou mlx5_crypto_gcm_init_qp(struct mlx5_crypto_qp *qp)
247b32dbedbSSuanming Mou {
248b32dbedbSSuanming Mou 	volatile struct mlx5_gga_wqe *restrict wqe =
249b32dbedbSSuanming Mou 				    (volatile struct mlx5_gga_wqe *)qp->qp_obj.wqes;
250b32dbedbSSuanming Mou 	volatile union mlx5_gga_crypto_opaque *opaq = qp->opaque_addr;
251b32dbedbSSuanming Mou 	const uint32_t sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | 4u);
252b32dbedbSSuanming Mou 	const uint32_t flags = RTE_BE32(MLX5_COMP_ALWAYS <<
253b32dbedbSSuanming Mou 					MLX5_COMP_MODE_OFFSET);
254b32dbedbSSuanming Mou 	const uint32_t opaq_lkey = rte_cpu_to_be_32(qp->mr.lkey);
255b32dbedbSSuanming Mou 	int i;
256b32dbedbSSuanming Mou 
257b32dbedbSSuanming Mou 	/* All the next fields state should stay constant. */
258b32dbedbSSuanming Mou 	for (i = 0; i < qp->entries_n; ++i, ++wqe) {
259b32dbedbSSuanming Mou 		wqe->sq_ds = sq_ds;
260b32dbedbSSuanming Mou 		wqe->flags = flags;
261b32dbedbSSuanming Mou 		wqe->opaque_lkey = opaq_lkey;
262b32dbedbSSuanming Mou 		wqe->opaque_vaddr = rte_cpu_to_be_64((uint64_t)(uintptr_t)&opaq[i]);
263b32dbedbSSuanming Mou 	}
264b32dbedbSSuanming Mou }
265b32dbedbSSuanming Mou 
266b32dbedbSSuanming Mou static inline int
267b32dbedbSSuanming Mou mlx5_crypto_gcm_umr_qp_setup(struct rte_cryptodev *dev, struct mlx5_crypto_qp *qp,
268b32dbedbSSuanming Mou 			     int socket_id)
269b32dbedbSSuanming Mou {
270b32dbedbSSuanming Mou 	struct mlx5_crypto_priv *priv = dev->data->dev_private;
271b32dbedbSSuanming Mou 	struct mlx5_devx_qp_attr attr = {0};
272b32dbedbSSuanming Mou 	uint32_t ret;
273b32dbedbSSuanming Mou 	uint32_t log_wqbb_n;
274b32dbedbSSuanming Mou 
275b32dbedbSSuanming Mou 	/* Set UMR + SEND_EN WQE as maximum same with crypto. */
276b32dbedbSSuanming Mou 	log_wqbb_n = rte_log2_u32(qp->entries_n *
277b32dbedbSSuanming Mou 			(MLX5_UMR_GCM_WQE_SET_SIZE / MLX5_SEND_WQE_BB));
278b32dbedbSSuanming Mou 	attr.pd = priv->cdev->pdn;
279b32dbedbSSuanming Mou 	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj);
280b32dbedbSSuanming Mou 	attr.cqn = qp->cq_obj.cq->id;
281b32dbedbSSuanming Mou 	attr.num_of_receive_wqes = 0;
282b32dbedbSSuanming Mou 	attr.num_of_send_wqbbs = RTE_BIT32(log_wqbb_n);
283b32dbedbSSuanming Mou 	attr.ts_format =
284b32dbedbSSuanming Mou 		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
285b32dbedbSSuanming Mou 	attr.cd_master = 1;
286b32dbedbSSuanming Mou 	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->umr_qp_obj,
287b32dbedbSSuanming Mou 				  attr.num_of_send_wqbbs * MLX5_SEND_WQE_BB,
288b32dbedbSSuanming Mou 				  &attr, socket_id);
289b32dbedbSSuanming Mou 	if (ret) {
290b32dbedbSSuanming Mou 		DRV_LOG(ERR, "Failed to create UMR QP.");
291b32dbedbSSuanming Mou 		return -1;
292b32dbedbSSuanming Mou 	}
293b32dbedbSSuanming Mou 	if (mlx5_devx_qp2rts(&qp->umr_qp_obj, qp->umr_qp_obj.qp->id)) {
294b32dbedbSSuanming Mou 		DRV_LOG(ERR, "Failed to change UMR QP state to RTS.");
295b32dbedbSSuanming Mou 		return -1;
296b32dbedbSSuanming Mou 	}
297b32dbedbSSuanming Mou 	/* Save the UMR WQEBBS for checking the WQE boundary. */
298b32dbedbSSuanming Mou 	qp->umr_wqbbs = attr.num_of_send_wqbbs;
299b32dbedbSSuanming Mou 	return 0;
300b32dbedbSSuanming Mou }
301b32dbedbSSuanming Mou 
/*
 * Create one GCM queue pair: QP struct (with trailing op and optional mkey
 * arrays), MR btree, a single registered MR holding the per-entry opaque
 * buffers plus the KLM arrays, the shared CQ, the crypto QP, and — unless
 * the device runs in ipsec-opt mode — the UMR QP and indirect mkeys.
 * On any failure all partially created resources are released via the
 * common error path (mlx5_crypto_gcm_qp_release()).
 */
static int
mlx5_crypto_gcm_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			 const struct rte_cryptodev_qp_conf *qp_conf,
			 int socket_id)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_hca_attr *attr = &priv->cdev->config.hca_attr;
	struct mlx5_crypto_qp *qp;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
	};
	struct mlx5_devx_qp_attr qp_attr = {
		.pd = priv->cdev->pdn,
		.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
		.user_index = qp_id,
	};
	struct mlx5_devx_mkey_attr mkey_attr = {
		.pd = priv->cdev->pdn,
		.umr_en = 1,
		.klm_num = priv->max_klm_num,
	};
	/* Round the descriptor count up to a power of two. */
	uint32_t log_ops_n = rte_log2_u32(qp_conf->nb_descriptors);
	uint32_t entries = RTE_BIT32(log_ops_n);
	uint32_t alloc_size = sizeof(*qp);
	uint32_t extra_obj_size = 0;
	size_t mr_size, opaq_size;
	void *mr_buf;
	int ret;

	/* Non-ipsec-opt mode needs a per-entry indirect mkey pointer array. */
	if (!mlx5_crypto_is_ipsec_opt(priv))
		extra_obj_size = sizeof(struct mlx5_devx_obj *);
	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += (sizeof(struct rte_crypto_op *) +
		       extra_obj_size) * entries;
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate qp memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	qp->priv = priv;
	qp->entries_n = entries;
	if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
				  priv->dev_config.socket_id)) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto err;
	}
	/*
	 * The following KLM pointer must be aligned with
	 * MLX5_UMR_KLM_PTR_ALIGN. Aligned opaq_size here
	 * to make the KLM pointer with offset be aligned.
	 */
	opaq_size = RTE_ALIGN(sizeof(union mlx5_gga_crypto_opaque) * entries,
			      MLX5_UMR_KLM_PTR_ALIGN);
	mr_size = (priv->max_klm_num * sizeof(struct mlx5_klm) * entries) + opaq_size;
	mr_buf = rte_calloc(__func__, (size_t)1, mr_size, MLX5_UMR_KLM_PTR_ALIGN);
	if (mr_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate mr memory.");
		rte_errno = ENOMEM;
		goto err;
	}
	if (priv->reg_mr_cb(priv->cdev->pd, mr_buf, mr_size, &qp->mr) != 0) {
		rte_free(mr_buf);
		DRV_LOG(ERR, "Failed to register opaque MR.");
		rte_errno = ENOMEM;
		goto err;
	}
	/* Opaque buffers first, KLM arrays after the aligned opaque region. */
	qp->opaque_addr = qp->mr.addr;
	qp->klm_array = RTE_PTR_ADD(qp->opaque_addr, opaq_size);
	/*
	 * Triple the CQ size as UMR QP which contains UMR and SEND_EN WQE
	 * will share this CQ.
	 */
	qp->cq_entries_n = rte_align32pow2(entries * (mlx5_crypto_is_ipsec_opt(priv) ? 1 : 3));
	ret = mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj,
				  rte_log2_u32(qp->cq_entries_n),
				  &cq_attr, socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto err;
	}
	qp_attr.cqn = qp->cq_obj.cq->id;
	qp_attr.ts_format = mlx5_ts_format_conv(attr->qp_ts_format);
	qp_attr.num_of_receive_wqes = 0;
	qp_attr.num_of_send_wqbbs = entries;
	qp_attr.mmo = attr->crypto_mmo.crypto_mmo_qp;
	/* Set MMO QP as follower as the input data may depend on UMR. */
	qp_attr.cd_slave_send = !mlx5_crypto_is_ipsec_opt(priv);
	ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj,
				  qp_attr.num_of_send_wqbbs * MLX5_WQE_SIZE,
				  &qp_attr, socket_id);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to create QP.");
		goto err;
	}
	mlx5_crypto_gcm_init_qp(qp);
	ret = mlx5_devx_qp2rts(&qp->qp_obj, 0);
	if (ret)
		goto err;
	/* The op array lives right behind the QP structure. */
	qp->ops = (struct rte_crypto_op **)(qp + 1);
	if (!mlx5_crypto_is_ipsec_opt(priv)) {
		/* The mkey pointer array follows the op array. */
		qp->mkey = (struct mlx5_devx_obj **)(qp->ops + entries);
		if (mlx5_crypto_gcm_umr_qp_setup(dev, qp, socket_id)) {
			DRV_LOG(ERR, "Failed to setup UMR QP.");
			goto err;
		}
		DRV_LOG(INFO, "QP %u: SQN=0x%X CQN=0x%X entries num = %u",
			(uint32_t)qp_id, qp->qp_obj.qp->id, qp->cq_obj.cq->id, entries);
		if (mlx5_crypto_indirect_mkeys_prepare(priv, qp, &mkey_attr,
						       mlx5_crypto_gcm_mkey_klm_update)) {
			DRV_LOG(ERR, "Cannot allocate indirect memory regions.");
			rte_errno = ENOMEM;
			goto err;
		}
	} else {
		/* ipsec-opt mode keeps per-entry state instead of UMR/mkeys. */
		extra_obj_size = sizeof(struct mlx5_crypto_ipsec_mem) * entries;
		qp->ipsec_mem = rte_calloc(__func__, (size_t)1, extra_obj_size,
					   RTE_CACHE_LINE_SIZE);
		if (!qp->ipsec_mem) {
			DRV_LOG(ERR, "Failed to allocate ipsec_mem.");
			goto err;
		}
	}
	dev->data->queue_pairs[qp_id] = qp;
	return 0;
err:
	mlx5_crypto_gcm_qp_release(dev, qp_id);
	return -1;
}
434b32dbedbSSuanming Mou 
/*
 * Classify one crypto op for the data path: decide whether it can be
 * submitted directly (AAD/payload/tag contiguous in memory) or needs a
 * UMR WQE to stitch scattered buffers, and perform the cheap AAD/tag
 * relocation copies when the mbuf layout allows it.
 *
 * On return, op_info->need_umr is the main verdict; op_info->src_addr is
 * the effective contiguous source start (AAD first) for the direct path,
 * and op_info->digest is set when the tag must be copied out after
 * encryption completes.
 */
static __rte_always_inline void
mlx5_crypto_gcm_get_op_info(struct mlx5_crypto_qp *qp,
			    struct rte_crypto_op *op,
			    struct mlx5_crypto_gcm_op_info *op_info)
{
	struct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
	struct rte_mbuf *m_src = op->sym->m_src;
	void *aad_addr = op->sym->aead.aad.data;
	void *tag_addr = op->sym->aead.digest.data;
	void *src_addr = rte_pktmbuf_mtod_offset(m_src, void *, op->sym->aead.data.offset);
	struct rte_mbuf *m_dst = m_src;
	void *dst_addr = src_addr;
	void *expected_aad = NULL;
	void *expected_tag = NULL;
	bool is_enc = sess->op_type == MLX5_CRYPTO_OP_TYPE_ENCRYPTION;
	bool cp_aad = false;
	bool cp_tag = false;

	/* Optimistic defaults: in-place, direct path, AAD as source start. */
	op_info->is_oop = false;
	op_info->need_umr = false;
	op_info->is_enc = is_enc;
	op_info->digest = NULL;
	op_info->src_addr = aad_addr;
	if (op->sym->m_dst && op->sym->m_dst != m_src) {
		/* Add 2 for AAD and digest. */
		MLX5_ASSERT((uint32_t)(m_dst->nb_segs + m_src->nb_segs + 2) <
			    qp->priv->max_klm_num);
		op_info->is_oop = true;
		m_dst = op->sym->m_dst;
		dst_addr = rte_pktmbuf_mtod_offset(m_dst, void *, op->sym->aead.data.offset);
		/* A multi-segment destination always needs UMR. */
		if (m_dst->nb_segs > 1) {
			op_info->need_umr = true;
			return;
		}
		/*
		 * If the op's mbuf has extra data offset, don't copy AAD to
		 * this area.
		 */
		if (rte_pktmbuf_headroom(m_dst) < sess->aad_len ||
		    op->sym->aead.data.offset) {
			op_info->need_umr = true;
			return;
		}
	} else {
		/* Add 2 for AAD and digest. */
		MLX5_ASSERT((uint32_t)(m_src->nb_segs) + 2 < qp->priv->max_klm_num);
	}
	/* A multi-segment source always needs UMR. */
	if (m_src->nb_segs > 1) {
		op_info->need_umr = true;
		return;
	}
	/* Direct path requires the AAD immediately before the payload. */
	expected_aad = RTE_PTR_SUB(src_addr, sess->aad_len);
	if (expected_aad != aad_addr) {
		/*
		 * If the op's mbuf has extra data offset, don't copy AAD to
		 * this area.
		 */
		if (sess->aad_len > MLX5_CRYPTO_GCM_MAX_AAD ||
		    sess->aad_len > rte_pktmbuf_headroom(m_src) ||
		    op->sym->aead.data.offset) {
			op_info->need_umr = true;
			return;
		}
		cp_aad = true;
		op_info->src_addr = expected_aad;
	}
	/* Hardware reads/writes the tag right after the payload. */
	expected_tag = RTE_PTR_ADD(is_enc ? dst_addr : src_addr, op->sym->aead.data.length);
	if (expected_tag != tag_addr) {
		struct rte_mbuf *mbuf = is_enc ? m_dst : m_src;

		/*
		 * If op's mbuf is not fully set as payload, don't copy digest to
		 * the left area.
		 */
		if (rte_pktmbuf_tailroom(mbuf) < sess->tag_len ||
		    rte_pktmbuf_data_len(mbuf) != op->sym->aead.data.length) {
			op_info->need_umr = true;
			return;
		}
		if (is_enc) {
			/* Tag is produced by HW; copy it out at completion. */
			op_info->digest = expected_tag;
			qp->cpy_tag_op++;
		} else {
			/* Decrypt: move the user tag to where HW expects it. */
			cp_tag = true;
		}
	}
	/* Copies are deferred until all need_umr early-outs have passed. */
	if (cp_aad)
		memcpy(expected_aad, aad_addr, sess->aad_len);
	if (cp_tag)
		memcpy(expected_tag, tag_addr, sess->tag_len);
}
526b0109583SSuanming Mou 
527b0109583SSuanming Mou static __rte_always_inline uint32_t
528b0109583SSuanming Mou _mlx5_crypto_gcm_umr_build_mbuf_klm(struct mlx5_crypto_qp *qp,
529b0109583SSuanming Mou 				    struct rte_mbuf *mbuf,
530b0109583SSuanming Mou 				    struct mlx5_klm *klm,
531b0109583SSuanming Mou 				    uint32_t offset,
532b0109583SSuanming Mou 				    uint32_t *remain)
533b0109583SSuanming Mou {
534b0109583SSuanming Mou 	uint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);
535b0109583SSuanming Mou 	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);
536b0109583SSuanming Mou 
537b0109583SSuanming Mou 	if (data_len > *remain)
538b0109583SSuanming Mou 		data_len = *remain;
539b0109583SSuanming Mou 	*remain -= data_len;
540b0109583SSuanming Mou 	klm->byte_count = rte_cpu_to_be_32(data_len);
541b0109583SSuanming Mou 	klm->address = rte_cpu_to_be_64(addr);
542b0109583SSuanming Mou 	klm->mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf);
543b0109583SSuanming Mou 	return klm->mkey;
544b0109583SSuanming Mou }
545b0109583SSuanming Mou 
546b0109583SSuanming Mou static __rte_always_inline int
547b0109583SSuanming Mou mlx5_crypto_gcm_build_mbuf_chain_klms(struct mlx5_crypto_qp *qp,
548b0109583SSuanming Mou 				      struct rte_crypto_op *op,
549b0109583SSuanming Mou 				      struct rte_mbuf *mbuf,
550b0109583SSuanming Mou 				      struct mlx5_klm *klm)
551b0109583SSuanming Mou {
552b0109583SSuanming Mou 	uint32_t remain_len = op->sym->aead.data.length;
553b0109583SSuanming Mou 	__rte_unused uint32_t nb_segs = mbuf->nb_segs;
554b0109583SSuanming Mou 	uint32_t klm_n = 0;
555b0109583SSuanming Mou 
556b0109583SSuanming Mou 	/* mbuf seg num should be less than max_segs_num. */
557b0109583SSuanming Mou 	MLX5_ASSERT(nb_segs <= qp->priv->max_segs_num);
558b0109583SSuanming Mou 	/* First mbuf needs to take the data offset. */
559b0109583SSuanming Mou 	if (unlikely(_mlx5_crypto_gcm_umr_build_mbuf_klm(qp, mbuf, klm,
560b0109583SSuanming Mou 		     op->sym->aead.data.offset, &remain_len) == UINT32_MAX)) {
561b0109583SSuanming Mou 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
562b0109583SSuanming Mou 		return 0;
563b0109583SSuanming Mou 	}
564b0109583SSuanming Mou 	klm++;
565b0109583SSuanming Mou 	klm_n++;
566b0109583SSuanming Mou 	while (remain_len) {
567b0109583SSuanming Mou 		nb_segs--;
568b0109583SSuanming Mou 		mbuf = mbuf->next;
569b0109583SSuanming Mou 		MLX5_ASSERT(mbuf && nb_segs);
570b0109583SSuanming Mou 		if (unlikely(_mlx5_crypto_gcm_umr_build_mbuf_klm(qp, mbuf, klm,
571b0109583SSuanming Mou 						0, &remain_len) == UINT32_MAX)) {
572b0109583SSuanming Mou 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
573b0109583SSuanming Mou 			return 0;
574b0109583SSuanming Mou 		}
575b0109583SSuanming Mou 		klm++;
576b0109583SSuanming Mou 		klm_n++;
577b0109583SSuanming Mou 	}
578b0109583SSuanming Mou 	return klm_n;
579b0109583SSuanming Mou }
580b0109583SSuanming Mou 
581b0109583SSuanming Mou static __rte_always_inline int
582b0109583SSuanming Mou mlx5_crypto_gcm_build_klm_by_addr(struct mlx5_crypto_qp *qp,
583b0109583SSuanming Mou 				  struct mlx5_klm *klm,
584b0109583SSuanming Mou 				  void *addr,
585b0109583SSuanming Mou 				  uint32_t len)
586b0109583SSuanming Mou {
587b0109583SSuanming Mou 	klm->byte_count = rte_cpu_to_be_32(len);
588b0109583SSuanming Mou 	klm->address = rte_cpu_to_be_64((uintptr_t)addr);
589b0109583SSuanming Mou 	klm->mkey = mlx5_mr_addr2mr_bh(&qp->mr_ctrl, (uintptr_t)addr);
590b0109583SSuanming Mou 	if (klm->mkey == UINT32_MAX)
591b0109583SSuanming Mou 		return 0;
592b0109583SSuanming Mou 	return 1;
593b0109583SSuanming Mou }
594b0109583SSuanming Mou 
595b0109583SSuanming Mou static __rte_always_inline int
596b0109583SSuanming Mou mlx5_crypto_gcm_build_op_klm(struct mlx5_crypto_qp *qp,
597b0109583SSuanming Mou 			     struct rte_crypto_op *op,
598b0109583SSuanming Mou 			     struct mlx5_crypto_gcm_op_info *op_info,
599b0109583SSuanming Mou 			     struct mlx5_klm *klm,
600b0109583SSuanming Mou 			     uint32_t *len)
601b0109583SSuanming Mou {
602b0109583SSuanming Mou 	struct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
603b0109583SSuanming Mou 	struct mlx5_klm *digest = NULL, *aad = NULL;
604b0109583SSuanming Mou 	uint32_t total_len = op->sym->aead.data.length + sess->aad_len + sess->tag_len;
605b0109583SSuanming Mou 	uint32_t klm_n = 0, klm_src = 0, klm_dst = 0;
606b0109583SSuanming Mou 
607b0109583SSuanming Mou 	/* Build AAD KLM. */
608b0109583SSuanming Mou 	aad = klm;
609b0109583SSuanming Mou 	if (!mlx5_crypto_gcm_build_klm_by_addr(qp, aad, op->sym->aead.aad.data, sess->aad_len))
610b0109583SSuanming Mou 		return 0;
611b0109583SSuanming Mou 	klm_n++;
612b0109583SSuanming Mou 	/* Build src mubf KLM. */
613b0109583SSuanming Mou 	klm_src = mlx5_crypto_gcm_build_mbuf_chain_klms(qp, op, op->sym->m_src, &klm[klm_n]);
614b0109583SSuanming Mou 	if (!klm_src)
615b0109583SSuanming Mou 		return 0;
616b0109583SSuanming Mou 	klm_n += klm_src;
617b0109583SSuanming Mou 	/* Reserve digest KLM if needed. */
618b0109583SSuanming Mou 	if (!op_info->is_oop ||
619b0109583SSuanming Mou 	    sess->op_type == MLX5_CRYPTO_OP_TYPE_DECRYPTION) {
620b0109583SSuanming Mou 		digest = &klm[klm_n];
621b0109583SSuanming Mou 		klm_n++;
622b0109583SSuanming Mou 	}
623b0109583SSuanming Mou 	/* Build dst mbuf KLM. */
624b0109583SSuanming Mou 	if (op_info->is_oop) {
625b0109583SSuanming Mou 		klm[klm_n] = *aad;
626b0109583SSuanming Mou 		klm_n++;
627b0109583SSuanming Mou 		klm_dst = mlx5_crypto_gcm_build_mbuf_chain_klms(qp, op, op->sym->m_dst,
628b0109583SSuanming Mou 								&klm[klm_n]);
629b0109583SSuanming Mou 		if (!klm_dst)
630b0109583SSuanming Mou 			return 0;
631b0109583SSuanming Mou 		klm_n += klm_dst;
632b0109583SSuanming Mou 		total_len += (op->sym->aead.data.length + sess->aad_len);
633b0109583SSuanming Mou 	}
634b0109583SSuanming Mou 	/* Update digest at the end if it is not set. */
635b0109583SSuanming Mou 	if (!digest) {
636b0109583SSuanming Mou 		digest = &klm[klm_n];
637b0109583SSuanming Mou 		klm_n++;
638b0109583SSuanming Mou 	}
639b0109583SSuanming Mou 	/* Build digest KLM. */
640b0109583SSuanming Mou 	if (!mlx5_crypto_gcm_build_klm_by_addr(qp, digest, op->sym->aead.digest.data,
641b0109583SSuanming Mou 					       sess->tag_len))
642b0109583SSuanming Mou 		return 0;
643b0109583SSuanming Mou 	*len = total_len;
644b0109583SSuanming Mou 	return klm_n;
645b0109583SSuanming Mou }
646b0109583SSuanming Mou 
647b0109583SSuanming Mou static __rte_always_inline struct mlx5_wqe_cseg *
648b0109583SSuanming Mou mlx5_crypto_gcm_get_umr_wqe(struct mlx5_crypto_qp *qp)
649b0109583SSuanming Mou {
650b0109583SSuanming Mou 	uint32_t wqe_offset = qp->umr_pi & (qp->umr_wqbbs - 1);
651b0109583SSuanming Mou 	uint32_t left_wqbbs = qp->umr_wqbbs - wqe_offset;
652b0109583SSuanming Mou 	struct mlx5_wqe_cseg *wqe;
653b0109583SSuanming Mou 
654b0109583SSuanming Mou 	/* If UMR WQE is near the boundary. */
655b0109583SSuanming Mou 	if (left_wqbbs < MLX5_UMR_GCM_WQE_STRIDE) {
656b0109583SSuanming Mou 		/* Append NOP WQE as the left WQEBBS is not enough for UMR. */
657b0109583SSuanming Mou 		wqe = RTE_PTR_ADD(qp->umr_qp_obj.umem_buf, wqe_offset * MLX5_SEND_WQE_BB);
658b0109583SSuanming Mou 		wqe->opcode = rte_cpu_to_be_32(MLX5_OPCODE_NOP | ((uint32_t)qp->umr_pi << 8));
659b0109583SSuanming Mou 		wqe->sq_ds = rte_cpu_to_be_32((qp->umr_qp_obj.qp->id << 8) | (left_wqbbs << 2));
660b0109583SSuanming Mou 		wqe->flags = RTE_BE32(0);
661b0109583SSuanming Mou 		wqe->misc = RTE_BE32(0);
662b0109583SSuanming Mou 		qp->umr_pi += left_wqbbs;
663b0109583SSuanming Mou 		wqe_offset = qp->umr_pi & (qp->umr_wqbbs - 1);
664b0109583SSuanming Mou 	}
665b0109583SSuanming Mou 	wqe_offset *= MLX5_SEND_WQE_BB;
666b0109583SSuanming Mou 	return RTE_PTR_ADD(qp->umr_qp_obj.umem_buf, wqe_offset);
667b0109583SSuanming Mou }
668b0109583SSuanming Mou 
669b0109583SSuanming Mou static __rte_always_inline int
670b0109583SSuanming Mou mlx5_crypto_gcm_build_umr(struct mlx5_crypto_qp *qp,
671b0109583SSuanming Mou 			  struct rte_crypto_op *op,
672b0109583SSuanming Mou 			  uint32_t idx,
673b0109583SSuanming Mou 			  struct mlx5_crypto_gcm_op_info *op_info,
674b0109583SSuanming Mou 			  struct mlx5_crypto_gcm_data *data)
675b0109583SSuanming Mou {
676b0109583SSuanming Mou 	struct mlx5_crypto_priv *priv = qp->priv;
677b0109583SSuanming Mou 	struct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
678b0109583SSuanming Mou 	struct mlx5_wqe_cseg *wqe;
679b0109583SSuanming Mou 	struct mlx5_wqe_umr_cseg *ucseg;
680b0109583SSuanming Mou 	struct mlx5_wqe_mkey_cseg *mkc;
681b0109583SSuanming Mou 	struct mlx5_klm *iklm;
682b0109583SSuanming Mou 	struct mlx5_klm *klm = &qp->klm_array[idx * priv->max_klm_num];
683b0109583SSuanming Mou 	uint16_t klm_size, klm_align;
684b0109583SSuanming Mou 	uint32_t total_len;
685b0109583SSuanming Mou 
686b0109583SSuanming Mou 	/* Build KLM base on the op. */
687b0109583SSuanming Mou 	klm_size = mlx5_crypto_gcm_build_op_klm(qp, op, op_info, klm, &total_len);
688b0109583SSuanming Mou 	if (!klm_size)
689b0109583SSuanming Mou 		return -EINVAL;
690b0109583SSuanming Mou 	klm_align = RTE_ALIGN(klm_size, 4);
691b0109583SSuanming Mou 	/* Get UMR WQE memory. */
692b0109583SSuanming Mou 	wqe = mlx5_crypto_gcm_get_umr_wqe(qp);
693b0109583SSuanming Mou 	memset(wqe, 0, MLX5_UMR_GCM_WQE_SIZE);
694b0109583SSuanming Mou 	/* Set WQE control seg. Non-inline KLM UMR WQE size must be 9 WQE_DS. */
695b0109583SSuanming Mou 	wqe->opcode = rte_cpu_to_be_32(MLX5_OPCODE_UMR | ((uint32_t)qp->umr_pi << 8));
696b0109583SSuanming Mou 	wqe->sq_ds = rte_cpu_to_be_32((qp->umr_qp_obj.qp->id << 8) | 9);
697b0109583SSuanming Mou 	wqe->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET);
698b0109583SSuanming Mou 	wqe->misc = rte_cpu_to_be_32(qp->mkey[idx]->id);
699b0109583SSuanming Mou 	/* Set UMR WQE control seg. */
700b0109583SSuanming Mou 	ucseg = (struct mlx5_wqe_umr_cseg *)(wqe + 1);
701b0109583SSuanming Mou 	ucseg->mkey_mask |= RTE_BE64(1u << 0);
702b0109583SSuanming Mou 	ucseg->ko_to_bs = rte_cpu_to_be_32(klm_align << MLX5_UMRC_KO_OFFSET);
703b0109583SSuanming Mou 	/* Set mkey context seg. */
704b0109583SSuanming Mou 	mkc = (struct mlx5_wqe_mkey_cseg *)(ucseg + 1);
705b0109583SSuanming Mou 	mkc->len = rte_cpu_to_be_64(total_len);
706b0109583SSuanming Mou 	mkc->qpn_mkey = rte_cpu_to_be_32(0xffffff00 | (qp->mkey[idx]->id & 0xff));
707b0109583SSuanming Mou 	/* Set UMR pointer to data seg. */
708b0109583SSuanming Mou 	iklm = (struct mlx5_klm *)(mkc + 1);
709b0109583SSuanming Mou 	iklm->address = rte_cpu_to_be_64((uintptr_t)((char *)klm));
710b0109583SSuanming Mou 	iklm->mkey = rte_cpu_to_be_32(qp->mr.lkey);
711b0109583SSuanming Mou 	data->src_mkey = rte_cpu_to_be_32(qp->mkey[idx]->id);
712b0109583SSuanming Mou 	data->dst_mkey = data->src_mkey;
713b0109583SSuanming Mou 	data->src_addr = 0;
714b0109583SSuanming Mou 	data->src_bytes = sess->aad_len + op->sym->aead.data.length;
715b0109583SSuanming Mou 	data->dst_bytes = data->src_bytes;
716b0109583SSuanming Mou 	if (op_info->is_enc)
717b0109583SSuanming Mou 		data->dst_bytes += sess->tag_len;
718b0109583SSuanming Mou 	else
719b0109583SSuanming Mou 		data->src_bytes += sess->tag_len;
720b0109583SSuanming Mou 	if (op_info->is_oop)
721b0109583SSuanming Mou 		data->dst_addr = (void *)(uintptr_t)(data->src_bytes);
722b0109583SSuanming Mou 	else
723b0109583SSuanming Mou 		data->dst_addr = 0;
724b0109583SSuanming Mou 	/* Clear the padding memory. */
725b0109583SSuanming Mou 	memset(&klm[klm_size], 0, sizeof(struct mlx5_klm) * (klm_align - klm_size));
726b0109583SSuanming Mou 	/* Update PI and WQE */
727b0109583SSuanming Mou 	qp->umr_pi += MLX5_UMR_GCM_WQE_STRIDE;
728b0109583SSuanming Mou 	qp->umr_wqe = (uint8_t *)wqe;
729b0109583SSuanming Mou 	return 0;
730b0109583SSuanming Mou }
731b0109583SSuanming Mou 
732b0109583SSuanming Mou static __rte_always_inline void
733b0109583SSuanming Mou mlx5_crypto_gcm_build_send_en(struct mlx5_crypto_qp *qp)
734b0109583SSuanming Mou {
735b0109583SSuanming Mou 	uint32_t wqe_offset = (qp->umr_pi & (qp->umr_wqbbs - 1)) * MLX5_SEND_WQE_BB;
736b0109583SSuanming Mou 	struct mlx5_wqe_cseg *cs = RTE_PTR_ADD(qp->umr_qp_obj.wqes, wqe_offset);
737b0109583SSuanming Mou 	struct mlx5_wqe_qseg *qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
738b0109583SSuanming Mou 
739b0109583SSuanming Mou 	cs->opcode = rte_cpu_to_be_32(MLX5_OPCODE_SEND_EN | ((uint32_t)qp->umr_pi << 8));
740b0109583SSuanming Mou 	cs->sq_ds = rte_cpu_to_be_32((qp->umr_qp_obj.qp->id << 8) | 2);
741b0109583SSuanming Mou 	/*
742b0109583SSuanming Mou 	 * No need to generate the SEND_EN CQE as we want only GGA CQE
743b0109583SSuanming Mou 	 * in the CQ normally. We can compare qp->last_send_gga_pi with
744b0109583SSuanming Mou 	 * qp->pi to know if all SEND_EN be consumed.
745b0109583SSuanming Mou 	 */
746b0109583SSuanming Mou 	cs->flags = RTE_BE32((MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET) |
747b0109583SSuanming Mou 			MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);
748b0109583SSuanming Mou 	cs->misc = RTE_BE32(0);
749b0109583SSuanming Mou 	qs->max_index = rte_cpu_to_be_32(qp->pi);
750b0109583SSuanming Mou 	qs->qpn_cqn = rte_cpu_to_be_32(qp->qp_obj.qp->id);
751b0109583SSuanming Mou 	qp->umr_wqe = (uint8_t *)cs;
752b0109583SSuanming Mou 	qp->umr_pi += 1;
753b0109583SSuanming Mou }
754b0109583SSuanming Mou 
755b0109583SSuanming Mou static __rte_always_inline void
756b0109583SSuanming Mou mlx5_crypto_gcm_wqe_set(struct mlx5_crypto_qp *qp,
757b0109583SSuanming Mou 			struct rte_crypto_op *op,
758b0109583SSuanming Mou 			uint32_t idx,
759b0109583SSuanming Mou 			struct mlx5_crypto_gcm_data *data)
760b0109583SSuanming Mou {
761b0109583SSuanming Mou 	struct mlx5_crypto_session *sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
762b0109583SSuanming Mou 	struct mlx5_gga_wqe *wqe = &((struct mlx5_gga_wqe *)qp->qp_obj.wqes)[idx];
763b0109583SSuanming Mou 	union mlx5_gga_crypto_opaque *opaq = qp->opaque_addr;
764b0109583SSuanming Mou 
765b0109583SSuanming Mou 	memcpy(opaq[idx].cp.iv,
766b0109583SSuanming Mou 		rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), sess->iv_len);
767b0109583SSuanming Mou 	opaq[idx].cp.tag_size = sess->wqe_tag_len;
768b0109583SSuanming Mou 	opaq[idx].cp.aad_size = sess->wqe_aad_len;
769b0109583SSuanming Mou 	/* Update control seg. */
770b0109583SSuanming Mou 	wqe->opcode = rte_cpu_to_be_32(MLX5_MMO_CRYPTO_OPC + (qp->pi << 8));
771b0109583SSuanming Mou 	wqe->gga_ctrl1 = sess->mmo_ctrl;
772b0109583SSuanming Mou 	wqe->gga_ctrl2 = sess->dek_id;
773b0109583SSuanming Mou 	wqe->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET);
774b0109583SSuanming Mou 	/* Update op_info seg. */
775b0109583SSuanming Mou 	wqe->gather.bcount = rte_cpu_to_be_32(data->src_bytes);
776b0109583SSuanming Mou 	wqe->gather.lkey = data->src_mkey;
777b0109583SSuanming Mou 	wqe->gather.pbuf = rte_cpu_to_be_64((uintptr_t)data->src_addr);
778b0109583SSuanming Mou 	/* Update output seg. */
779b0109583SSuanming Mou 	wqe->scatter.bcount = rte_cpu_to_be_32(data->dst_bytes);
780b0109583SSuanming Mou 	wqe->scatter.lkey = data->dst_mkey;
781b0109583SSuanming Mou 	wqe->scatter.pbuf = rte_cpu_to_be_64((uintptr_t)data->dst_addr);
782b0109583SSuanming Mou 	qp->wqe = (uint8_t *)wqe;
783b0109583SSuanming Mou }
784b0109583SSuanming Mou 
785b0109583SSuanming Mou static uint16_t
786b0109583SSuanming Mou mlx5_crypto_gcm_enqueue_burst(void *queue_pair,
787b0109583SSuanming Mou 			      struct rte_crypto_op **ops,
788b0109583SSuanming Mou 			      uint16_t nb_ops)
789b0109583SSuanming Mou {
790b0109583SSuanming Mou 	struct mlx5_crypto_qp *qp = queue_pair;
791b0109583SSuanming Mou 	struct mlx5_crypto_session *sess;
792b0109583SSuanming Mou 	struct mlx5_crypto_priv *priv = qp->priv;
793b0109583SSuanming Mou 	struct mlx5_crypto_gcm_tag_cpy_info *tag;
794b0109583SSuanming Mou 	struct mlx5_crypto_gcm_data gcm_data;
795b0109583SSuanming Mou 	struct rte_crypto_op *op;
796b0109583SSuanming Mou 	struct mlx5_crypto_gcm_op_info op_info;
797b0109583SSuanming Mou 	uint16_t mask = qp->entries_n - 1;
798b0109583SSuanming Mou 	uint16_t remain = qp->entries_n - (qp->pi - qp->qp_ci);
799b0109583SSuanming Mou 	uint32_t idx;
800b0109583SSuanming Mou 	uint16_t umr_cnt = 0;
801b0109583SSuanming Mou 
802b0109583SSuanming Mou 	if (remain < nb_ops)
803b0109583SSuanming Mou 		nb_ops = remain;
804b0109583SSuanming Mou 	else
805b0109583SSuanming Mou 		remain = nb_ops;
806b0109583SSuanming Mou 	if (unlikely(remain == 0))
807b0109583SSuanming Mou 		return 0;
808b0109583SSuanming Mou 	do {
809b0109583SSuanming Mou 		op = *ops++;
810b0109583SSuanming Mou 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
811b0109583SSuanming Mou 		idx = qp->pi & mask;
812b0109583SSuanming Mou 		mlx5_crypto_gcm_get_op_info(qp, op, &op_info);
813b0109583SSuanming Mou 		if (!op_info.need_umr) {
814b0109583SSuanming Mou 			gcm_data.src_addr = op_info.src_addr;
815b0109583SSuanming Mou 			gcm_data.src_bytes = op->sym->aead.data.length + sess->aad_len;
816b0109583SSuanming Mou 			gcm_data.src_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->sym->m_src);
817b0109583SSuanming Mou 			if (op_info.is_oop) {
818b0109583SSuanming Mou 				gcm_data.dst_addr = RTE_PTR_SUB
819b0109583SSuanming Mou 					(rte_pktmbuf_mtod_offset(op->sym->m_dst,
820b0109583SSuanming Mou 					 void *, op->sym->aead.data.offset), sess->aad_len);
821b0109583SSuanming Mou 				gcm_data.dst_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->sym->m_dst);
822b0109583SSuanming Mou 			} else {
823b0109583SSuanming Mou 				gcm_data.dst_addr = gcm_data.src_addr;
824b0109583SSuanming Mou 				gcm_data.dst_mkey = gcm_data.src_mkey;
825b0109583SSuanming Mou 			}
826b0109583SSuanming Mou 			gcm_data.dst_bytes = gcm_data.src_bytes;
827b0109583SSuanming Mou 			if (op_info.is_enc)
828b0109583SSuanming Mou 				gcm_data.dst_bytes += sess->tag_len;
829b0109583SSuanming Mou 			else
830b0109583SSuanming Mou 				gcm_data.src_bytes += sess->tag_len;
831b0109583SSuanming Mou 		} else {
832b0109583SSuanming Mou 			if (unlikely(mlx5_crypto_gcm_build_umr(qp, op, idx,
833b0109583SSuanming Mou 							&op_info, &gcm_data))) {
834b0109583SSuanming Mou 				qp->stats.enqueue_err_count++;
835b0109583SSuanming Mou 				if (remain != nb_ops) {
836b0109583SSuanming Mou 					qp->stats.enqueued_count -= remain;
837b0109583SSuanming Mou 					break;
838b0109583SSuanming Mou 				}
839b0109583SSuanming Mou 				return 0;
840b0109583SSuanming Mou 			}
841b0109583SSuanming Mou 			umr_cnt++;
842b0109583SSuanming Mou 		}
843b0109583SSuanming Mou 		mlx5_crypto_gcm_wqe_set(qp, op, idx, &gcm_data);
844b0109583SSuanming Mou 		if (op_info.digest) {
845b0109583SSuanming Mou 			tag = (struct mlx5_crypto_gcm_tag_cpy_info *)op->sym->aead.digest.data;
846b0109583SSuanming Mou 			tag->digest = op_info.digest;
847b0109583SSuanming Mou 			tag->tag_len = sess->tag_len;
848b0109583SSuanming Mou 			op->status = MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY;
849b0109583SSuanming Mou 		} else {
850b0109583SSuanming Mou 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
851b0109583SSuanming Mou 		}
852b0109583SSuanming Mou 		qp->ops[idx] = op;
853b0109583SSuanming Mou 		qp->pi++;
854b0109583SSuanming Mou 	} while (--remain);
855b0109583SSuanming Mou 	qp->stats.enqueued_count += nb_ops;
856b0109583SSuanming Mou 	/* Update the last GGA cseg with COMP. */
857b0109583SSuanming Mou 	((struct mlx5_wqe_cseg *)qp->wqe)->flags =
858b0109583SSuanming Mou 		RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
859b0109583SSuanming Mou 	/* Only when there are no pending SEND_EN WQEs in background. */
860b0109583SSuanming Mou 	if (!umr_cnt && !qp->has_umr) {
861b0109583SSuanming Mou 		mlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,
862b0109583SSuanming Mou 				   qp->pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],
863b0109583SSuanming Mou 				   !priv->uar.dbnc);
864b0109583SSuanming Mou 	} else {
865b0109583SSuanming Mou 		mlx5_crypto_gcm_build_send_en(qp);
866b0109583SSuanming Mou 		mlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->umr_wqe,
867b0109583SSuanming Mou 				   qp->umr_pi, &qp->umr_qp_obj.db_rec[MLX5_SND_DBR],
868b0109583SSuanming Mou 				   !priv->uar.dbnc);
869b0109583SSuanming Mou 		qp->last_gga_pi = qp->pi;
870b0109583SSuanming Mou 		qp->has_umr = true;
871b0109583SSuanming Mou 	}
872b0109583SSuanming Mou 	return nb_ops;
873b0109583SSuanming Mou }
874b0109583SSuanming Mou 
875b0109583SSuanming Mou static __rte_noinline void
876b0109583SSuanming Mou mlx5_crypto_gcm_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
877b0109583SSuanming Mou {
878b0109583SSuanming Mou 	uint8_t op_code;
879b0109583SSuanming Mou 	const uint32_t idx = qp->cq_ci & (qp->entries_n - 1);
8803cddeba0SAlexander Kozyrev 	volatile struct mlx5_error_cqe *cqe = (volatile struct mlx5_error_cqe *)
881b0109583SSuanming Mou 							&qp->cq_obj.cqes[idx];
882b0109583SSuanming Mou 
883b0109583SSuanming Mou 	op_code = rte_be_to_cpu_32(cqe->s_wqe_opcode_qpn) >> MLX5_CQ_INDEX_WIDTH;
884b0109583SSuanming Mou 	DRV_LOG(ERR, "CQE ERR:0x%x, Vendor_ERR:0x%x, OP:0x%x, QPN:0x%x, WQE_CNT:0x%x",
885b0109583SSuanming Mou 		cqe->syndrome, cqe->vendor_err_synd, op_code,
886b0109583SSuanming Mou 		(rte_be_to_cpu_32(cqe->s_wqe_opcode_qpn) & 0xffffff),
887b0109583SSuanming Mou 		rte_be_to_cpu_16(cqe->wqe_counter));
888b0109583SSuanming Mou 	if (op && op_code == MLX5_OPCODE_MMO) {
889b0109583SSuanming Mou 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
890b0109583SSuanming Mou 		qp->stats.dequeue_err_count++;
891b0109583SSuanming Mou 	}
892b0109583SSuanming Mou }
893b0109583SSuanming Mou 
894b0109583SSuanming Mou static __rte_always_inline void
895b0109583SSuanming Mou mlx5_crypto_gcm_fill_op(struct mlx5_crypto_qp *qp,
896b0109583SSuanming Mou 			struct rte_crypto_op **ops,
897b0109583SSuanming Mou 			uint16_t orci,
898b0109583SSuanming Mou 			uint16_t rci,
899b0109583SSuanming Mou 			uint16_t op_mask)
900b0109583SSuanming Mou {
901b0109583SSuanming Mou 	uint16_t n;
902b0109583SSuanming Mou 
903b0109583SSuanming Mou 	orci &= op_mask;
904b0109583SSuanming Mou 	rci &= op_mask;
905b0109583SSuanming Mou 	if (unlikely(orci > rci)) {
906b0109583SSuanming Mou 		n = op_mask - orci + 1;
907b0109583SSuanming Mou 		memcpy(ops, &qp->ops[orci], n * sizeof(*ops));
908b0109583SSuanming Mou 		orci = 0;
909b0109583SSuanming Mou 	} else {
910b0109583SSuanming Mou 		n = 0;
911b0109583SSuanming Mou 	}
912b0109583SSuanming Mou 	/* rci can be 0 here, memcpy will skip that. */
913b0109583SSuanming Mou 	memcpy(&ops[n], &qp->ops[orci], (rci - orci) * sizeof(*ops));
914b0109583SSuanming Mou }
915b0109583SSuanming Mou 
916b0109583SSuanming Mou static __rte_always_inline void
917b0109583SSuanming Mou mlx5_crypto_gcm_cpy_tag(struct mlx5_crypto_qp *qp,
918b0109583SSuanming Mou 			uint16_t orci,
919b0109583SSuanming Mou 			uint16_t rci,
920b0109583SSuanming Mou 			uint16_t op_mask)
921b0109583SSuanming Mou {
922b0109583SSuanming Mou 	struct rte_crypto_op *op;
923b0109583SSuanming Mou 	struct mlx5_crypto_gcm_tag_cpy_info *tag;
924b0109583SSuanming Mou 
925b0109583SSuanming Mou 	while (qp->cpy_tag_op && orci != rci) {
926b0109583SSuanming Mou 		op = qp->ops[orci & op_mask];
927b0109583SSuanming Mou 		if (op->status == MLX5_CRYPTO_OP_STATUS_GCM_TAG_COPY) {
928b0109583SSuanming Mou 			tag = (struct mlx5_crypto_gcm_tag_cpy_info *)op->sym->aead.digest.data;
929b0109583SSuanming Mou 			memcpy(op->sym->aead.digest.data, tag->digest, tag->tag_len);
930b0109583SSuanming Mou 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
931b0109583SSuanming Mou 			qp->cpy_tag_op--;
932b0109583SSuanming Mou 		}
933b0109583SSuanming Mou 		orci++;
934b0109583SSuanming Mou 	}
935b0109583SSuanming Mou }
936b0109583SSuanming Mou 
937b0109583SSuanming Mou static uint16_t
938b0109583SSuanming Mou mlx5_crypto_gcm_dequeue_burst(void *queue_pair,
939b0109583SSuanming Mou 			      struct rte_crypto_op **ops,
940b0109583SSuanming Mou 			      uint16_t nb_ops)
941b0109583SSuanming Mou {
942b0109583SSuanming Mou 	struct mlx5_crypto_qp *qp = queue_pair;
943b0109583SSuanming Mou 	volatile struct mlx5_cqe *restrict cqe;
944b0109583SSuanming Mou 	const unsigned int cq_size = qp->cq_entries_n;
945b0109583SSuanming Mou 	const unsigned int mask = cq_size - 1;
946b0109583SSuanming Mou 	const unsigned int op_mask = qp->entries_n - 1;
947b0109583SSuanming Mou 	uint32_t idx;
948b0109583SSuanming Mou 	uint32_t next_idx = qp->cq_ci & mask;
949b0109583SSuanming Mou 	uint16_t reported_ci = qp->reported_ci;
950b0109583SSuanming Mou 	uint16_t qp_ci = qp->qp_ci;
951b0109583SSuanming Mou 	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - reported_ci), nb_ops);
952b0109583SSuanming Mou 	uint16_t op_num = 0;
953b0109583SSuanming Mou 	int ret;
954b0109583SSuanming Mou 
955b0109583SSuanming Mou 	if (unlikely(max == 0))
956b0109583SSuanming Mou 		return 0;
957b0109583SSuanming Mou 	while (qp_ci - reported_ci < max) {
958b0109583SSuanming Mou 		idx = next_idx;
959b0109583SSuanming Mou 		next_idx = (qp->cq_ci + 1) & mask;
960b0109583SSuanming Mou 		cqe = &qp->cq_obj.cqes[idx];
961b0109583SSuanming Mou 		ret = check_cqe(cqe, cq_size, qp->cq_ci);
962b0109583SSuanming Mou 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
963b0109583SSuanming Mou 			if (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))
964b0109583SSuanming Mou 				mlx5_crypto_gcm_cqe_err_handle(qp,
965b0109583SSuanming Mou 						qp->ops[reported_ci & op_mask]);
966b0109583SSuanming Mou 			break;
967b0109583SSuanming Mou 		}
968b0109583SSuanming Mou 		qp_ci = rte_be_to_cpu_16(cqe->wqe_counter) + 1;
969b0109583SSuanming Mou 		if (qp->has_umr &&
970b0109583SSuanming Mou 		    (qp->last_gga_pi + 1) == qp_ci)
971b0109583SSuanming Mou 			qp->has_umr = false;
972b0109583SSuanming Mou 		qp->cq_ci++;
973b0109583SSuanming Mou 	}
974b0109583SSuanming Mou 	/* If wqe_counter changed, means CQE handled. */
975b0109583SSuanming Mou 	if (likely(qp->qp_ci != qp_ci)) {
976b0109583SSuanming Mou 		qp->qp_ci = qp_ci;
977b0109583SSuanming Mou 		rte_io_wmb();
978b0109583SSuanming Mou 		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->cq_ci);
979b0109583SSuanming Mou 	}
980b0109583SSuanming Mou 	/* If reported_ci is not same with qp_ci, means op retrieved. */
981b0109583SSuanming Mou 	if (qp_ci != reported_ci) {
982b0109583SSuanming Mou 		op_num = RTE_MIN((uint16_t)(qp_ci - reported_ci), max);
983b0109583SSuanming Mou 		reported_ci += op_num;
984b0109583SSuanming Mou 		mlx5_crypto_gcm_cpy_tag(qp, qp->reported_ci, reported_ci, op_mask);
985b0109583SSuanming Mou 		mlx5_crypto_gcm_fill_op(qp, ops, qp->reported_ci, reported_ci, op_mask);
986b0109583SSuanming Mou 		qp->stats.dequeued_count += op_num;
987b0109583SSuanming Mou 		qp->reported_ci = reported_ci;
988b0109583SSuanming Mou 	}
989b0109583SSuanming Mou 	return op_num;
990b0109583SSuanming Mou }
991b0109583SSuanming Mou 
9920750c8b1SSuanming Mou static uint16_t
9930750c8b1SSuanming Mou mlx5_crypto_gcm_ipsec_enqueue_burst(void *queue_pair,
9940750c8b1SSuanming Mou 				    struct rte_crypto_op **ops,
9950750c8b1SSuanming Mou 				    uint16_t nb_ops)
9960750c8b1SSuanming Mou {
9970750c8b1SSuanming Mou 	struct mlx5_crypto_qp *qp = queue_pair;
9980750c8b1SSuanming Mou 	struct mlx5_crypto_session *sess;
9990750c8b1SSuanming Mou 	struct mlx5_crypto_priv *priv = qp->priv;
10000750c8b1SSuanming Mou 	struct mlx5_crypto_gcm_data gcm_data;
10010750c8b1SSuanming Mou 	struct rte_crypto_op *op;
10020750c8b1SSuanming Mou 	struct rte_mbuf *m_src;
10033455ed86SSuanming Mou 	struct rte_mbuf *m_dst;
10040750c8b1SSuanming Mou 	uint16_t mask = qp->entries_n - 1;
10050750c8b1SSuanming Mou 	uint16_t remain = qp->entries_n - (qp->pi - qp->qp_ci);
10060750c8b1SSuanming Mou 	uint32_t idx;
10070750c8b1SSuanming Mou 	uint32_t pkt_iv_len;
10080750c8b1SSuanming Mou 	uint8_t *payload;
10090750c8b1SSuanming Mou 
10100750c8b1SSuanming Mou 	if (remain < nb_ops)
10110750c8b1SSuanming Mou 		nb_ops = remain;
10120750c8b1SSuanming Mou 	else
10130750c8b1SSuanming Mou 		remain = nb_ops;
10140750c8b1SSuanming Mou 	if (unlikely(remain == 0))
10150750c8b1SSuanming Mou 		return 0;
10160750c8b1SSuanming Mou 	do {
10170750c8b1SSuanming Mou 		op = *ops++;
10180750c8b1SSuanming Mou 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
10190750c8b1SSuanming Mou 		idx = qp->pi & mask;
10200750c8b1SSuanming Mou 		m_src = op->sym->m_src;
10210750c8b1SSuanming Mou 		MLX5_ASSERT(m_src->nb_segs == 1);
10220750c8b1SSuanming Mou 		payload = rte_pktmbuf_mtod_offset(m_src, void *, op->sym->aead.data.offset);
10230750c8b1SSuanming Mou 		gcm_data.src_addr = RTE_PTR_SUB(payload, sess->aad_len);
10240750c8b1SSuanming Mou 		/*
10250750c8b1SSuanming Mou 		 * IPsec IV between payload and AAD should be equal or less than
10260750c8b1SSuanming Mou 		 * MLX5_CRYPTO_GCM_IPSEC_IV_SIZE.
10270750c8b1SSuanming Mou 		 */
10280750c8b1SSuanming Mou 		pkt_iv_len = RTE_PTR_DIFF(payload,
10290750c8b1SSuanming Mou 				RTE_PTR_ADD(op->sym->aead.aad.data, sess->aad_len));
10300750c8b1SSuanming Mou 		MLX5_ASSERT(pkt_iv_len <= MLX5_CRYPTO_GCM_IPSEC_IV_SIZE);
10310750c8b1SSuanming Mou 		gcm_data.src_bytes = op->sym->aead.data.length + sess->aad_len;
10320750c8b1SSuanming Mou 		gcm_data.src_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->sym->m_src);
10333455ed86SSuanming Mou 		m_dst = op->sym->m_dst;
10343455ed86SSuanming Mou 		if (m_dst && m_dst != m_src) {
10353455ed86SSuanming Mou 			MLX5_ASSERT(m_dst->nb_segs == 1 &&
10363455ed86SSuanming Mou 				    (rte_pktmbuf_headroom(m_dst) + op->sym->aead.data.offset)
10373455ed86SSuanming Mou 				    >= sess->aad_len + pkt_iv_len);
10383455ed86SSuanming Mou 			gcm_data.dst_addr = RTE_PTR_SUB
10393455ed86SSuanming Mou 				(rte_pktmbuf_mtod_offset(m_dst,
10403455ed86SSuanming Mou 				 void *, op->sym->aead.data.offset), sess->aad_len);
10413455ed86SSuanming Mou 			gcm_data.dst_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, m_dst);
10423455ed86SSuanming Mou 		} else {
10430750c8b1SSuanming Mou 			gcm_data.dst_addr = gcm_data.src_addr;
10440750c8b1SSuanming Mou 			gcm_data.dst_mkey = gcm_data.src_mkey;
10453455ed86SSuanming Mou 		}
10460750c8b1SSuanming Mou 		gcm_data.dst_bytes = gcm_data.src_bytes;
10470750c8b1SSuanming Mou 		/* Digest should follow payload. */
10483455ed86SSuanming Mou 		if (sess->op_type == MLX5_CRYPTO_OP_TYPE_ENCRYPTION) {
10493455ed86SSuanming Mou 			MLX5_ASSERT(RTE_PTR_ADD(gcm_data.dst_addr,
10503455ed86SSuanming Mou 				    sess->aad_len + op->sym->aead.data.length) ==
10510750c8b1SSuanming Mou 				    op->sym->aead.digest.data);
10520750c8b1SSuanming Mou 			gcm_data.dst_bytes += sess->tag_len;
10533455ed86SSuanming Mou 		} else {
10543455ed86SSuanming Mou 			MLX5_ASSERT(RTE_PTR_ADD(gcm_data.src_addr,
10553455ed86SSuanming Mou 				    sess->aad_len + op->sym->aead.data.length) ==
10563455ed86SSuanming Mou 				    op->sym->aead.digest.data);
10570750c8b1SSuanming Mou 			gcm_data.src_bytes += sess->tag_len;
10583455ed86SSuanming Mou 		}
10590750c8b1SSuanming Mou 		mlx5_crypto_gcm_wqe_set(qp, op, idx, &gcm_data);
10600750c8b1SSuanming Mou 		/*
10610750c8b1SSuanming Mou 		 * All the data such as IV have been copied above,
10620750c8b1SSuanming Mou 		 * shrink AAD before payload. First backup the mem,
10630750c8b1SSuanming Mou 		 * then do shrink.
10640750c8b1SSuanming Mou 		 */
10650750c8b1SSuanming Mou 		rte_memcpy(&qp->ipsec_mem[idx],
10660750c8b1SSuanming Mou 			   RTE_PTR_SUB(payload, MLX5_CRYPTO_GCM_IPSEC_IV_SIZE),
10670750c8b1SSuanming Mou 			   MLX5_CRYPTO_GCM_IPSEC_IV_SIZE);
10680750c8b1SSuanming Mou 		/* If no memory overlap, do copy directly, otherwise memmove. */
10690750c8b1SSuanming Mou 		if (likely(pkt_iv_len >= sess->aad_len))
10700750c8b1SSuanming Mou 			rte_memcpy(gcm_data.src_addr, op->sym->aead.aad.data, sess->aad_len);
10710750c8b1SSuanming Mou 		else
10720750c8b1SSuanming Mou 			memmove(gcm_data.src_addr, op->sym->aead.aad.data, sess->aad_len);
10730750c8b1SSuanming Mou 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
10740750c8b1SSuanming Mou 		qp->ops[idx] = op;
10750750c8b1SSuanming Mou 		qp->pi++;
10760750c8b1SSuanming Mou 	} while (--remain);
10770750c8b1SSuanming Mou 	qp->stats.enqueued_count += nb_ops;
10780750c8b1SSuanming Mou 	/* Update the last GGA cseg with COMP. */
10790750c8b1SSuanming Mou 	((struct mlx5_wqe_cseg *)qp->wqe)->flags =
10800750c8b1SSuanming Mou 		RTE_BE32(MLX5_COMP_ALWAYS << MLX5_COMP_MODE_OFFSET);
10810750c8b1SSuanming Mou 	mlx5_doorbell_ring(&priv->uar.bf_db, *(volatile uint64_t *)qp->wqe,
10820750c8b1SSuanming Mou 			   qp->pi, &qp->qp_obj.db_rec[MLX5_SND_DBR],
10830750c8b1SSuanming Mou 			   !priv->uar.dbnc);
10840750c8b1SSuanming Mou 	return nb_ops;
10850750c8b1SSuanming Mou }
10860750c8b1SSuanming Mou 
10870750c8b1SSuanming Mou static __rte_always_inline void
10880750c8b1SSuanming Mou mlx5_crypto_gcm_restore_ipsec_mem(struct mlx5_crypto_qp *qp,
10890750c8b1SSuanming Mou 				  uint16_t orci,
10900750c8b1SSuanming Mou 				  uint16_t rci,
10910750c8b1SSuanming Mou 				  uint16_t op_mask)
10920750c8b1SSuanming Mou {
10930750c8b1SSuanming Mou 	uint32_t idx;
10940750c8b1SSuanming Mou 	struct mlx5_crypto_session *sess;
10950750c8b1SSuanming Mou 	struct rte_crypto_op *op;
10960750c8b1SSuanming Mou 	struct rte_mbuf *m_src;
10973455ed86SSuanming Mou 	struct rte_mbuf *m_dst;
10980750c8b1SSuanming Mou 	uint8_t *payload;
10990750c8b1SSuanming Mou 
11000750c8b1SSuanming Mou 	while (orci != rci) {
11010750c8b1SSuanming Mou 		idx = orci & op_mask;
11020750c8b1SSuanming Mou 		op = qp->ops[idx];
11030750c8b1SSuanming Mou 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
11040750c8b1SSuanming Mou 		m_src = op->sym->m_src;
11050750c8b1SSuanming Mou 		payload = rte_pktmbuf_mtod_offset(m_src, void *,
11060750c8b1SSuanming Mou 						  op->sym->aead.data.offset);
11070750c8b1SSuanming Mou 		/* Restore the IPsec memory. */
11080750c8b1SSuanming Mou 		if (unlikely(sess->aad_len > MLX5_CRYPTO_GCM_IPSEC_IV_SIZE))
11090750c8b1SSuanming Mou 			memmove(op->sym->aead.aad.data,
11100750c8b1SSuanming Mou 				RTE_PTR_SUB(payload, sess->aad_len), sess->aad_len);
11110750c8b1SSuanming Mou 		rte_memcpy(RTE_PTR_SUB(payload, MLX5_CRYPTO_GCM_IPSEC_IV_SIZE),
11120750c8b1SSuanming Mou 			   &qp->ipsec_mem[idx], MLX5_CRYPTO_GCM_IPSEC_IV_SIZE);
11133455ed86SSuanming Mou 		m_dst = op->sym->m_dst;
11143455ed86SSuanming Mou 		if (m_dst && m_dst != m_src) {
11153455ed86SSuanming Mou 			uint32_t bytes_to_copy;
11163455ed86SSuanming Mou 
11173455ed86SSuanming Mou 			bytes_to_copy = RTE_PTR_DIFF(payload, op->sym->aead.aad.data);
11183455ed86SSuanming Mou 			rte_memcpy(RTE_PTR_SUB(rte_pktmbuf_mtod_offset(m_dst, void *,
11193455ed86SSuanming Mou 				   op->sym->aead.data.offset), bytes_to_copy),
11203455ed86SSuanming Mou 				   op->sym->aead.aad.data,
11213455ed86SSuanming Mou 				   bytes_to_copy);
11223455ed86SSuanming Mou 		}
11230750c8b1SSuanming Mou 		orci++;
11240750c8b1SSuanming Mou 	}
11250750c8b1SSuanming Mou }
11260750c8b1SSuanming Mou 
/*
 * Dequeue completed AES-GCM operations on the IPsec-optimized datapath.
 *
 * Polls the CQ for completions, advances the QP completion counter,
 * rings the CQ doorbell record, restores the IPsec memory the enqueue
 * path modified in-place (see mlx5_crypto_gcm_restore_ipsec_mem above)
 * and hands the finished ops back to the caller.
 *
 * @param queue_pair
 *   Opaque pointer to the mlx5 crypto queue pair.
 * @param ops
 *   Array to store the dequeued operations.
 * @param nb_ops
 *   Maximum number of operations to dequeue.
 *
 * @return
 *   Number of operations actually dequeued (0 when nothing completed).
 */
static uint16_t
mlx5_crypto_gcm_ipsec_dequeue_burst(void *queue_pair,
				    struct rte_crypto_op **ops,
				    uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	volatile struct mlx5_cqe *restrict cqe;
	const unsigned int cq_size = qp->cq_entries_n;
	const unsigned int mask = cq_size - 1;
	const unsigned int op_mask = qp->entries_n - 1;
	uint32_t idx;
	uint32_t next_idx = qp->cq_ci & mask;
	uint16_t reported_ci = qp->reported_ci;
	uint16_t qp_ci = qp->qp_ci;
	/* Cap at the number of posted-but-unreported ops and the caller's room. */
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - reported_ci), nb_ops);
	uint16_t op_num = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	while (qp_ci - reported_ci < max) {
		idx = next_idx;
		next_idx = (qp->cq_ci + 1) & mask;
		cqe = &qp->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->cq_ci);
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			/*
			 * HW_OWN means no new completion yet - stop polling.
			 * Any other status is an error CQE to be handled.
			 */
			if (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))
				mlx5_crypto_gcm_cqe_err_handle(qp,
						qp->ops[reported_ci & op_mask]);
			break;
		}
		/*
		 * wqe_counter reports the last WQE completed, so one CQE may
		 * acknowledge several outstanding WQEs at once.
		 */
		qp_ci = rte_be_to_cpu_16(cqe->wqe_counter) + 1;
		qp->cq_ci++;
	}
	/* If wqe_counter changed, means CQE handled. */
	if (likely(qp->qp_ci != qp_ci)) {
		qp->qp_ci = qp_ci;
		/* Order the CQE reads above before the doorbell record update. */
		rte_io_wmb();
		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->cq_ci);
	}
	/* If reported_ci is not same with qp_ci, means op retrieved. */
	if (qp_ci != reported_ci) {
		op_num = RTE_MIN((uint16_t)(qp_ci - reported_ci), max);
		reported_ci += op_num;
		/* Undo the in-place IPsec layout changes before returning ops. */
		mlx5_crypto_gcm_restore_ipsec_mem(qp, qp->reported_ci, reported_ci, op_mask);
		mlx5_crypto_gcm_fill_op(qp, ops, qp->reported_ci, reported_ci, op_mask);
		qp->stats.dequeued_count += op_num;
		qp->reported_ci = reported_ci;
	}
	return op_num;
}
11780750c8b1SSuanming Mou 
117998fb4bb0SSuanming Mou int
118004da07e6SSuanming Mou mlx5_crypto_gcm_init(struct mlx5_crypto_priv *priv)
118104da07e6SSuanming Mou {
11826c948396SSuanming Mou 	struct mlx5_common_device *cdev = priv->cdev;
11837f8eb434SSuanming Mou 	struct rte_cryptodev *crypto_dev = priv->crypto_dev;
11847f8eb434SSuanming Mou 	struct rte_cryptodev_ops *dev_ops = crypto_dev->dev_ops;
11856c948396SSuanming Mou 	int ret;
11867f8eb434SSuanming Mou 
11877f8eb434SSuanming Mou 	/* Override AES-GCM specified ops. */
11887f8eb434SSuanming Mou 	dev_ops->sym_session_configure = mlx5_crypto_sym_gcm_session_configure;
1189b32dbedbSSuanming Mou 	mlx5_os_set_reg_mr_cb(&priv->reg_mr_cb, &priv->dereg_mr_cb);
1190b32dbedbSSuanming Mou 	dev_ops->queue_pair_setup = mlx5_crypto_gcm_qp_setup;
1191b32dbedbSSuanming Mou 	dev_ops->queue_pair_release = mlx5_crypto_gcm_qp_release;
11920750c8b1SSuanming Mou 	if (mlx5_crypto_is_ipsec_opt(priv)) {
11930750c8b1SSuanming Mou 		crypto_dev->dequeue_burst = mlx5_crypto_gcm_ipsec_dequeue_burst;
11940750c8b1SSuanming Mou 		crypto_dev->enqueue_burst = mlx5_crypto_gcm_ipsec_enqueue_burst;
11950750c8b1SSuanming Mou 		priv->max_klm_num = 0;
11960750c8b1SSuanming Mou 	} else {
1197b0109583SSuanming Mou 		crypto_dev->dequeue_burst = mlx5_crypto_gcm_dequeue_burst;
1198b0109583SSuanming Mou 		crypto_dev->enqueue_burst = mlx5_crypto_gcm_enqueue_burst;
11990750c8b1SSuanming Mou 		priv->max_klm_num = RTE_ALIGN((priv->max_segs_num + 1) * 2 + 1,
12000750c8b1SSuanming Mou 					MLX5_UMR_KLM_NUM_ALIGN);
12010750c8b1SSuanming Mou 	}
12026c948396SSuanming Mou 	/* Generate GCM capability. */
12036c948396SSuanming Mou 	ret = mlx5_crypto_generate_gcm_cap(&cdev->config.hca_attr.crypto_mmo,
12046c948396SSuanming Mou 					   mlx5_crypto_gcm_caps);
12056c948396SSuanming Mou 	if (ret) {
12066c948396SSuanming Mou 		DRV_LOG(ERR, "No enough AES-GCM cap.");
12076c948396SSuanming Mou 		return -1;
12086c948396SSuanming Mou 	}
120904da07e6SSuanming Mou 	priv->caps = mlx5_crypto_gcm_caps;
121004da07e6SSuanming Mou 	return 0;
121104da07e6SSuanming Mou }
1212