xref: /dpdk/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c (revision b7bd72d8da9c13deba44b1ac9f7dfa8cda77f240)
10c4546deSFan Zhang /* SPDX-License-Identifier: BSD-3-Clause
2a815a04cSKai Ji  * Copyright(c) 2017-2022 Intel Corporation
30c4546deSFan Zhang  */
40c4546deSFan Zhang 
50c4546deSFan Zhang #include <rte_cryptodev.h>
60c4546deSFan Zhang #include <rte_security_driver.h>
70c4546deSFan Zhang 
80c4546deSFan Zhang #include "adf_transport_access_macros.h"
90c4546deSFan Zhang #include "icp_qat_fw.h"
100c4546deSFan Zhang #include "icp_qat_fw_la.h"
110c4546deSFan Zhang 
120c4546deSFan Zhang #include "qat_sym.h"
130c4546deSFan Zhang #include "qat_sym_session.h"
140c4546deSFan Zhang #include "qat_crypto.h"
150c4546deSFan Zhang #include "qat_crypto_pmd_gens.h"
160c4546deSFan Zhang 
17cffb726bSVikash Poddar static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen1[] = {
18cffb726bSVikash Poddar 	QAT_SYM_CIPHER_CAP(DES_CBC,
19cffb726bSVikash Poddar 		CAP_SET(block_size, 8),
20cffb726bSVikash Poddar 		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
21cffb726bSVikash Poddar 	QAT_SYM_CIPHER_CAP(3DES_CBC,
22cffb726bSVikash Poddar 		CAP_SET(block_size, 8),
23cffb726bSVikash Poddar 		CAP_RNG(key_size, 8, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
24cffb726bSVikash Poddar 	QAT_SYM_CIPHER_CAP(3DES_CTR,
25cffb726bSVikash Poddar 		CAP_SET(block_size, 8),
26cffb726bSVikash Poddar 		CAP_RNG(key_size, 16, 24, 8), CAP_RNG(iv_size, 8, 8, 0)),
270c4546deSFan Zhang 	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
280c4546deSFan Zhang 		CAP_SET(block_size, 64),
290c4546deSFan Zhang 		CAP_RNG(digest_size, 1, 20, 1)),
30cffb726bSVikash Poddar 	QAT_SYM_AUTH_CAP(SHA224,
31cffb726bSVikash Poddar 		CAP_SET(block_size, 64),
32cffb726bSVikash Poddar 		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
33cffb726bSVikash Poddar 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
34cffb726bSVikash Poddar 	QAT_SYM_AUTH_CAP(SHA1_HMAC,
35cffb726bSVikash Poddar 		CAP_SET(block_size, 64),
36cffb726bSVikash Poddar 		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
37cffb726bSVikash Poddar 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
38cffb726bSVikash Poddar 	QAT_SYM_AUTH_CAP(SHA224_HMAC,
39cffb726bSVikash Poddar 		CAP_SET(block_size, 64),
40cffb726bSVikash Poddar 		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
41cffb726bSVikash Poddar 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
42cffb726bSVikash Poddar 	QAT_SYM_AUTH_CAP(MD5_HMAC,
43cffb726bSVikash Poddar 		CAP_SET(block_size, 64),
44cffb726bSVikash Poddar 		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 16, 1),
45cffb726bSVikash Poddar 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
46cffb726bSVikash Poddar 	QAT_SYM_CIPHER_CAP(DES_DOCSISBPI,
47cffb726bSVikash Poddar 		CAP_SET(block_size, 8),
48cffb726bSVikash Poddar 		CAP_RNG(key_size, 8, 8, 0), CAP_RNG(iv_size, 8, 8, 0)),
49cffb726bSVikash Poddar };
50cffb726bSVikash Poddar 
51cffb726bSVikash Poddar static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen1[] = {
520c4546deSFan Zhang 	QAT_SYM_AEAD_CAP(AES_GCM,
530c4546deSFan Zhang 		CAP_SET(block_size, 16),
540c4546deSFan Zhang 		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
550c4546deSFan Zhang 		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
560c4546deSFan Zhang 	QAT_SYM_AEAD_CAP(AES_CCM,
570c4546deSFan Zhang 		CAP_SET(block_size, 16),
580c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
590c4546deSFan Zhang 		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
600c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(AES_GMAC,
610c4546deSFan Zhang 		CAP_SET(block_size, 16),
620c4546deSFan Zhang 		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
630c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
640c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(AES_CMAC,
650c4546deSFan Zhang 		CAP_SET(block_size, 16),
660c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
670c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
680c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SHA256,
690c4546deSFan Zhang 		CAP_SET(block_size, 64),
700c4546deSFan Zhang 		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
710c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
720c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SHA384,
730c4546deSFan Zhang 		CAP_SET(block_size, 128),
740c4546deSFan Zhang 		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
750c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
760c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SHA512,
770c4546deSFan Zhang 		CAP_SET(block_size, 128),
780c4546deSFan Zhang 		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
790c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
800c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SHA256_HMAC,
810c4546deSFan Zhang 		CAP_SET(block_size, 64),
820c4546deSFan Zhang 		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
830c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
840c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SHA384_HMAC,
850c4546deSFan Zhang 		CAP_SET(block_size, 128),
860c4546deSFan Zhang 		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
870c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
880c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SHA512_HMAC,
890c4546deSFan Zhang 		CAP_SET(block_size, 128),
900c4546deSFan Zhang 		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
910c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
920c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
930c4546deSFan Zhang 		CAP_SET(block_size, 16),
940c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
950c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
960c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(SNOW3G_UIA2,
970c4546deSFan Zhang 		CAP_SET(block_size, 16),
980c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
990c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 16, 16, 0)),
1000c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(KASUMI_F9,
1010c4546deSFan Zhang 		CAP_SET(block_size, 8),
1020c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 4, 0),
1030c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
1040c4546deSFan Zhang 	QAT_SYM_AUTH_CAP(NULL,
1050c4546deSFan Zhang 		CAP_SET(block_size, 1),
1060c4546deSFan Zhang 		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
1070c4546deSFan Zhang 		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
1080c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(AES_CBC,
1090c4546deSFan Zhang 		CAP_SET(block_size, 16),
1100c4546deSFan Zhang 		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
1110c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(AES_CTR,
1120c4546deSFan Zhang 		CAP_SET(block_size, 16),
1130c4546deSFan Zhang 		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
1140c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(AES_XTS,
1150c4546deSFan Zhang 		CAP_SET(block_size, 16),
1160c4546deSFan Zhang 		CAP_RNG(key_size, 32, 64, 32), CAP_RNG(iv_size, 16, 16, 0)),
1170c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
1180c4546deSFan Zhang 		CAP_SET(block_size, 16),
1190c4546deSFan Zhang 		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
1200c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(SNOW3G_UEA2,
1210c4546deSFan Zhang 		CAP_SET(block_size, 16),
1220c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
1230c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(KASUMI_F8,
1240c4546deSFan Zhang 		CAP_SET(block_size, 8),
1250c4546deSFan Zhang 		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 8, 8, 0)),
1260c4546deSFan Zhang 	QAT_SYM_CIPHER_CAP(NULL,
1270c4546deSFan Zhang 		CAP_SET(block_size, 1),
1280c4546deSFan Zhang 		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
1290c4546deSFan Zhang 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
1300c4546deSFan Zhang };
1310c4546deSFan Zhang 
1320c4546deSFan Zhang struct rte_cryptodev_ops qat_sym_crypto_ops_gen1 = {
1330c4546deSFan Zhang 
1340c4546deSFan Zhang 	/* Device related operations */
1350c4546deSFan Zhang 	.dev_configure		= qat_cryptodev_config,
1360c4546deSFan Zhang 	.dev_start		= qat_cryptodev_start,
1370c4546deSFan Zhang 	.dev_stop		= qat_cryptodev_stop,
1380c4546deSFan Zhang 	.dev_close		= qat_cryptodev_close,
1390c4546deSFan Zhang 	.dev_infos_get		= qat_cryptodev_info_get,
1400c4546deSFan Zhang 
1410c4546deSFan Zhang 	.stats_get		= qat_cryptodev_stats_get,
1420c4546deSFan Zhang 	.stats_reset		= qat_cryptodev_stats_reset,
1430c4546deSFan Zhang 	.queue_pair_setup	= qat_cryptodev_qp_setup,
1440c4546deSFan Zhang 	.queue_pair_release	= qat_cryptodev_qp_release,
1450c4546deSFan Zhang 
1460c4546deSFan Zhang 	/* Crypto related operations */
1470c4546deSFan Zhang 	.sym_session_get_size	= qat_sym_session_get_private_size,
1480c4546deSFan Zhang 	.sym_session_configure	= qat_sym_session_configure,
1490c4546deSFan Zhang 	.sym_session_clear	= qat_sym_session_clear,
15085fec6fdSKai Ji 
15185fec6fdSKai Ji 	/* Raw data-path API related operations */
15285fec6fdSKai Ji 	.sym_get_raw_dp_ctx_size = qat_sym_get_dp_ctx_size,
15385fec6fdSKai Ji 	.sym_configure_raw_dp_ctx = qat_sym_configure_dp_ctx,
1540c4546deSFan Zhang };
1550c4546deSFan Zhang 
156b6ac58aeSArek Kusztal static int
157b6ac58aeSArek Kusztal qat_sym_crypto_cap_get_gen1(struct qat_cryptodev_private *internals,
158b6ac58aeSArek Kusztal 			const char *capa_memz_name,
159b6ac58aeSArek Kusztal 			const uint16_t __rte_unused slice_map)
1600c4546deSFan Zhang {
161cffb726bSVikash Poddar 
162cffb726bSVikash Poddar 	uint32_t legacy_capa_num;
163cffb726bSVikash Poddar 	uint32_t size = sizeof(qat_sym_crypto_caps_gen1);
164cffb726bSVikash Poddar 	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen1);
165cffb726bSVikash Poddar 	legacy_capa_num = legacy_size/sizeof(struct rte_cryptodev_capabilities);
166cffb726bSVikash Poddar 
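	/*
	 * If the legacy-algorithm option is enabled, the capability array
	 * must also make room for the legacy table.
	 */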
167*b7bd72d8SArkadiusz Kusztal 	if (unlikely(internals->qat_dev->options.legacy_alg))
168cffb726bSVikash Poddar 		size = size + legacy_size;
169b6ac58aeSArek Kusztal 
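	/*
	 * Reuse the capability memzone if it was already reserved,
	 * otherwise create it now.
	 */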
170b6ac58aeSArek Kusztal 	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
171b6ac58aeSArek Kusztal 	if (internals->capa_mz == NULL) {
172b6ac58aeSArek Kusztal 		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
173b6ac58aeSArek Kusztal 				size, rte_socket_id(), 0);
174b6ac58aeSArek Kusztal 		if (internals->capa_mz == NULL) {
175b6ac58aeSArek Kusztal 			QAT_LOG(DEBUG,
176b6ac58aeSArek Kusztal 				"Error allocating memzone for capabilities");
177b6ac58aeSArek Kusztal 			return -1;
178b6ac58aeSArek Kusztal 		}
179b6ac58aeSArek Kusztal 	}
180b6ac58aeSArek Kusztal 
181b6ac58aeSArek Kusztal 	struct rte_cryptodev_capabilities *addr =
182b6ac58aeSArek Kusztal 			(struct rte_cryptodev_capabilities *)
183b6ac58aeSArek Kusztal 				internals->capa_mz->addr;
184b6ac58aeSArek Kusztal 
185cffb726bSVikash Poddar 	struct rte_cryptodev_capabilities *capabilities;
186cffb726bSVikash Poddar 
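	/* Legacy capabilities, if enabled, go first; the gen1 set follows. */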
187*b7bd72d8SArkadiusz Kusztal 	if (unlikely(internals->qat_dev->options.legacy_alg)) {
188cffb726bSVikash Poddar 		capabilities = qat_sym_crypto_legacy_caps_gen1;
189cffb726bSVikash Poddar 		memcpy(addr, capabilities, legacy_size);
190cffb726bSVikash Poddar 		addr += legacy_capa_num;
191b6ac58aeSArek Kusztal 	}
192cffb726bSVikash Poddar 	capabilities = qat_sym_crypto_caps_gen1;
193cffb726bSVikash Poddar 	memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen1));
194b6ac58aeSArek Kusztal 	internals->qat_dev_capabilities = internals->capa_mz->addr;
195b6ac58aeSArek Kusztal 
196b6ac58aeSArek Kusztal 	return 0;
1970c4546deSFan Zhang }
1980c4546deSFan Zhang 
1990c4546deSFan Zhang uint64_t
2000c4546deSFan Zhang qat_sym_crypto_feature_flags_get_gen1(
2010c4546deSFan Zhang 	struct qat_pci_device *qat_dev __rte_unused)
2020c4546deSFan Zhang {
2030c4546deSFan Zhang 	uint64_t feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2040c4546deSFan Zhang 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2050c4546deSFan Zhang 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2060c4546deSFan Zhang 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2070c4546deSFan Zhang 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2080c4546deSFan Zhang 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2090c4546deSFan Zhang 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2100c4546deSFan Zhang 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
2110c4546deSFan Zhang 			RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
2120c4546deSFan Zhang 			RTE_CRYPTODEV_FF_SYM_RAW_DP;
2130c4546deSFan Zhang 
2140c4546deSFan Zhang 	return feature_flags;
2150c4546deSFan Zhang }
2160c4546deSFan Zhang 
217a815a04cSKai Ji int
218a815a04cSKai Ji qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
219a815a04cSKai Ji 		uint8_t *out_msg, void *op_cookie)
220a815a04cSKai Ji {
221a815a04cSKai Ji 	register struct icp_qat_fw_la_bulk_req *req;
222a815a04cSKai Ji 	struct rte_crypto_op *op = in_op;
223a815a04cSKai Ji 	struct qat_sym_op_cookie *cookie = op_cookie;
224a815a04cSKai Ji 	struct rte_crypto_sgl in_sgl, out_sgl;
225a815a04cSKai Ji 	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
226a815a04cSKai Ji 			out_vec[QAT_SYM_SGL_MAX_NUMBER];
227a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr cipher_iv;
228a815a04cSKai Ji 	union rte_crypto_sym_ofs ofs;
229a815a04cSKai Ji 	int32_t total_len;
230a815a04cSKai Ji 
231a815a04cSKai Ji 	in_sgl.vec = in_vec;
232a815a04cSKai Ji 	out_sgl.vec = out_vec;
233a815a04cSKai Ji 
234a815a04cSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
235a815a04cSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
236a815a04cSKai Ji 
237a815a04cSKai Ji 	ofs.raw = qat_sym_convert_op_to_vec_cipher(op, ctx, &in_sgl, &out_sgl,
238a815a04cSKai Ji 			&cipher_iv, NULL, NULL);
239a815a04cSKai Ji 	if (unlikely(ofs.raw == UINT64_MAX)) {
240a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
241a815a04cSKai Ji 		return -EINVAL;
242a815a04cSKai Ji 	}
243a815a04cSKai Ji 
244a815a04cSKai Ji 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
245a815a04cSKai Ji 			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
246a815a04cSKai Ji 	if (unlikely(total_len < 0)) {
247a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
248a815a04cSKai Ji 		return -EINVAL;
249a815a04cSKai Ji 	}
250a815a04cSKai Ji 
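	/* ZUC-256 sessions need the IV adjusted in place before enqueue. */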
2516c868d6eSCiara Power 	if (ctx->is_zuc256)
2526c868d6eSCiara Power 		zuc256_modify_iv(cipher_iv.va);
2536c868d6eSCiara Power 
2540fda888bSSivaramakrishnan Venkat 	enqueue_one_cipher_job_gen1(ctx, req, &cipher_iv, ofs, total_len, op_cookie);
255a815a04cSKai Ji 
256a815a04cSKai Ji 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
257a815a04cSKai Ji 			NULL, NULL, NULL);
258a815a04cSKai Ji 
259a815a04cSKai Ji 	return 0;
260a815a04cSKai Ji }
261a815a04cSKai Ji 
262a815a04cSKai Ji int
263a815a04cSKai Ji qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
264a815a04cSKai Ji 		uint8_t *out_msg, void *op_cookie)
265a815a04cSKai Ji {
266a815a04cSKai Ji 	register struct icp_qat_fw_la_bulk_req *req;
267a815a04cSKai Ji 	struct rte_crypto_op *op = in_op;
268a815a04cSKai Ji 	struct qat_sym_op_cookie *cookie = op_cookie;
269a815a04cSKai Ji 	struct rte_crypto_sgl in_sgl, out_sgl;
270a815a04cSKai Ji 	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
271a815a04cSKai Ji 			out_vec[QAT_SYM_SGL_MAX_NUMBER];
272a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr auth_iv;
273a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr digest;
274a815a04cSKai Ji 	union rte_crypto_sym_ofs ofs;
275a815a04cSKai Ji 	int32_t total_len;
2766c868d6eSCiara Power 	struct rte_cryptodev *cdev;
2776c868d6eSCiara Power 	struct qat_cryptodev_private *internals;
278a815a04cSKai Ji 
279a815a04cSKai Ji 	in_sgl.vec = in_vec;
280a815a04cSKai Ji 	out_sgl.vec = out_vec;
281a815a04cSKai Ji 
282a815a04cSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
283a815a04cSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
284a815a04cSKai Ji 
285a815a04cSKai Ji 	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
2868b4618a7SCiara Power 			NULL, &auth_iv, &digest, op_cookie);
287a815a04cSKai Ji 	if (unlikely(ofs.raw == UINT64_MAX)) {
288a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
289a815a04cSKai Ji 		return -EINVAL;
290a815a04cSKai Ji 	}
291a815a04cSKai Ji 
2926c868d6eSCiara Power 	cdev = rte_cryptodev_pmd_get_dev(ctx->dev_id);
2936c868d6eSCiara Power 	internals = cdev->data->dev_private;
2946c868d6eSCiara Power 
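	/*
	 * On devices with a wireless slice, clear the cipher IV field flag
	 * for plain (non-GMAC) authentication requests.
	 */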
295*b7bd72d8SArkadiusz Kusztal 	if (internals->qat_dev->options.has_wireless_slice && !ctx->is_gmac)
2966c868d6eSCiara Power 		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
2976c868d6eSCiara Power 				req->comn_hdr.serv_specif_flags, 0);
2986c868d6eSCiara Power 
299a815a04cSKai Ji 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
300a815a04cSKai Ji 			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
301a815a04cSKai Ji 	if (unlikely(total_len < 0)) {
302a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
303a815a04cSKai Ji 		return -EINVAL;
304a815a04cSKai Ji 	}
305a815a04cSKai Ji 
3066c868d6eSCiara Power 	if (ctx->is_zuc256)
3076c868d6eSCiara Power 		zuc256_modify_iv(auth_iv.va);
3086c868d6eSCiara Power 
309a815a04cSKai Ji 	enqueue_one_auth_job_gen1(ctx, req, &digest, &auth_iv, ofs,
310a815a04cSKai Ji 			total_len);
311a815a04cSKai Ji 
312a815a04cSKai Ji 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
313a815a04cSKai Ji 			&auth_iv, NULL, &digest);
314a815a04cSKai Ji 
315a815a04cSKai Ji 	return 0;
316a815a04cSKai Ji }
317a815a04cSKai Ji 
318a815a04cSKai Ji int
319a815a04cSKai Ji qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
320a815a04cSKai Ji 		uint8_t *out_msg, void *op_cookie)
321a815a04cSKai Ji {
322a815a04cSKai Ji 	register struct icp_qat_fw_la_bulk_req *req;
323a815a04cSKai Ji 	struct rte_crypto_op *op = in_op;
324a815a04cSKai Ji 	struct qat_sym_op_cookie *cookie = op_cookie;
325a815a04cSKai Ji 	struct rte_crypto_sgl in_sgl, out_sgl;
326a815a04cSKai Ji 	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
327a815a04cSKai Ji 			out_vec[QAT_SYM_SGL_MAX_NUMBER];
328a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr cipher_iv;
329a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr aad;
330a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr digest;
331a815a04cSKai Ji 	union rte_crypto_sym_ofs ofs;
332a815a04cSKai Ji 	int32_t total_len;
333a815a04cSKai Ji 
334a815a04cSKai Ji 	in_sgl.vec = in_vec;
335a815a04cSKai Ji 	out_sgl.vec = out_vec;
336a815a04cSKai Ji 
337a815a04cSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
338a815a04cSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
339a815a04cSKai Ji 
340a815a04cSKai Ji 	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
341a815a04cSKai Ji 			&cipher_iv, &aad, &digest);
342a815a04cSKai Ji 	if (unlikely(ofs.raw == UINT64_MAX)) {
343a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
344a815a04cSKai Ji 		return -EINVAL;
345a815a04cSKai Ji 	}
346a815a04cSKai Ji 
347a815a04cSKai Ji 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
348a815a04cSKai Ji 			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
349a815a04cSKai Ji 	if (unlikely(total_len < 0)) {
350a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
351a815a04cSKai Ji 		return -EINVAL;
352a815a04cSKai Ji 	}
353a815a04cSKai Ji 
354a815a04cSKai Ji 	enqueue_one_aead_job_gen1(ctx, req, &cipher_iv, &digest, &aad, ofs,
355a815a04cSKai Ji 		total_len);
356a815a04cSKai Ji 
357a815a04cSKai Ji 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
358a815a04cSKai Ji 			NULL, &aad, &digest);
359a815a04cSKai Ji 
360a815a04cSKai Ji 	return 0;
361a815a04cSKai Ji }
362a815a04cSKai Ji 
363a815a04cSKai Ji int
364a815a04cSKai Ji qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
365a815a04cSKai Ji 		uint8_t *out_msg, void *op_cookie)
366a815a04cSKai Ji {
367a815a04cSKai Ji 	register struct icp_qat_fw_la_bulk_req *req;
368a815a04cSKai Ji 	struct rte_crypto_op *op = in_op;
369a815a04cSKai Ji 	struct qat_sym_op_cookie *cookie = op_cookie;
370a815a04cSKai Ji 	struct rte_crypto_sgl in_sgl = {0}, out_sgl = {0};
371a815a04cSKai Ji 	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
372a815a04cSKai Ji 			out_vec[QAT_SYM_SGL_MAX_NUMBER];
373a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr cipher_iv;
374a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr auth_iv;
375a815a04cSKai Ji 	struct rte_crypto_va_iova_ptr digest;
376a815a04cSKai Ji 	union rte_crypto_sym_ofs ofs;
377a815a04cSKai Ji 	int32_t total_len;
378a815a04cSKai Ji 
379a815a04cSKai Ji 	in_sgl.vec = in_vec;
380a815a04cSKai Ji 	out_sgl.vec = out_vec;
381a815a04cSKai Ji 
382a815a04cSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
383a815a04cSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
384a815a04cSKai Ji 
385a815a04cSKai Ji 	ofs.raw = qat_sym_convert_op_to_vec_chain(op, ctx, &in_sgl, &out_sgl,
3868b4618a7SCiara Power 			&cipher_iv, &auth_iv, &digest, cookie);
387a815a04cSKai Ji 	if (unlikely(ofs.raw == UINT64_MAX)) {
388a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
389a815a04cSKai Ji 		return -EINVAL;
390a815a04cSKai Ji 	}
391a815a04cSKai Ji 
392a815a04cSKai Ji 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
393a815a04cSKai Ji 			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
394a815a04cSKai Ji 	if (unlikely(total_len < 0)) {
395a815a04cSKai Ji 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
396a815a04cSKai Ji 		return -EINVAL;
397a815a04cSKai Ji 	}
398a815a04cSKai Ji 
3996c868d6eSCiara Power 	if (ctx->is_zuc256) {
4006c868d6eSCiara Power 		zuc256_modify_iv(cipher_iv.va);
4016c868d6eSCiara Power 		zuc256_modify_iv(auth_iv.va);
4026c868d6eSCiara Power 	}
4036c868d6eSCiara Power 
404a815a04cSKai Ji 	enqueue_one_chain_job_gen1(ctx, req, in_sgl.vec, in_sgl.num,
405a815a04cSKai Ji 			out_sgl.vec, out_sgl.num, &cipher_iv, &digest, &auth_iv,
4060fda888bSSivaramakrishnan Venkat 			ofs, total_len, cookie);
407a815a04cSKai Ji 
408a815a04cSKai Ji 	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
409a815a04cSKai Ji 			&auth_iv, NULL, &digest);
410a815a04cSKai Ji 
411a815a04cSKai Ji 	return 0;
412a815a04cSKai Ji }
413a815a04cSKai Ji 
4140c4546deSFan Zhang #define QAT_SECURITY_SYM_CAPABILITIES					\
4150c4546deSFan Zhang 	{	/* AES DOCSIS BPI */					\
4160c4546deSFan Zhang 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
4170c4546deSFan Zhang 		{.sym = {						\
4180c4546deSFan Zhang 			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
4190c4546deSFan Zhang 			{.cipher = {					\
4200c4546deSFan Zhang 				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
4210c4546deSFan Zhang 				.block_size = 16,			\
4220c4546deSFan Zhang 				.key_size = {				\
4230c4546deSFan Zhang 					.min = 16,			\
4240c4546deSFan Zhang 					.max = 32,			\
4250c4546deSFan Zhang 					.increment = 16			\
4260c4546deSFan Zhang 				},					\
4270c4546deSFan Zhang 				.iv_size = {				\
4280c4546deSFan Zhang 					.min = 16,			\
4290c4546deSFan Zhang 					.max = 16,			\
4300c4546deSFan Zhang 					.increment = 0			\
4310c4546deSFan Zhang 				}					\
4320c4546deSFan Zhang 			}, }						\
4330c4546deSFan Zhang 		}, }							\
4340c4546deSFan Zhang 	}
4350c4546deSFan Zhang 
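/*
 * DOCSIS lookaside protocol capabilities: the same symmetric capability set
 * is advertised for both the uplink and downlink directions.
 */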
4360c4546deSFan Zhang #define QAT_SECURITY_CAPABILITIES(sym)					\
4370c4546deSFan Zhang 	[0] = {	/* DOCSIS Uplink */					\
4380c4546deSFan Zhang 		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,	\
4390c4546deSFan Zhang 		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,		\
4400c4546deSFan Zhang 		.docsis = {						\
4410c4546deSFan Zhang 			.direction = RTE_SECURITY_DOCSIS_UPLINK		\
4420c4546deSFan Zhang 		},							\
4430c4546deSFan Zhang 		.crypto_capabilities = (sym)				\
4440c4546deSFan Zhang 	},								\
4450c4546deSFan Zhang 	[1] = {	/* DOCSIS Downlink */					\
4460c4546deSFan Zhang 		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,	\
4470c4546deSFan Zhang 		.protocol = RTE_SECURITY_PROTOCOL_DOCSIS,		\
4480c4546deSFan Zhang 		.docsis = {						\
4490c4546deSFan Zhang 			.direction = RTE_SECURITY_DOCSIS_DOWNLINK	\
4500c4546deSFan Zhang 		},							\
4510c4546deSFan Zhang 		.crypto_capabilities = (sym)				\
4520c4546deSFan Zhang 	}
4530c4546deSFan Zhang 
4540c4546deSFan Zhang static const struct rte_cryptodev_capabilities
4550c4546deSFan Zhang 					qat_security_sym_capabilities[] = {
4560c4546deSFan Zhang 	QAT_SECURITY_SYM_CAPABILITIES,
4570c4546deSFan Zhang 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
4580c4546deSFan Zhang };
4590c4546deSFan Zhang 
4600c4546deSFan Zhang static const struct rte_security_capability qat_security_capabilities_gen1[] = {
4610c4546deSFan Zhang 	QAT_SECURITY_CAPABILITIES(qat_security_sym_capabilities),
4620c4546deSFan Zhang 	{
4630c4546deSFan Zhang 		.action = RTE_SECURITY_ACTION_TYPE_NONE
4640c4546deSFan Zhang 	}
4650c4546deSFan Zhang };
4660c4546deSFan Zhang 
4670c4546deSFan Zhang static const struct rte_security_capability *
4680c4546deSFan Zhang qat_security_cap_get_gen1(void *dev __rte_unused)
4690c4546deSFan Zhang {
4700c4546deSFan Zhang 	return qat_security_capabilities_gen1;
4710c4546deSFan Zhang }
4720c4546deSFan Zhang 
4730c4546deSFan Zhang struct rte_security_ops security_qat_ops_gen1 = {
4740c4546deSFan Zhang 		.session_create = qat_security_session_create,
4750c4546deSFan Zhang 		.session_update = NULL,
47666837861SAkhil Goyal 		.session_get_size = qat_security_session_get_size,
4770c4546deSFan Zhang 		.session_stats_get = NULL,
4780c4546deSFan Zhang 		.session_destroy = qat_security_session_destroy,
4790c4546deSFan Zhang 		.set_pkt_metadata = NULL,
4800c4546deSFan Zhang 		.capabilities_get = qat_security_cap_get_gen1
4810c4546deSFan Zhang };
4820c4546deSFan Zhang 
4830c4546deSFan Zhang void *
4840c4546deSFan Zhang qat_sym_create_security_gen1(void *cryptodev)
4850c4546deSFan Zhang {
4860c4546deSFan Zhang 	struct rte_security_ctx *security_instance;
4870c4546deSFan Zhang 
4880c4546deSFan Zhang 	security_instance = rte_malloc(NULL, sizeof(struct rte_security_ctx),
4890c4546deSFan Zhang 			RTE_CACHE_LINE_SIZE);
4900c4546deSFan Zhang 	if (security_instance == NULL)
4910c4546deSFan Zhang 		return NULL;
4920c4546deSFan Zhang 
4930c4546deSFan Zhang 	security_instance->device = cryptodev;
4940c4546deSFan Zhang 	security_instance->ops = &security_qat_ops_gen1;
4950c4546deSFan Zhang 	security_instance->sess_cnt = 0;
4960c4546deSFan Zhang 
4970c4546deSFan Zhang 	return (void *)security_instance;
4980c4546deSFan Zhang }
4990c4546deSFan Zhang 
500254558c8SKai Ji int
50185fec6fdSKai Ji qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
50285fec6fdSKai Ji 	struct rte_crypto_vec *data, uint16_t n_data_vecs,
50385fec6fdSKai Ji 	union rte_crypto_sym_ofs ofs,
50485fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *iv,
50585fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *digest __rte_unused,
50685fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *aad __rte_unused,
50785fec6fdSKai Ji 	void *user_data)
50885fec6fdSKai Ji {
50985fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
51085fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
51185fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
51285fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
51385fec6fdSKai Ji 	struct qat_sym_op_cookie *cookie;
51485fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
51585fec6fdSKai Ji 	int32_t data_len;
51685fec6fdSKai Ji 	uint32_t tail = dp_ctx->tail;
51785fec6fdSKai Ji 
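	/* Next free descriptor on the TX ring and its matching op cookie. */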
51885fec6fdSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)(
51985fec6fdSKai Ji 		(uint8_t *)tx_queue->base_addr + tail);
52085fec6fdSKai Ji 	cookie = qp->op_cookies[tail >> tx_queue->trailz];
52185fec6fdSKai Ji 	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
52285fec6fdSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
52385fec6fdSKai Ji 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
52485fec6fdSKai Ji 
52585fec6fdSKai Ji 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
52685fec6fdSKai Ji 			data, n_data_vecs, NULL, 0);
52785fec6fdSKai Ji 	if (unlikely(data_len < 0))
52885fec6fdSKai Ji 		return -1;
52985fec6fdSKai Ji 
5306c868d6eSCiara Power 	if (ctx->is_zuc256)
5316c868d6eSCiara Power 		zuc256_modify_iv(iv->va);
5326c868d6eSCiara Power 
5330fda888bSSivaramakrishnan Venkat 	enqueue_one_cipher_job_gen1(ctx, req, iv, ofs, (uint32_t)data_len, cookie);
53485fec6fdSKai Ji 
53585fec6fdSKai Ji 	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
53685fec6fdSKai Ji 			NULL, NULL, NULL);
53785fec6fdSKai Ji 
53885fec6fdSKai Ji 	dp_ctx->tail = tail;
53985fec6fdSKai Ji 	dp_ctx->cached_enqueue++;
54085fec6fdSKai Ji 
54185fec6fdSKai Ji 	return 0;
54285fec6fdSKai Ji }
54385fec6fdSKai Ji 
54485fec6fdSKai Ji uint32_t
54585fec6fdSKai Ji qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
54685fec6fdSKai Ji 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
54785fec6fdSKai Ji 	void *user_data[], int *status)
54885fec6fdSKai Ji {
54985fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
55085fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
55185fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
55285fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
55385fec6fdSKai Ji 	uint32_t i, n;
55485fec6fdSKai Ji 	uint32_t tail;
55585fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
55685fec6fdSKai Ji 	int32_t data_len;
55785fec6fdSKai Ji 
55885fec6fdSKai Ji 	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
55985fec6fdSKai Ji 	if (unlikely(n == 0)) {
56085fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
56185fec6fdSKai Ji 		*status = 0;
56285fec6fdSKai Ji 		return 0;
56385fec6fdSKai Ji 	}
56485fec6fdSKai Ji 
56585fec6fdSKai Ji 	tail = dp_ctx->tail;
56685fec6fdSKai Ji 
56785fec6fdSKai Ji 	for (i = 0; i < n; i++) {
56885fec6fdSKai Ji 		struct qat_sym_op_cookie *cookie =
56985fec6fdSKai Ji 			qp->op_cookies[tail >> tx_queue->trailz];
57085fec6fdSKai Ji 
57185fec6fdSKai Ji 		req  = (struct icp_qat_fw_la_bulk_req *)(
57285fec6fdSKai Ji 			(uint8_t *)tx_queue->base_addr + tail);
57385fec6fdSKai Ji 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
57485fec6fdSKai Ji 
575ff01b26fSKai Ji 		if (vec->dest_sgl) {
576ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
577ff01b26fSKai Ji 				user_data[i], cookie,
578ff01b26fSKai Ji 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
579ff01b26fSKai Ji 				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
580ff01b26fSKai Ji 		} else {
581ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
582ff01b26fSKai Ji 				user_data[i], cookie,
583ff01b26fSKai Ji 				vec->src_sgl[i].vec,
58485fec6fdSKai Ji 				vec->src_sgl[i].num, NULL, 0);
585ff01b26fSKai Ji 		}
586ff01b26fSKai Ji 
58785fec6fdSKai Ji 		if (unlikely(data_len < 0))
58885fec6fdSKai Ji 			break;
5896c868d6eSCiara Power 
5906c868d6eSCiara Power 		if (ctx->is_zuc256)
5916c868d6eSCiara Power 			zuc256_modify_iv(vec->iv[i].va);
5926c868d6eSCiara Power 
59385fec6fdSKai Ji 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
5940fda888bSSivaramakrishnan Venkat 			(uint32_t)data_len, cookie);
59585fec6fdSKai Ji 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
59685fec6fdSKai Ji 
59785fec6fdSKai Ji 		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
59885fec6fdSKai Ji 				vec->src_sgl[i].num, &vec->iv[i],
59985fec6fdSKai Ji 				NULL, NULL, NULL);
60085fec6fdSKai Ji 	}
60185fec6fdSKai Ji 
60285fec6fdSKai Ji 	if (unlikely(i < n))
60385fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
60485fec6fdSKai Ji 
60585fec6fdSKai Ji 	dp_ctx->tail = tail;
60685fec6fdSKai Ji 	dp_ctx->cached_enqueue += i;
60785fec6fdSKai Ji 	*status = 0;
60885fec6fdSKai Ji 	return i;
60985fec6fdSKai Ji }
61085fec6fdSKai Ji 
61185fec6fdSKai Ji int
61285fec6fdSKai Ji qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
61385fec6fdSKai Ji 	struct rte_crypto_vec *data, uint16_t n_data_vecs,
61485fec6fdSKai Ji 	union rte_crypto_sym_ofs ofs,
61585fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *iv __rte_unused,
61685fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *digest,
61785fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *auth_iv,
61885fec6fdSKai Ji 	void *user_data)
61985fec6fdSKai Ji {
62085fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
62185fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
62285fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
62385fec6fdSKai Ji 	struct qat_sym_op_cookie *cookie;
62485fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
62585fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
62685fec6fdSKai Ji 	int32_t data_len;
62785fec6fdSKai Ji 	uint32_t tail = dp_ctx->tail;
628d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr null_digest;
629d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr *job_digest = digest;
63085fec6fdSKai Ji 
63185fec6fdSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)(
63285fec6fdSKai Ji 		(uint8_t *)tx_queue->base_addr + tail);
63385fec6fdSKai Ji 	cookie = qp->op_cookies[tail >> tx_queue->trailz];
63485fec6fdSKai Ji 	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
63585fec6fdSKai Ji 
63685fec6fdSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
63785fec6fdSKai Ji 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
63885fec6fdSKai Ji 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
63985fec6fdSKai Ji 			data, n_data_vecs, NULL, 0);
64085fec6fdSKai Ji 	if (unlikely(data_len < 0))
64185fec6fdSKai Ji 		return -1;
64285fec6fdSKai Ji 
6436c868d6eSCiara Power 	if (ctx->is_zuc256)
6446c868d6eSCiara Power 		zuc256_modify_iv(auth_iv->va);
6456c868d6eSCiara Power 
646d7d52b37SCiara Power 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
647d7d52b37SCiara Power 		null_digest.iova = cookie->digest_null_phys_addr;
648d7d52b37SCiara Power 		job_digest = &null_digest;
649d7d52b37SCiara Power 	}
650d7d52b37SCiara Power 
651d7d52b37SCiara Power 	enqueue_one_auth_job_gen1(ctx, req, job_digest, auth_iv, ofs,
65285fec6fdSKai Ji 		(uint32_t)data_len);
65385fec6fdSKai Ji 
65485fec6fdSKai Ji 	dp_ctx->tail = tail;
65585fec6fdSKai Ji 	dp_ctx->cached_enqueue++;
65685fec6fdSKai Ji 
65785fec6fdSKai Ji 	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, NULL,
65885fec6fdSKai Ji 			auth_iv, NULL, digest);
659ba02a9f6SMaxime Coquelin 
66085fec6fdSKai Ji 	return 0;
66185fec6fdSKai Ji }
66285fec6fdSKai Ji 
66385fec6fdSKai Ji uint32_t
66485fec6fdSKai Ji qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
66585fec6fdSKai Ji 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
66685fec6fdSKai Ji 	void *user_data[], int *status)
66785fec6fdSKai Ji {
66885fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
66985fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
67085fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
67185fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
67285fec6fdSKai Ji 	uint32_t i, n;
67385fec6fdSKai Ji 	uint32_t tail;
67485fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
67585fec6fdSKai Ji 	int32_t data_len;
676d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr null_digest;
677d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr *job_digest = NULL;
67885fec6fdSKai Ji 
67985fec6fdSKai Ji 	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
68085fec6fdSKai Ji 	if (unlikely(n == 0)) {
68185fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
68285fec6fdSKai Ji 		*status = 0;
68385fec6fdSKai Ji 		return 0;
68485fec6fdSKai Ji 	}
68585fec6fdSKai Ji 
68685fec6fdSKai Ji 	tail = dp_ctx->tail;
68785fec6fdSKai Ji 
68885fec6fdSKai Ji 	for (i = 0; i < n; i++) {
68985fec6fdSKai Ji 		struct qat_sym_op_cookie *cookie =
69085fec6fdSKai Ji 			qp->op_cookies[tail >> tx_queue->trailz];
69185fec6fdSKai Ji 
69285fec6fdSKai Ji 		req  = (struct icp_qat_fw_la_bulk_req *)(
69385fec6fdSKai Ji 			(uint8_t *)tx_queue->base_addr + tail);
69485fec6fdSKai Ji 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
69585fec6fdSKai Ji 
696ff01b26fSKai Ji 		if (vec->dest_sgl) {
697ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
698ff01b26fSKai Ji 				user_data[i], cookie,
699ff01b26fSKai Ji 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
700ff01b26fSKai Ji 				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
701ff01b26fSKai Ji 		} else {
702ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
703ff01b26fSKai Ji 				user_data[i], cookie,
704ff01b26fSKai Ji 				vec->src_sgl[i].vec,
705ff01b26fSKai Ji 				vec->src_sgl[i].num, NULL, 0);
706ff01b26fSKai Ji 		}
707ff01b26fSKai Ji 
70885fec6fdSKai Ji 		if (unlikely(data_len < 0))
70985fec6fdSKai Ji 			break;
710d7d52b37SCiara Power 
7116c868d6eSCiara Power 		if (ctx->is_zuc256)
7126c868d6eSCiara Power 			zuc256_modify_iv(vec->auth_iv[i].va);
7136c868d6eSCiara Power 
714d7d52b37SCiara Power 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
715d7d52b37SCiara Power 			null_digest.iova = cookie->digest_null_phys_addr;
716d7d52b37SCiara Power 			job_digest = &null_digest;
717d7d52b37SCiara Power 		} else
718d7d52b37SCiara Power 			job_digest = &vec->digest[i];
719d7d52b37SCiara Power 
720d7d52b37SCiara Power 		enqueue_one_auth_job_gen1(ctx, req, job_digest,
72185fec6fdSKai Ji 			&vec->auth_iv[i], ofs, (uint32_t)data_len);
72285fec6fdSKai Ji 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
72385fec6fdSKai Ji 
72485fec6fdSKai Ji 		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
72585fec6fdSKai Ji 				vec->src_sgl[i].num, NULL, &vec->auth_iv[i],
72685fec6fdSKai Ji 				NULL, &vec->digest[i]);
72785fec6fdSKai Ji 	}
72885fec6fdSKai Ji 
72985fec6fdSKai Ji 	if (unlikely(i < n))
73085fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
73185fec6fdSKai Ji 
73285fec6fdSKai Ji 	dp_ctx->tail = tail;
73385fec6fdSKai Ji 	dp_ctx->cached_enqueue += i;
73485fec6fdSKai Ji 	*status = 0;
73585fec6fdSKai Ji 	return i;
73685fec6fdSKai Ji }
73785fec6fdSKai Ji 
73885fec6fdSKai Ji int
73985fec6fdSKai Ji qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
74085fec6fdSKai Ji 	struct rte_crypto_vec *data, uint16_t n_data_vecs,
74185fec6fdSKai Ji 	union rte_crypto_sym_ofs ofs,
74285fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *cipher_iv,
74385fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *digest,
74485fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *auth_iv,
74585fec6fdSKai Ji 	void *user_data)
74685fec6fdSKai Ji {
74785fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
74885fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
74985fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
75085fec6fdSKai Ji 	struct qat_sym_op_cookie *cookie;
75185fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
75285fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
75385fec6fdSKai Ji 	int32_t data_len;
75485fec6fdSKai Ji 	uint32_t tail = dp_ctx->tail;
755d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr null_digest;
756d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr *job_digest = digest;
75785fec6fdSKai Ji 
75885fec6fdSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)(
75985fec6fdSKai Ji 		(uint8_t *)tx_queue->base_addr + tail);
76085fec6fdSKai Ji 	cookie = qp->op_cookies[tail >> tx_queue->trailz];
76185fec6fdSKai Ji 	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
76285fec6fdSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
76385fec6fdSKai Ji 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
76485fec6fdSKai Ji 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
76585fec6fdSKai Ji 			data, n_data_vecs, NULL, 0);
76685fec6fdSKai Ji 	if (unlikely(data_len < 0))
76785fec6fdSKai Ji 		return -1;
76885fec6fdSKai Ji 
7696c868d6eSCiara Power 	if (ctx->is_zuc256) {
7706c868d6eSCiara Power 		zuc256_modify_iv(cipher_iv->va);
7716c868d6eSCiara Power 		zuc256_modify_iv(auth_iv->va);
7726c868d6eSCiara Power 	}
7736c868d6eSCiara Power 
774d7d52b37SCiara Power 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
775d7d52b37SCiara Power 		null_digest.iova = cookie->digest_null_phys_addr;
776d7d52b37SCiara Power 		job_digest = &null_digest;
777d7d52b37SCiara Power 	}
778d7d52b37SCiara Power 
77985fec6fdSKai Ji 	if (unlikely(enqueue_one_chain_job_gen1(ctx, req, data, n_data_vecs,
780d7d52b37SCiara Power 			NULL, 0, cipher_iv, job_digest, auth_iv, ofs,
7810fda888bSSivaramakrishnan Venkat 			(uint32_t)data_len, cookie)))
78285fec6fdSKai Ji 		return -1;
78385fec6fdSKai Ji 
78485fec6fdSKai Ji 	dp_ctx->tail = tail;
78585fec6fdSKai Ji 	dp_ctx->cached_enqueue++;
78685fec6fdSKai Ji 
78885fec6fdSKai Ji 	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, cipher_iv,
78985fec6fdSKai Ji 			auth_iv, NULL, digest);
790ba02a9f6SMaxime Coquelin 
79185fec6fdSKai Ji 	return 0;
79285fec6fdSKai Ji }
79385fec6fdSKai Ji 
79485fec6fdSKai Ji uint32_t
79585fec6fdSKai Ji qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
79685fec6fdSKai Ji 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
79785fec6fdSKai Ji 	void *user_data[], int *status)
79885fec6fdSKai Ji {
79985fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
80085fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
80185fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
80285fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
80385fec6fdSKai Ji 	uint32_t i, n;
80485fec6fdSKai Ji 	uint32_t tail;
80585fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
80685fec6fdSKai Ji 	int32_t data_len;
807d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr null_digest;
808d7d52b37SCiara Power 	struct rte_crypto_va_iova_ptr *job_digest;
80985fec6fdSKai Ji 
81085fec6fdSKai Ji 	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
81185fec6fdSKai Ji 	if (unlikely(n == 0)) {
81285fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
81385fec6fdSKai Ji 		*status = 0;
81485fec6fdSKai Ji 		return 0;
81585fec6fdSKai Ji 	}
81685fec6fdSKai Ji 
81785fec6fdSKai Ji 	tail = dp_ctx->tail;
81885fec6fdSKai Ji 
81985fec6fdSKai Ji 	for (i = 0; i < n; i++) {
82085fec6fdSKai Ji 		struct qat_sym_op_cookie *cookie =
82185fec6fdSKai Ji 			qp->op_cookies[tail >> tx_queue->trailz];
82285fec6fdSKai Ji 
82385fec6fdSKai Ji 		req  = (struct icp_qat_fw_la_bulk_req *)(
82485fec6fdSKai Ji 			(uint8_t *)tx_queue->base_addr + tail);
82585fec6fdSKai Ji 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
82685fec6fdSKai Ji 
827ff01b26fSKai Ji 		if (vec->dest_sgl) {
828ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
829ff01b26fSKai Ji 				user_data[i], cookie,
830ff01b26fSKai Ji 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
831ff01b26fSKai Ji 				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
832ff01b26fSKai Ji 		} else {
833ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
834ff01b26fSKai Ji 				user_data[i], cookie,
835ff01b26fSKai Ji 				vec->src_sgl[i].vec,
836ff01b26fSKai Ji 				vec->src_sgl[i].num, NULL, 0);
837ff01b26fSKai Ji 		}
838ff01b26fSKai Ji 
83985fec6fdSKai Ji 		if (unlikely(data_len < 0))
84085fec6fdSKai Ji 			break;
84185fec6fdSKai Ji 
8426c868d6eSCiara Power 		if (ctx->is_zuc256) {
8436c868d6eSCiara Power 			zuc256_modify_iv(vec->iv[i].va);
8446c868d6eSCiara Power 			zuc256_modify_iv(vec->auth_iv[i].va);
8456c868d6eSCiara Power 		}
8466c868d6eSCiara Power 
847d7d52b37SCiara Power 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
848d7d52b37SCiara Power 			null_digest.iova = cookie->digest_null_phys_addr;
849d7d52b37SCiara Power 			job_digest = &null_digest;
850d7d52b37SCiara Power 		} else
851d7d52b37SCiara Power 			job_digest = &vec->digest[i];
852d7d52b37SCiara Power 
85385fec6fdSKai Ji 		if (unlikely(enqueue_one_chain_job_gen1(ctx, req,
85485fec6fdSKai Ji 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
85585fec6fdSKai Ji 				NULL, 0,
856d7d52b37SCiara Power 				&vec->iv[i], job_digest,
8570fda888bSSivaramakrishnan Venkat 				&vec->auth_iv[i], ofs, (uint32_t)data_len, cookie)))
85885fec6fdSKai Ji 			break;
85985fec6fdSKai Ji 
86085fec6fdSKai Ji 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
86185fec6fdSKai Ji 
86285fec6fdSKai Ji 		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
86385fec6fdSKai Ji 				vec->src_sgl[i].num, &vec->iv[i],
86485fec6fdSKai Ji 				&vec->auth_iv[i],
86585fec6fdSKai Ji 				NULL, &vec->digest[i]);
86685fec6fdSKai Ji 	}
86785fec6fdSKai Ji 
86885fec6fdSKai Ji 	if (unlikely(i < n))
86985fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
87085fec6fdSKai Ji 
87185fec6fdSKai Ji 	dp_ctx->tail = tail;
87285fec6fdSKai Ji 	dp_ctx->cached_enqueue += i;
87385fec6fdSKai Ji 	*status = 0;
87485fec6fdSKai Ji 	return i;
87585fec6fdSKai Ji }
87685fec6fdSKai Ji 
87785fec6fdSKai Ji int
87885fec6fdSKai Ji qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
87985fec6fdSKai Ji 	struct rte_crypto_vec *data, uint16_t n_data_vecs,
88085fec6fdSKai Ji 	union rte_crypto_sym_ofs ofs,
88185fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *iv,
88285fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *digest,
88385fec6fdSKai Ji 	struct rte_crypto_va_iova_ptr *aad,
88485fec6fdSKai Ji 	void *user_data)
88585fec6fdSKai Ji {
88685fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
88785fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
88885fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
88985fec6fdSKai Ji 	struct qat_sym_op_cookie *cookie;
89085fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
89185fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
89285fec6fdSKai Ji 
89385fec6fdSKai Ji 	int32_t data_len;
89485fec6fdSKai Ji 	uint32_t tail = dp_ctx->tail;
89585fec6fdSKai Ji 
89685fec6fdSKai Ji 	req = (struct icp_qat_fw_la_bulk_req *)(
89785fec6fdSKai Ji 		(uint8_t *)tx_queue->base_addr + tail);
89885fec6fdSKai Ji 	cookie = qp->op_cookies[tail >> tx_queue->trailz];
89985fec6fdSKai Ji 	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
90085fec6fdSKai Ji 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
90185fec6fdSKai Ji 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
90285fec6fdSKai Ji 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
90385fec6fdSKai Ji 			data, n_data_vecs, NULL, 0);
90485fec6fdSKai Ji 	if (unlikely(data_len < 0))
90585fec6fdSKai Ji 		return -1;
90685fec6fdSKai Ji 
90785fec6fdSKai Ji 	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
90885fec6fdSKai Ji 		(uint32_t)data_len);
90985fec6fdSKai Ji 
91085fec6fdSKai Ji 	dp_ctx->tail = tail;
91185fec6fdSKai Ji 	dp_ctx->cached_enqueue++;
91285fec6fdSKai Ji 
91385fec6fdSKai Ji 	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
91485fec6fdSKai Ji 			NULL, aad, digest);
915ba02a9f6SMaxime Coquelin 
91685fec6fdSKai Ji 	return 0;
91785fec6fdSKai Ji }
91885fec6fdSKai Ji 
91985fec6fdSKai Ji uint32_t
92085fec6fdSKai Ji qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
92185fec6fdSKai Ji 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
92285fec6fdSKai Ji 	void *user_data[], int *status)
92385fec6fdSKai Ji {
92485fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
92585fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
92685fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
92785fec6fdSKai Ji 	struct qat_sym_session *ctx = dp_ctx->session;
92885fec6fdSKai Ji 	uint32_t i, n;
92985fec6fdSKai Ji 	uint32_t tail;
93085fec6fdSKai Ji 	struct icp_qat_fw_la_bulk_req *req;
93185fec6fdSKai Ji 	int32_t data_len;
93285fec6fdSKai Ji 
93385fec6fdSKai Ji 	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
93485fec6fdSKai Ji 	if (unlikely(n == 0)) {
93585fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
93685fec6fdSKai Ji 		*status = 0;
93785fec6fdSKai Ji 		return 0;
93885fec6fdSKai Ji 	}
93985fec6fdSKai Ji 
94085fec6fdSKai Ji 	tail = dp_ctx->tail;
94185fec6fdSKai Ji 
94285fec6fdSKai Ji 	for (i = 0; i < n; i++) {
94385fec6fdSKai Ji 		struct qat_sym_op_cookie *cookie =
94485fec6fdSKai Ji 			qp->op_cookies[tail >> tx_queue->trailz];
94585fec6fdSKai Ji 
94685fec6fdSKai Ji 		req  = (struct icp_qat_fw_la_bulk_req *)(
94785fec6fdSKai Ji 			(uint8_t *)tx_queue->base_addr + tail);
94885fec6fdSKai Ji 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
94985fec6fdSKai Ji 
950ff01b26fSKai Ji 		if (vec->dest_sgl) {
951ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
952ff01b26fSKai Ji 				user_data[i], cookie,
953ff01b26fSKai Ji 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
954ff01b26fSKai Ji 				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
955ff01b26fSKai Ji 		} else {
956ff01b26fSKai Ji 			data_len = qat_sym_build_req_set_data(req,
957ff01b26fSKai Ji 				user_data[i], cookie,
958ff01b26fSKai Ji 				vec->src_sgl[i].vec,
959ff01b26fSKai Ji 				vec->src_sgl[i].num, NULL, 0);
960ff01b26fSKai Ji 		}
961ff01b26fSKai Ji 
96285fec6fdSKai Ji 		if (unlikely(data_len < 0))
96385fec6fdSKai Ji 			break;
96485fec6fdSKai Ji 
96585fec6fdSKai Ji 		enqueue_one_aead_job_gen1(ctx, req, &vec->iv[i],
96685fec6fdSKai Ji 				&vec->digest[i], &vec->aad[i], ofs,
96785fec6fdSKai Ji 				(uint32_t)data_len);
96885fec6fdSKai Ji 
96985fec6fdSKai Ji 		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
97085fec6fdSKai Ji 
97185fec6fdSKai Ji 		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
97285fec6fdSKai Ji 				vec->src_sgl[i].num, &vec->iv[i], NULL,
97385fec6fdSKai Ji 				&vec->aad[i], &vec->digest[i]);
97485fec6fdSKai Ji 	}
97585fec6fdSKai Ji 
97685fec6fdSKai Ji 	if (unlikely(i < n))
97785fec6fdSKai Ji 		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
97885fec6fdSKai Ji 
97985fec6fdSKai Ji 	dp_ctx->tail = tail;
98085fec6fdSKai Ji 	dp_ctx->cached_enqueue += i;
98185fec6fdSKai Ji 	*status = 0;
98285fec6fdSKai Ji 	return i;
98385fec6fdSKai Ji }
98485fec6fdSKai Ji 
98585fec6fdSKai Ji 
98685fec6fdSKai Ji uint32_t
98785fec6fdSKai Ji qat_sym_dp_dequeue_burst_gen1(void *qp_data, uint8_t *drv_ctx,
98885fec6fdSKai Ji 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
98985fec6fdSKai Ji 	uint32_t max_nb_to_dequeue,
99085fec6fdSKai Ji 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
99185fec6fdSKai Ji 	void **out_user_data, uint8_t is_user_data_array,
99285fec6fdSKai Ji 	uint32_t *n_success_jobs, int *return_status)
99385fec6fdSKai Ji {
99485fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
99585fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
99685fec6fdSKai Ji 	struct qat_queue *rx_queue = &qp->rx_q;
99785fec6fdSKai Ji 	struct icp_qat_fw_comn_resp *resp;
99885fec6fdSKai Ji 	void *resp_opaque;
99985fec6fdSKai Ji 	uint32_t i, n, inflight;
100085fec6fdSKai Ji 	uint32_t head;
100185fec6fdSKai Ji 	uint8_t status;
100285fec6fdSKai Ji 
100385fec6fdSKai Ji 	*n_success_jobs = 0;
100485fec6fdSKai Ji 	*return_status = 0;
100585fec6fdSKai Ji 	head = dp_ctx->head;
100685fec6fdSKai Ji 
100785fec6fdSKai Ji 	inflight = qp->enqueued - qp->dequeued;
100885fec6fdSKai Ji 	if (unlikely(inflight == 0))
100985fec6fdSKai Ji 		return 0;
101085fec6fdSKai Ji 
101185fec6fdSKai Ji 	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
101285fec6fdSKai Ji 			head);
101385fec6fdSKai Ji 	/* no operation ready */
101485fec6fdSKai Ji 	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
101585fec6fdSKai Ji 		return 0;
101685fec6fdSKai Ji 
101785fec6fdSKai Ji 	resp_opaque = (void *)(uintptr_t)resp->opaque_data;
101885fec6fdSKai Ji 	/* get the dequeue count */
101985fec6fdSKai Ji 	if (get_dequeue_count) {
102085fec6fdSKai Ji 		n = get_dequeue_count(resp_opaque);
102185fec6fdSKai Ji 		if (unlikely(n == 0))
102285fec6fdSKai Ji 			return 0;
102385fec6fdSKai Ji 	} else {
102485fec6fdSKai Ji 		if (unlikely(max_nb_to_dequeue == 0))
102585fec6fdSKai Ji 			return 0;
102685fec6fdSKai Ji 		n = max_nb_to_dequeue;
102785fec6fdSKai Ji 	}
102885fec6fdSKai Ji 
102985fec6fdSKai Ji 	out_user_data[0] = resp_opaque;
103085fec6fdSKai Ji 	status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
103185fec6fdSKai Ji 	post_dequeue(resp_opaque, 0, status);
103285fec6fdSKai Ji 	*n_success_jobs += status;
103385fec6fdSKai Ji 
103485fec6fdSKai Ji 	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
103585fec6fdSKai Ji 
103685fec6fdSKai Ji 	/* we already finished dequeue when n == 1 */
103785fec6fdSKai Ji 	if (unlikely(n == 1)) {
103885fec6fdSKai Ji 		i = 1;
103985fec6fdSKai Ji 		goto end_deq;
104085fec6fdSKai Ji 	}
104185fec6fdSKai Ji 
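	/*
	 * With a user-data array each response keeps its own opaque pointer;
	 * otherwise all dequeued responses report the first opaque value.
	 */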
104285fec6fdSKai Ji 	if (is_user_data_array) {
104385fec6fdSKai Ji 		for (i = 1; i < n; i++) {
104485fec6fdSKai Ji 			resp = (struct icp_qat_fw_comn_resp *)(
104585fec6fdSKai Ji 				(uint8_t *)rx_queue->base_addr + head);
104685fec6fdSKai Ji 			if (unlikely(*(uint32_t *)resp ==
104785fec6fdSKai Ji 					ADF_RING_EMPTY_SIG))
104885fec6fdSKai Ji 				goto end_deq;
104985fec6fdSKai Ji 			out_user_data[i] = (void *)(uintptr_t)resp->opaque_data;
105085fec6fdSKai Ji 			status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
105185fec6fdSKai Ji 			*n_success_jobs += status;
105285fec6fdSKai Ji 			post_dequeue(out_user_data[i], i, status);
105385fec6fdSKai Ji 			head = (head + rx_queue->msg_size) &
105485fec6fdSKai Ji 					rx_queue->modulo_mask;
105585fec6fdSKai Ji 		}
105685fec6fdSKai Ji 
105785fec6fdSKai Ji 		goto end_deq;
105885fec6fdSKai Ji 	}
105985fec6fdSKai Ji 
106085fec6fdSKai Ji 	/* opaque is not array */
106185fec6fdSKai Ji 	for (i = 1; i < n; i++) {
106285fec6fdSKai Ji 		resp = (struct icp_qat_fw_comn_resp *)(
106385fec6fdSKai Ji 			(uint8_t *)rx_queue->base_addr + head);
106485fec6fdSKai Ji 		status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
106585fec6fdSKai Ji 		if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
106685fec6fdSKai Ji 			goto end_deq;
106785fec6fdSKai Ji 		head = (head + rx_queue->msg_size) &
106885fec6fdSKai Ji 				rx_queue->modulo_mask;
106985fec6fdSKai Ji 		post_dequeue(resp_opaque, i, status);
107085fec6fdSKai Ji 		*n_success_jobs += status;
107185fec6fdSKai Ji 	}
107285fec6fdSKai Ji 
107385fec6fdSKai Ji end_deq:
107485fec6fdSKai Ji 	dp_ctx->head = head;
107585fec6fdSKai Ji 	dp_ctx->cached_dequeue += i;
107685fec6fdSKai Ji 	return i;
107785fec6fdSKai Ji }
107885fec6fdSKai Ji 
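/*
 * Dequeue a single response from the RX ring without committing it to the
 * hardware: the new head is only cached in the data-path context and the
 * head CSR is written later by qat_sym_dp_dequeue_done_gen1(). Returns the
 * opaque user data supplied at enqueue time, or NULL if the ring is empty.
 */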
107985fec6fdSKai Ji void *
108085fec6fdSKai Ji qat_sym_dp_dequeue_single_gen1(void *qp_data, uint8_t *drv_ctx,
108185fec6fdSKai Ji 	int *dequeue_status, enum rte_crypto_op_status *op_status)
108285fec6fdSKai Ji {
108385fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
108485fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
108585fec6fdSKai Ji 	struct qat_queue *rx_queue = &qp->rx_q;
108685fec6fdSKai Ji 	register struct icp_qat_fw_comn_resp *resp;
108785fec6fdSKai Ji 
108885fec6fdSKai Ji 	resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
108985fec6fdSKai Ji 			dp_ctx->head);
109085fec6fdSKai Ji 
109185fec6fdSKai Ji 	if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
109285fec6fdSKai Ji 		return NULL;
109385fec6fdSKai Ji 
109485fec6fdSKai Ji 	dp_ctx->head = (dp_ctx->head + rx_queue->msg_size) &
109585fec6fdSKai Ji 			rx_queue->modulo_mask;
109685fec6fdSKai Ji 	dp_ctx->cached_dequeue++;
109785fec6fdSKai Ji 
109885fec6fdSKai Ji 	*op_status = QAT_SYM_DP_IS_RESP_SUCCESS(resp) ?
109985fec6fdSKai Ji 			RTE_CRYPTO_OP_STATUS_SUCCESS :
110085fec6fdSKai Ji 			RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
110185fec6fdSKai Ji 	*dequeue_status = 0;
110285fec6fdSKai Ji 	return (void *)(uintptr_t)resp->opaque_data;
110385fec6fdSKai Ji }
110485fec6fdSKai Ji 
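/*
 * Commit 'n' previously cached raw enqueue operations by writing the cached
 * TX tail to the ring-tail CSR. Returns -1 if 'n' does not match the number
 * of operations cached since the last commit.
 */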
110585fec6fdSKai Ji int
110685fec6fdSKai Ji qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
110785fec6fdSKai Ji {
110885fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
110985fec6fdSKai Ji 	struct qat_queue *tx_queue = &qp->tx_q;
111085fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
111185fec6fdSKai Ji 
111285fec6fdSKai Ji 	if (unlikely(dp_ctx->cached_enqueue != n))
111385fec6fdSKai Ji 		return -1;
111485fec6fdSKai Ji 
111585fec6fdSKai Ji 	qp->enqueued += n;
111685fec6fdSKai Ji 	qp->stats.enqueued_count += n;
111785fec6fdSKai Ji 
111885fec6fdSKai Ji 	tx_queue->tail = dp_ctx->tail;
111985fec6fdSKai Ji 
112085fec6fdSKai Ji 	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
112185fec6fdSKai Ji 			tx_queue->hw_bundle_number,
112285fec6fdSKai Ji 			tx_queue->hw_queue_number, tx_queue->tail);
112385fec6fdSKai Ji 	tx_queue->csr_tail = tx_queue->tail;
112485fec6fdSKai Ji 	dp_ctx->cached_enqueue = 0;
112585fec6fdSKai Ji 
112685fec6fdSKai Ji 	return 0;
112785fec6fdSKai Ji }
112885fec6fdSKai Ji 
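/*
 * Commit 'n' previously cached dequeues. Once more than
 * QAT_CSR_HEAD_WRITE_THRESH responses have accumulated, the processed
 * descriptors are re-marked with the empty-signature byte (handling ring
 * wrap-around) and the new head is written to the head CSR, keeping MMIO
 * writes infrequent.
 */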
112985fec6fdSKai Ji int
113085fec6fdSKai Ji qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n)
113185fec6fdSKai Ji {
113285fec6fdSKai Ji 	struct qat_qp *qp = qp_data;
113385fec6fdSKai Ji 	struct qat_queue *rx_queue = &qp->rx_q;
113485fec6fdSKai Ji 	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
113585fec6fdSKai Ji 
113685fec6fdSKai Ji 	if (unlikely(dp_ctx->cached_dequeue != n))
113785fec6fdSKai Ji 		return -1;
113885fec6fdSKai Ji 
113985fec6fdSKai Ji 	rx_queue->head = dp_ctx->head;
114085fec6fdSKai Ji 	rx_queue->nb_processed_responses += n;
114185fec6fdSKai Ji 	qp->dequeued += n;
114285fec6fdSKai Ji 	qp->stats.dequeued_count += n;
114385fec6fdSKai Ji 	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
114485fec6fdSKai Ji 		uint32_t old_head, new_head;
114585fec6fdSKai Ji 		uint32_t max_head;
114685fec6fdSKai Ji 
114785fec6fdSKai Ji 		old_head = rx_queue->csr_head;
114885fec6fdSKai Ji 		new_head = rx_queue->head;
114985fec6fdSKai Ji 		max_head = qp->nb_descriptors * rx_queue->msg_size;
115085fec6fdSKai Ji 
115185fec6fdSKai Ji 		/* write out free descriptors */
115285fec6fdSKai Ji 		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
115385fec6fdSKai Ji 
115485fec6fdSKai Ji 		if (new_head < old_head) {
115585fec6fdSKai Ji 			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
115685fec6fdSKai Ji 					max_head - old_head);
115785fec6fdSKai Ji 			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
115885fec6fdSKai Ji 					new_head);
115985fec6fdSKai Ji 		} else {
116085fec6fdSKai Ji 			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
116185fec6fdSKai Ji 					old_head);
116285fec6fdSKai Ji 		}
116385fec6fdSKai Ji 		rx_queue->nb_processed_responses = 0;
116485fec6fdSKai Ji 		rx_queue->csr_head = new_head;
116585fec6fdSKai Ji 
116685fec6fdSKai Ji 		/* write current head to CSR */
116785fec6fdSKai Ji 		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
116885fec6fdSKai Ji 			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
116985fec6fdSKai Ji 			new_head);
117085fec6fdSKai Ji 	}
117185fec6fdSKai Ji 
117285fec6fdSKai Ji 	dp_ctx->cached_dequeue = 0;
117385fec6fdSKai Ji 	return 0;
117485fec6fdSKai Ji }
117585fec6fdSKai Ji 
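/*
 * Fill in the rte_crypto_raw_dp_ctx callbacks for a GEN1 device. The
 * dequeue handlers are shared by all session types, while the enqueue
 * handlers are chosen per session: AEAD (GCM, CCM, CHACHA20-POLY1305),
 * cipher+hash chain, auth-only (including GMAC) or cipher-only.
 */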
117685fec6fdSKai Ji int
117785fec6fdSKai Ji qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx)
117885fec6fdSKai Ji {
117985fec6fdSKai Ji 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
118085fec6fdSKai Ji 	struct qat_sym_session *ctx = _ctx;
118185fec6fdSKai Ji 
118285fec6fdSKai Ji 	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen1;
118385fec6fdSKai Ji 	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
118485fec6fdSKai Ji 	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
118585fec6fdSKai Ji 	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen1;
118685fec6fdSKai Ji 
118785fec6fdSKai Ji 	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
118885fec6fdSKai Ji 			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
118985fec6fdSKai Ji 			!ctx->is_gmac) {
119085fec6fdSKai Ji 		/* AES-GCM or AES-CCM */
119185fec6fdSKai Ji 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
119285fec6fdSKai Ji 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
119385fec6fdSKai Ji 			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
119485fec6fdSKai Ji 			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
119585fec6fdSKai Ji 			&& ctx->qat_hash_alg ==
119685fec6fdSKai Ji 					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
119785fec6fdSKai Ji 			raw_dp_ctx->enqueue_burst =
119885fec6fdSKai Ji 					qat_sym_dp_enqueue_aead_jobs_gen1;
119985fec6fdSKai Ji 			raw_dp_ctx->enqueue =
120085fec6fdSKai Ji 					qat_sym_dp_enqueue_single_aead_gen1;
120185fec6fdSKai Ji 		} else {
120285fec6fdSKai Ji 			raw_dp_ctx->enqueue_burst =
120385fec6fdSKai Ji 					qat_sym_dp_enqueue_chain_jobs_gen1;
120485fec6fdSKai Ji 			raw_dp_ctx->enqueue =
120585fec6fdSKai Ji 					qat_sym_dp_enqueue_single_chain_gen1;
120685fec6fdSKai Ji 		}
120785fec6fdSKai Ji 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
120885fec6fdSKai Ji 		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
120985fec6fdSKai Ji 		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
121085fec6fdSKai Ji 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
121185fec6fdSKai Ji 		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
121285fec6fdSKai Ji 			ctx->qat_cipher_alg ==
121385fec6fdSKai Ji 				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
121485fec6fdSKai Ji 			raw_dp_ctx->enqueue_burst =
121585fec6fdSKai Ji 					qat_sym_dp_enqueue_aead_jobs_gen1;
121685fec6fdSKai Ji 			raw_dp_ctx->enqueue =
121785fec6fdSKai Ji 					qat_sym_dp_enqueue_single_aead_gen1;
121885fec6fdSKai Ji 		} else {
121985fec6fdSKai Ji 			raw_dp_ctx->enqueue_burst =
122085fec6fdSKai Ji 					qat_sym_dp_enqueue_cipher_jobs_gen1;
122185fec6fdSKai Ji 			raw_dp_ctx->enqueue =
122285fec6fdSKai Ji 					qat_sym_dp_enqueue_single_cipher_gen1;
122385fec6fdSKai Ji 		}
122485fec6fdSKai Ji 	} else
122585fec6fdSKai Ji 		return -1;
122685fec6fdSKai Ji 
122785fec6fdSKai Ji 	return 0;
122885fec6fdSKai Ji }
122985fec6fdSKai Ji 
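/*
 * Select the per-process request-build function for a GEN1 session.
 * Combinations treated below as unsupported mixed crypto (ZUC or SNOW 3G
 * hash paired with a cipher from a different family, or AES-CMAC/NULL hash
 * with a SNOW 3G or ZUC cipher) are rejected with -ENOTSUP.
 */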
123085fec6fdSKai Ji int
1231254558c8SKai Ji qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
1232254558c8SKai Ji {
1233254558c8SKai Ji 	struct qat_sym_session *ctx = session;
1234254558c8SKai Ji 	qat_sym_build_request_t build_request = NULL;
1235254558c8SKai Ji 	enum rte_proc_type_t proc_type = rte_eal_process_type();
1236254558c8SKai Ji 	int handle_mixed = 0;
1237254558c8SKai Ji 
12381df04571SKai Ji 	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
12391df04571SKai Ji 		return -EINVAL;
12401df04571SKai Ji 
1241254558c8SKai Ji 	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1242254558c8SKai Ji 			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
1243254558c8SKai Ji 			!ctx->is_gmac) {
1244254558c8SKai Ji 		/* AES-GCM or AES-CCM */
1245254558c8SKai Ji 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1246254558c8SKai Ji 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
1247254558c8SKai Ji 			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
1248254558c8SKai Ji 			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
1249254558c8SKai Ji 			&& ctx->qat_hash_alg ==
1250254558c8SKai Ji 					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
1251254558c8SKai Ji 			/* do_aead = 1; */
1252254558c8SKai Ji 			build_request = qat_sym_build_op_aead_gen1;
1253254558c8SKai Ji 		} else {
1254254558c8SKai Ji 			/* do_auth = 1; do_cipher = 1; */
1255254558c8SKai Ji 			build_request = qat_sym_build_op_chain_gen1;
1256254558c8SKai Ji 			handle_mixed = 1;
1257254558c8SKai Ji 		}
1258254558c8SKai Ji 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
1259254558c8SKai Ji 		/* do_auth = 1; do_cipher = 0; */
1260254558c8SKai Ji 		build_request = qat_sym_build_op_auth_gen1;
1261254558c8SKai Ji 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1262254558c8SKai Ji 		/* do_auth = 0; do_cipher = 1; */
1263254558c8SKai Ji 		build_request = qat_sym_build_op_cipher_gen1;
1264ce7a737cSKevin O'Sullivan 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_CRC) {
1265ce7a737cSKevin O'Sullivan 		/* do_auth = 1; do_cipher = 1; */
1266ce7a737cSKevin O'Sullivan 		build_request = qat_sym_build_op_chain_gen1;
1267ce7a737cSKevin O'Sullivan 		handle_mixed = 1;
1268254558c8SKai Ji 	}
1269254558c8SKai Ji 
1270254558c8SKai Ji 	if (build_request)
1271254558c8SKai Ji 		ctx->build_request[proc_type] = build_request;
1272254558c8SKai Ji 	else
1273254558c8SKai Ji 		return -EINVAL;
1274254558c8SKai Ji 
1275254558c8SKai Ji 	/* no more work if not mixed op */
1276254558c8SKai Ji 	if (!handle_mixed)
1277254558c8SKai Ji 		return 0;
1278254558c8SKai Ji 
1279254558c8SKai Ji 	/* Check for algorithm combinations not supported in mixed mode */
1280254558c8SKai Ji 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
1281254558c8SKai Ji 			ctx->qat_cipher_alg !=
1282254558c8SKai Ji 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1283254558c8SKai Ji 		return -ENOTSUP;
1284254558c8SKai Ji 	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
1285254558c8SKai Ji 			ctx->qat_cipher_alg !=
1286254558c8SKai Ji 			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
1287254558c8SKai Ji 		return -ENOTSUP;
1288254558c8SKai Ji 	} else if ((ctx->aes_cmac ||
1289254558c8SKai Ji 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
1290254558c8SKai Ji 			(ctx->qat_cipher_alg ==
1291254558c8SKai Ji 			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1292254558c8SKai Ji 			ctx->qat_cipher_alg ==
1293254558c8SKai Ji 			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
1294254558c8SKai Ji 		return -ENOTSUP;
1295254558c8SKai Ji 	}
1296254558c8SKai Ji 
1297254558c8SKai Ji 	return 0;
1298254558c8SKai Ji }
12990c4546deSFan Zhang 
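/*
 * Constructor run during DPDK initialization: registers the GEN1 handlers
 * for capabilities, session setup, raw data-path contexts, feature flags
 * and the security context in the per-generation dispatch table.
 */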
13000c4546deSFan Zhang RTE_INIT(qat_sym_crypto_gen1_init)
13010c4546deSFan Zhang {
13020c4546deSFan Zhang 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
13030c4546deSFan Zhang 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
13040c4546deSFan Zhang 			qat_sym_crypto_cap_get_gen1;
1305254558c8SKai Ji 	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
1306254558c8SKai Ji 			qat_sym_crypto_set_session_gen1;
130785fec6fdSKai Ji 	qat_sym_gen_dev_ops[QAT_GEN1].set_raw_dp_ctx =
130885fec6fdSKai Ji 			qat_sym_configure_raw_dp_ctx_gen1;
13090c4546deSFan Zhang 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
13100c4546deSFan Zhang 			qat_sym_crypto_feature_flags_get_gen1;
13110c4546deSFan Zhang 	qat_sym_gen_dev_ops[QAT_GEN1].create_security_ctx =
13120c4546deSFan Zhang 			qat_sym_create_security_gen1;
13130c4546deSFan Zhang }
1314