/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2022 Intel Corporation
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include "qat_sym_session.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
#include "adf_transport_access_macros_gen4vf.h"

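/*
 * Algorithms only advertised when the device's legacy_alg option is set.
 * qat_sym_crypto_cap_get_gen4() below copies this table in front of the
 * default capability table in that case.
 */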
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
	QAT_SYM_PLAIN_AUTH_CAP(SHA1,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 1, 20, 1)),
	QAT_SYM_AUTH_CAP(SHA224,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA224_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 28, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA1_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 20, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(SM4_ECB,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 0, 0, 0)),
};

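/*
 * Default GEN4 symmetric capability table, terminated with
 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() so it can be copied verbatim
 * into the capability memzone.
 */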
static struct rte_cryptodev_capabilities qat_sym_crypto_caps_gen4[] = {
	QAT_SYM_CIPHER_CAP(AES_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(SHA256_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 1, 64, 1), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512_HMAC,
		CAP_SET(block_size, 128),
		CAP_RNG(key_size, 1, 128, 1), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_XCBC_MAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 12, 12, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(AES_CMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_DOCSISBPI,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 16), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AUTH_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(digest_size),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(NULL,
		CAP_SET(block_size, 1),
		CAP_RNG_ZERO(key_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA256,
		CAP_SET(block_size, 64),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 32, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA384,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 48, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_AUTH_CAP(SHA512,
		CAP_SET(block_size, 128),
		CAP_RNG_ZERO(key_size), CAP_RNG(digest_size, 1, 64, 1),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	QAT_SYM_CIPHER_CAP(AES_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_AEAD_CAP(AES_GCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(AES_CCM,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(digest_size, 4, 16, 2),
		CAP_RNG(aad_size, 0, 224, 1), CAP_RNG(iv_size, 7, 13, 1)),
	QAT_SYM_AUTH_CAP(AES_GMAC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 32, 8), CAP_RNG(digest_size, 8, 16, 4),
		CAP_RNG_ZERO(aad_size), CAP_RNG(iv_size, 0, 12, 12)),
	QAT_SYM_AEAD_CAP(CHACHA20_POLY1305,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 32, 32, 0),
		CAP_RNG(digest_size, 16, 16, 0),
		CAP_RNG(aad_size, 0, 240, 1), CAP_RNG(iv_size, 12, 12, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CBC,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_CIPHER_CAP(SM4_CTR,
		CAP_SET(block_size, 16),
		CAP_RNG(key_size, 16, 16, 0), CAP_RNG(iv_size, 16, 16, 0)),
	QAT_SYM_PLAIN_AUTH_CAP(SM3,
		CAP_SET(block_size, 64),
		CAP_RNG(digest_size, 32, 32, 0)),
	QAT_SYM_AUTH_CAP(SM3_HMAC,
		CAP_SET(block_size, 64),
		CAP_RNG(key_size, 16, 64, 4), CAP_RNG(digest_size, 32, 32, 0),
		CAP_RNG_ZERO(aad_size), CAP_RNG_ZERO(iv_size)),
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

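/*
 * Publish the capability table through a memzone named by capa_memz_name.
 * When legacy_alg is enabled the legacy table is prepended to the default
 * one; slice_map is unused for GEN4.
 */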
static int
qat_sym_crypto_cap_get_gen4(struct qat_cryptodev_private *internals,
			const char *capa_memz_name,
			const uint16_t __rte_unused slice_map)
{
	uint32_t legacy_capa_num;
	uint32_t size = sizeof(qat_sym_crypto_caps_gen4);
	uint32_t legacy_size = sizeof(qat_sym_crypto_legacy_caps_gen4);

	legacy_capa_num = legacy_size / sizeof(struct rte_cryptodev_capabilities);

	if (unlikely(internals->qat_dev->options.legacy_alg))
		size = size + legacy_size;

	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
	if (internals->capa_mz == NULL) {
		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
				size, rte_socket_id(), 0);
		if (internals->capa_mz == NULL) {
			QAT_LOG(DEBUG,
				"Error allocating memzone for capabilities");
			return -1;
		}
	}

	struct rte_cryptodev_capabilities *addr =
			(struct rte_cryptodev_capabilities *)
				internals->capa_mz->addr;

	struct rte_cryptodev_capabilities *capabilities;

	if (unlikely(internals->qat_dev->options.legacy_alg)) {
		capabilities = qat_sym_crypto_legacy_caps_gen4;
		memcpy(addr, capabilities, legacy_size);
		addr += legacy_capa_num;
	}
	capabilities = qat_sym_crypto_caps_gen4;
	memcpy(addr, capabilities, sizeof(qat_sym_crypto_caps_gen4));
	internals->qat_dev_capabilities = internals->capa_mz->addr;

	return 0;
}

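/*
 * Fill one AEAD request descriptor. For single-pass UCS sessions the AEAD
 * operation is expressed as a GEN4 cipher request: IV, offsets, AAD and
 * digest addresses are written into the 2.0 cipher request parameters.
 * All other sessions fall back to the GEN1 helper.
 */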
static __rte_always_inline void
enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
	struct icp_qat_fw_la_bulk_req *req,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	union rte_crypto_sym_ofs ofs, uint32_t data_len)
{
	if (ctx->is_single_pass && ctx->is_ucs) {
		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
			(void *)&req->serv_specif_rqpars;
		struct icp_qat_fw_la_cipher_req_params *cipher_param =
			(void *)&req->serv_specif_rqpars;

		/* QAT GEN4 uses single-pass mode to treat AEAD as a
		 * cipher operation
		 */
		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
				req);
		cipher_param->cipher_offset = ofs.ofs.cipher.head;
		cipher_param->cipher_length = data_len -
				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

		cipher_param_20->spc_aad_addr = aad->iova;
		cipher_param_20->spc_auth_res_addr = digest->iova;

		return;
	}

	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
}

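/*
 * Build-request callback installed for single-pass UCS AEAD sessions by
 * qat_sym_crypto_set_session_gen4(): convert the crypto op into vectors,
 * copy the session's firmware request template and complete it with
 * enqueue_one_aead_job_gen4().
 */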
static int
qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
		uint8_t *out_msg, void *op_cookie)
{
	register struct icp_qat_fw_la_bulk_req *qat_req;
	struct rte_crypto_op *op = in_op;
	struct qat_sym_op_cookie *cookie = op_cookie;
	struct rte_crypto_sgl in_sgl, out_sgl;
	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
			out_vec[QAT_SYM_SGL_MAX_NUMBER];
	struct rte_crypto_va_iova_ptr cipher_iv;
	struct rte_crypto_va_iova_ptr aad;
	struct rte_crypto_va_iova_ptr digest;
	union rte_crypto_sym_ofs ofs;
	int32_t total_len;

	in_sgl.vec = in_vec;
	out_sgl.vec = out_vec;

	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));

	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
			&cipher_iv, &aad, &digest);
	if (unlikely(ofs.raw == UINT64_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
	if (unlikely(total_len < 0)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
		total_len);

	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
			NULL, &aad, &digest);

	return 0;
}

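/*
 * Raw data-path "enqueue done" hook: commit the descriptors cached by the
 * enqueue calls, update queue statistics and ring the GEN4 VF TX tail CSR.
 * Returns -1 if the caller's count does not match the cached count.
 */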
int
qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_enqueue != n))
		return -1;

	qp->enqueued += n;
	qp->stats.enqueued_count += n;

	tx_queue->tail = dp_ctx->tail;

	WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
		tx_queue->hw_bundle_number,
		tx_queue->hw_queue_number, tx_queue->tail);

	tx_queue->csr_tail = tx_queue->tail;
	dp_ctx->cached_enqueue = 0;

	return 0;
}

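/*
 * Raw data-path "dequeue done" hook: advance the RX head and, once more
 * than QAT_CSR_HEAD_WRITE_THRESH responses have been consumed, clear the
 * processed descriptors (handling ring wrap-around) and write the new head
 * to the GEN4 VF CSR.
 */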
int
qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	struct qat_qp *qp = qp_data;
	struct qat_queue *rx_queue = &qp->rx_q;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;

	if (unlikely(dp_ctx->cached_dequeue != n))
		return -1;

	rx_queue->head = dp_ctx->head;
	rx_queue->nb_processed_responses += n;
	qp->dequeued += n;
	qp->stats.dequeued_count += n;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
		uint32_t old_head, new_head;
		uint32_t max_head;

		old_head = rx_queue->csr_head;
		new_head = rx_queue->head;
		max_head = qp->nb_descriptors * rx_queue->msg_size;

		/* write out free descriptors */
		void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;

		if (new_head < old_head) {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
					max_head - old_head);
			memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
					new_head);
		} else {
			memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
					old_head);
		}
		rx_queue->nb_processed_responses = 0;
		rx_queue->csr_head = new_head;

		/* write current head to CSR */
		WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
			rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
			new_head);
	}

	dp_ctx->cached_dequeue = 0;
	return 0;
}

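/*
 * GEN4 session setup. The GEN1 routine does most of the work; on top of
 * that, single-pass UCS AEAD sessions get the GEN4 build callback, and the
 * mixed hash/cipher combinations that GEN1 rejects with -ENOTSUP are
 * enabled by setting the extended hash flags.
 */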
int
qat_sym_crypto_set_session_gen4(void *cdev, void *session)
{
	struct qat_sym_session *ctx = session;
	enum rte_proc_type_t proc_type = rte_eal_process_type();
	int ret;

	if (proc_type == RTE_PROC_AUTO || proc_type == RTE_PROC_INVALID)
		return -EINVAL;

	ret = qat_sym_crypto_set_session_gen1(cdev, session);
	/* special single-pass build request for GEN4 */
	if (ctx->is_single_pass && ctx->is_ucs)
		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;

	if (ret == -ENOTSUP) {
		/* GEN1 returns -ENOTSUP for mixed algorithm combinations it
		 * cannot handle; these are supported on GEN4.
		 */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
				ctx->qat_cipher_alg !=
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx,
				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
		} else if ((ctx->aes_cmac ||
				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
				(ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
				ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
		}

		ret = 0;
	}

	return ret;
}

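/*
 * Raw data-path enqueue of a single AEAD job: copy the request template,
 * attach the data vectors and complete the descriptor with the GEN4
 * single-pass helper. The ring tail is only cached here and committed by
 * qat_sym_dp_enqueue_done_gen4().
 */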
static int
qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_op_cookie *cookie;
	struct qat_sym_session *ctx = dp_ctx->session;
	struct icp_qat_fw_la_bulk_req *req;

	int32_t data_len;
	uint32_t tail = dp_ctx->tail;

	req = (struct icp_qat_fw_la_bulk_req *)(
		(uint8_t *)tx_queue->base_addr + tail);
	cookie = qp->op_cookies[tail >> tx_queue->trailz];
	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
			data, n_data_vecs, NULL, 0);
	if (unlikely(data_len < 0))
		return -1;

	enqueue_one_aead_job_gen4(ctx, req, iv, digest, aad, ofs,
		(uint32_t)data_len);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue++;

	qat_sym_debug_log_dump(req, ctx, data, n_data_vecs, iv,
			NULL, aad, digest);

	return 0;
}

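/*
 * Burst variant of the above: enqueue up to vec->num AEAD jobs within the
 * free-descriptor budget given by QAT_SYM_DP_GET_MAX_ENQ(). Out-of-place
 * destinations are used when vec->dest_sgl is set; jobs that fail request
 * building have their remaining status entries marked with -1.
 */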
static uint32_t
qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	struct qat_qp *qp = qp_data;
	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
	struct qat_queue *tx_queue = &qp->tx_q;
	struct qat_sym_session *ctx = dp_ctx->session;
	uint32_t i, n;
	uint32_t tail;
	struct icp_qat_fw_la_bulk_req *req;
	int32_t data_len;

	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
	if (unlikely(n == 0)) {
		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
		*status = 0;
		return 0;
	}

	tail = dp_ctx->tail;

	for (i = 0; i < n; i++) {
		struct qat_sym_op_cookie *cookie =
			qp->op_cookies[tail >> tx_queue->trailz];

		req = (struct icp_qat_fw_la_bulk_req *)(
			(uint8_t *)tx_queue->base_addr + tail);
		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));

		if (vec->dest_sgl) {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec, vec->src_sgl[i].num,
				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
		} else {
			data_len = qat_sym_build_req_set_data(req,
				user_data[i], cookie,
				vec->src_sgl[i].vec,
				vec->src_sgl[i].num, NULL, 0);
		}

		if (unlikely(data_len < 0))
			break;

		enqueue_one_aead_job_gen4(ctx, req, &vec->iv[i],
				&vec->digest[i], &vec->aad[i], ofs,
				(uint32_t)data_len);

		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;

		qat_sym_debug_log_dump(req, ctx, vec->src_sgl[i].vec,
				vec->src_sgl[i].num, &vec->iv[i], NULL,
				&vec->aad[i], &vec->digest[i]);
	}

	if (unlikely(i < n))
		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);

	dp_ctx->tail = tail;
	dp_ctx->cached_enqueue += i;
	*status = 0;
	return i;
}

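/*
 * Wire up the rte_crypto_raw_dp_ctx callbacks for a GEN4 session. Most
 * handlers are shared with GEN1; the enqueue/dequeue "done" hooks (GEN4 VF
 * CSRs) and, for single-pass UCS AEAD sessions, the enqueue paths are
 * overridden with the GEN4 implementations above.
 *
 * Illustrative application-side flow (a sketch only, not part of this
 * driver; it uses the generic DPDK raw data-path API):
 *
 *	int sz = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
 *	struct rte_crypto_raw_dp_ctx *dp = rte_zmalloc(NULL, sz, 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, dp,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	// ... submit jobs via dp->enqueue() / dp->enqueue_burst() ...
 *	rte_cryptodev_raw_enqueue_done(dp, nb_enqueued);
 */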
int
qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
{
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
	struct qat_sym_session *ctx = _ctx;

	raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4;
	raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
	raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
	raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4;

	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
			!ctx->is_gmac) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_aead_gen1;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_chain_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_chain_gen1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_aead_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_aead_gen1;
		} else {
			raw_dp_ctx->enqueue_burst =
					qat_sym_dp_enqueue_cipher_jobs_gen1;
			raw_dp_ctx->enqueue =
					qat_sym_dp_enqueue_single_cipher_gen1;
		}
	} else
		return -1;

	if (ctx->is_single_pass && ctx->is_ucs) {
		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead_gen4;
	}

	return 0;
}

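/*
 * Constructor-time registration for GEN4 and the VQAT virtual device
 * (which shares most of these handlers): capability query, session setup
 * and raw data-path configuration come from this file, everything else is
 * reused from the GEN1 implementations.
 */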
RTE_INIT(qat_sym_crypto_gen4_init)
{
	qat_sym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
		qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
	qat_sym_gen_dev_ops[QAT_VQAT].get_capabilities =
		qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
			qat_sym_crypto_cap_get_gen4;
	qat_sym_gen_dev_ops[QAT_VQAT].set_session =
		qat_sym_gen_dev_ops[QAT_GEN4].set_session =
			qat_sym_crypto_set_session_gen4;
	qat_sym_gen_dev_ops[QAT_GEN4].set_raw_dp_ctx =
			qat_sym_configure_raw_dp_ctx_gen4;
	qat_sym_gen_dev_ops[QAT_VQAT].get_feature_flags =
		qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
			qat_sym_crypto_feature_flags_get_gen1;
	qat_sym_gen_dev_ops[QAT_GEN4].create_security_ctx =
			qat_sym_create_security_gen1;
}

RTE_INIT(qat_asym_crypto_gen4_init)
{
	qat_asym_gen_dev_ops[QAT_VQAT].cryptodev_ops =
		qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops =
			&qat_asym_crypto_ops_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].get_capabilities =
		qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities =
			qat_asym_crypto_cap_get_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].get_feature_flags =
		qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags =
			qat_asym_crypto_feature_flags_get_gen1;
	qat_asym_gen_dev_ops[QAT_VQAT].set_session =
		qat_asym_gen_dev_ops[QAT_GEN4].set_session =
			qat_asym_crypto_set_session_gen1;
}