xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc)
19d5f73c2SGagandeep Singh /* SPDX-License-Identifier: BSD-3-Clause
2b5e761fcSGagandeep Singh  * Copyright 2021-2022 NXP
39d5f73c2SGagandeep Singh  */
49d5f73c2SGagandeep Singh 
59d5f73c2SGagandeep Singh #include <rte_byteorder.h>
69d5f73c2SGagandeep Singh #include <rte_common.h>
79d5f73c2SGagandeep Singh #include <cryptodev_pmd.h>
89d5f73c2SGagandeep Singh #include <rte_crypto.h>
99d5f73c2SGagandeep Singh #include <rte_cryptodev.h>
109d5f73c2SGagandeep Singh #include <rte_security_driver.h>
119d5f73c2SGagandeep Singh 
129d5f73c2SGagandeep Singh /* RTA header files */
1378156d38SGagandeep Singh #include <desc/algo.h>
149d5f73c2SGagandeep Singh #include <desc/ipsec.h>
159d5f73c2SGagandeep Singh 
16a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h>
179d5f73c2SGagandeep Singh #include <dpaa_sec.h>
189d5f73c2SGagandeep Singh #include <dpaa_sec_log.h>
199d5f73c2SGagandeep Singh 
/*
 * Driver-private state stored in the rte_cryptodev raw data-path context
 * (drv_ctx). The FD builders below only dereference @session; the ring
 * bookkeeping fields mirror the generic raw-DP enqueue/dequeue flow.
 */
struct dpaa_sec_raw_dp_ctx {
	dpaa_sec_session *session;	/* session this raw-DP ctx is bound to */
	uint32_t tail;		/* NOTE(review): ring tail index — confirm against raw-DP core */
	uint32_t head;		/* NOTE(review): ring head index — confirm against raw-DP core */
	uint16_t cached_enqueue;	/* ops enqueued since last enqueue_done */
	uint16_t cached_dequeue;	/* ops dequeued since last dequeue_done */
};
279d5f73c2SGagandeep Singh 
2878156d38SGagandeep Singh static inline int
2978156d38SGagandeep Singh is_encode(dpaa_sec_session *ses)
3078156d38SGagandeep Singh {
3178156d38SGagandeep Singh 	return ses->dir == DIR_ENC;
3278156d38SGagandeep Singh }
3378156d38SGagandeep Singh 
3478156d38SGagandeep Singh static inline int is_decode(dpaa_sec_session *ses)
3578156d38SGagandeep Singh {
3678156d38SGagandeep Singh 	return ses->dir == DIR_DEC;
3778156d38SGagandeep Singh }
3878156d38SGagandeep Singh 
399d5f73c2SGagandeep Singh static __rte_always_inline int
409d5f73c2SGagandeep Singh dpaa_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
419d5f73c2SGagandeep Singh {
429d5f73c2SGagandeep Singh 	RTE_SET_USED(qp_data);
439d5f73c2SGagandeep Singh 	RTE_SET_USED(drv_ctx);
449d5f73c2SGagandeep Singh 	RTE_SET_USED(n);
459d5f73c2SGagandeep Singh 
469d5f73c2SGagandeep Singh 	return 0;
479d5f73c2SGagandeep Singh }
489d5f73c2SGagandeep Singh 
499d5f73c2SGagandeep Singh static __rte_always_inline int
509d5f73c2SGagandeep Singh dpaa_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
519d5f73c2SGagandeep Singh {
529d5f73c2SGagandeep Singh 	RTE_SET_USED(qp_data);
539d5f73c2SGagandeep Singh 	RTE_SET_USED(drv_ctx);
549d5f73c2SGagandeep Singh 	RTE_SET_USED(n);
559d5f73c2SGagandeep Singh 
569d5f73c2SGagandeep Singh 	return 0;
579d5f73c2SGagandeep Singh }
589d5f73c2SGagandeep Singh 
/*
 * Allocate a per-operation context from the lcore-local context pool of the
 * session's queue pair and zero the first @sg_count SG entries.
 * Returns NULL when the pool is exhausted (data-path warning only).
 */
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_raw_ctx(dpaa_sec_session *ses, int sg_count)
{
	struct dpaa_sec_op_ctx *ctx;
	int i, retval;

	/* One ctx pool per core avoids cross-core contention on the mempool. */
	retval = rte_mempool_get(
			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
			(void **)(&ctx));
	if (!ctx || retval) {
		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
	 * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
	 * each packet, memset is costlier than dcbz_64().
	 * (dcbz_64 zeroes a 64-byte cache line = 4 SG entries, so i += 4.)
	 */
	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
		dcbz_64(&ctx->job.sg[i]);

	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
	/* Offset lets the driver convert virt<->phys without a table lookup. */
	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

	return ctx;
}
869d5f73c2SGagandeep Singh 
879d5f73c2SGagandeep Singh static struct dpaa_sec_job *
889d5f73c2SGagandeep Singh build_dpaa_raw_dp_auth_fd(uint8_t *drv_ctx,
899d5f73c2SGagandeep Singh 			struct rte_crypto_sgl *sgl,
909d5f73c2SGagandeep Singh 			struct rte_crypto_sgl *dest_sgl,
919d5f73c2SGagandeep Singh 			struct rte_crypto_va_iova_ptr *iv,
929d5f73c2SGagandeep Singh 			struct rte_crypto_va_iova_ptr *digest,
939d5f73c2SGagandeep Singh 			struct rte_crypto_va_iova_ptr *auth_iv,
949d5f73c2SGagandeep Singh 			union rte_crypto_sym_ofs ofs,
9578156d38SGagandeep Singh 			void *userdata,
9678156d38SGagandeep Singh 			struct qm_fd *fd)
979d5f73c2SGagandeep Singh {
989d5f73c2SGagandeep Singh 	RTE_SET_USED(dest_sgl);
999d5f73c2SGagandeep Singh 	RTE_SET_USED(iv);
1009d5f73c2SGagandeep Singh 	RTE_SET_USED(auth_iv);
10178156d38SGagandeep Singh 	RTE_SET_USED(fd);
1029d5f73c2SGagandeep Singh 
10378156d38SGagandeep Singh 	dpaa_sec_session *ses =
10478156d38SGagandeep Singh 		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
10578156d38SGagandeep Singh 	struct dpaa_sec_job *cf;
10678156d38SGagandeep Singh 	struct dpaa_sec_op_ctx *ctx;
10778156d38SGagandeep Singh 	struct qm_sg_entry *sg, *out_sg, *in_sg;
10878156d38SGagandeep Singh 	phys_addr_t start_addr;
10978156d38SGagandeep Singh 	uint8_t *old_digest, extra_segs;
11078156d38SGagandeep Singh 	int data_len, data_offset, total_len = 0;
11178156d38SGagandeep Singh 	unsigned int i;
11278156d38SGagandeep Singh 
11378156d38SGagandeep Singh 	for (i = 0; i < sgl->num; i++)
11478156d38SGagandeep Singh 		total_len += sgl->vec[i].len;
11578156d38SGagandeep Singh 
11678156d38SGagandeep Singh 	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
11778156d38SGagandeep Singh 	data_offset =  ofs.ofs.auth.head;
11878156d38SGagandeep Singh 
11978156d38SGagandeep Singh 	/* Support only length in bits for SNOW3G and ZUC */
12078156d38SGagandeep Singh 
12178156d38SGagandeep Singh 	if (is_decode(ses))
12278156d38SGagandeep Singh 		extra_segs = 3;
12378156d38SGagandeep Singh 	else
12478156d38SGagandeep Singh 		extra_segs = 2;
12578156d38SGagandeep Singh 
12678156d38SGagandeep Singh 	if (sgl->num > MAX_SG_ENTRIES) {
12778156d38SGagandeep Singh 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
12878156d38SGagandeep Singh 				MAX_SG_ENTRIES);
1299d5f73c2SGagandeep Singh 		return NULL;
1309d5f73c2SGagandeep Singh 	}
13178156d38SGagandeep Singh 	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + extra_segs);
13278156d38SGagandeep Singh 	if (!ctx)
13378156d38SGagandeep Singh 		return NULL;
13478156d38SGagandeep Singh 
13578156d38SGagandeep Singh 	cf = &ctx->job;
13678156d38SGagandeep Singh 	ctx->userdata = (void *)userdata;
13778156d38SGagandeep Singh 	old_digest = ctx->digest;
13878156d38SGagandeep Singh 
13978156d38SGagandeep Singh 	/* output */
14078156d38SGagandeep Singh 	out_sg = &cf->sg[0];
14178156d38SGagandeep Singh 	qm_sg_entry_set64(out_sg, digest->iova);
14278156d38SGagandeep Singh 	out_sg->length = ses->digest_length;
14378156d38SGagandeep Singh 	cpu_to_hw_sg(out_sg);
14478156d38SGagandeep Singh 
14578156d38SGagandeep Singh 	/* input */
14678156d38SGagandeep Singh 	in_sg = &cf->sg[1];
14778156d38SGagandeep Singh 	/* need to extend the input to a compound frame */
14878156d38SGagandeep Singh 	in_sg->extension = 1;
14978156d38SGagandeep Singh 	in_sg->final = 1;
15078156d38SGagandeep Singh 	in_sg->length = data_len;
15178156d38SGagandeep Singh 	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
15278156d38SGagandeep Singh 
15378156d38SGagandeep Singh 	/* 1st seg */
15478156d38SGagandeep Singh 	sg = in_sg + 1;
15578156d38SGagandeep Singh 
15678156d38SGagandeep Singh 	if (ses->iv.length) {
15778156d38SGagandeep Singh 		uint8_t *iv_ptr;
15878156d38SGagandeep Singh 
15978156d38SGagandeep Singh 		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
16078156d38SGagandeep Singh 						   ses->iv.offset);
16178156d38SGagandeep Singh 
16278156d38SGagandeep Singh 		if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
16378156d38SGagandeep Singh 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
16478156d38SGagandeep Singh 			sg->length = 12;
16578156d38SGagandeep Singh 		} else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
16678156d38SGagandeep Singh 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
16778156d38SGagandeep Singh 			sg->length = 8;
16878156d38SGagandeep Singh 		} else {
16978156d38SGagandeep Singh 			sg->length = ses->iv.length;
17078156d38SGagandeep Singh 		}
17178156d38SGagandeep Singh 		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
17278156d38SGagandeep Singh 		in_sg->length += sg->length;
17378156d38SGagandeep Singh 		cpu_to_hw_sg(sg);
17478156d38SGagandeep Singh 		sg++;
17578156d38SGagandeep Singh 	}
17678156d38SGagandeep Singh 
17778156d38SGagandeep Singh 	qm_sg_entry_set64(sg, sgl->vec[0].iova);
17878156d38SGagandeep Singh 	sg->offset = data_offset;
17978156d38SGagandeep Singh 
18078156d38SGagandeep Singh 	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
18178156d38SGagandeep Singh 		sg->length = data_len;
18278156d38SGagandeep Singh 	} else {
18378156d38SGagandeep Singh 		sg->length = sgl->vec[0].len - data_offset;
18478156d38SGagandeep Singh 
18578156d38SGagandeep Singh 		/* remaining i/p segs */
18678156d38SGagandeep Singh 		for (i = 1; i < sgl->num; i++) {
18778156d38SGagandeep Singh 			cpu_to_hw_sg(sg);
18878156d38SGagandeep Singh 			sg++;
18978156d38SGagandeep Singh 			qm_sg_entry_set64(sg, sgl->vec[i].iova);
19078156d38SGagandeep Singh 			if (data_len > (int)sgl->vec[i].len)
19178156d38SGagandeep Singh 				sg->length = sgl->vec[0].len;
19278156d38SGagandeep Singh 			else
19378156d38SGagandeep Singh 				sg->length = data_len;
19478156d38SGagandeep Singh 
19578156d38SGagandeep Singh 			data_len = data_len - sg->length;
19678156d38SGagandeep Singh 			if (data_len < 1)
19778156d38SGagandeep Singh 				break;
19878156d38SGagandeep Singh 		}
19978156d38SGagandeep Singh 	}
20078156d38SGagandeep Singh 
20178156d38SGagandeep Singh 	if (is_decode(ses)) {
20278156d38SGagandeep Singh 		/* Digest verification case */
20378156d38SGagandeep Singh 		cpu_to_hw_sg(sg);
20478156d38SGagandeep Singh 		sg++;
20578156d38SGagandeep Singh 		rte_memcpy(old_digest, digest->va,
20678156d38SGagandeep Singh 				ses->digest_length);
20778156d38SGagandeep Singh 		start_addr = rte_dpaa_mem_vtop(old_digest);
20878156d38SGagandeep Singh 		qm_sg_entry_set64(sg, start_addr);
20978156d38SGagandeep Singh 		sg->length = ses->digest_length;
21078156d38SGagandeep Singh 		in_sg->length += ses->digest_length;
21178156d38SGagandeep Singh 	}
21278156d38SGagandeep Singh 	sg->final = 1;
21378156d38SGagandeep Singh 	cpu_to_hw_sg(sg);
21478156d38SGagandeep Singh 	cpu_to_hw_sg(in_sg);
21578156d38SGagandeep Singh 
21678156d38SGagandeep Singh 	return cf;
21778156d38SGagandeep Singh }
21878156d38SGagandeep Singh 
/*
 * Build a QMan compound frame for an AEAD (GCM-style) raw operation.
 *
 * Output (sg[0], extension): the cipher region of @dest_sgl (or @sgl when
 * in-place), plus the digest on encode.
 * Input (sg[1], extension): IV, optional AAD (auth_only_len, passed via
 * @auth_iv — NOTE(review): AAD iova appears to ride in auth_iv, confirm
 * against the raw-DP caller), the cipher region of @sgl, and on decode the
 * expected digest for in-SEC verification.
 *
 * Returns the job descriptor, or NULL on too many segments / pool empty.
 */
static inline struct dpaa_sec_job *
build_raw_cipher_auth_gcm_sg(uint8_t *drv_ctx,
			struct rte_crypto_sgl *sgl,
			struct rte_crypto_sgl *dest_sgl,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *auth_iv,
			union rte_crypto_sym_ofs ofs,
			void *userdata,
			struct qm_fd *fd)
{
	dpaa_sec_session *ses =
		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	uint8_t extra_req_segs;
	uint8_t *IV_ptr = iv->va;
	int data_len = 0, aead_len = 0;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	/* 2 compound-frame heads + IV seg + digest seg */
	extra_req_segs = 4;
	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* one more input segment when AAD is present */
	if (ses->auth_only_len)
		extra_req_segs++;

	if (sgl->num > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_raw_ctx(ses,  sgl->num * 2 + extra_req_segs);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->userdata = (void *)userdata;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = aead_len + ses->digest_length;
	else
		out_sg->length = aead_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* out-of-place: write into dest_sgl; otherwise in-place into sgl */
	if (dest_sgl) {
		/* 1st seg */
		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
		sg->offset = ofs.ofs.cipher.head;

		/* Successive segs */
		for (i = 1; i < dest_sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
			sg->length = dest_sgl->vec[i].len;
		}
	} else {
		/* 1st seg */
		qm_sg_entry_set64(sg, sgl->vec[0].iova);
		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
		sg->offset = ofs.ofs.cipher.head;

		/* Successive segs */
		for (i = 1; i < sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, sgl->vec[i].iova);
			sg->length = sgl->vec[i].len;
		}

	}

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, digest->iova);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	/* decode input additionally carries the digest to verify */
	if (is_encode(ses))
		in_sg->length = ses->iv.length + aead_len
						+ ses->auth_only_len;
	else
		in_sg->length = ses->iv.length + aead_len
				+ ses->auth_only_len + ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2 seg auth only */
	if (ses->auth_only_len) {
		sg++;
		qm_sg_entry_set64(sg, auth_iv->iova);
		sg->length = ses->auth_only_len;
		cpu_to_hw_sg(sg);
	}

	/* 3rd seg */
	sg++;
	qm_sg_entry_set64(sg, sgl->vec[0].iova);
	sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
	sg->offset = ofs.ofs.cipher.head;

	/* Successive segs */
	for (i = 1; i < sgl->num; i++) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, sgl->vec[i].iova);
		sg->length =  sgl->vec[i].len;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		/* copy the expected digest so SEC can compare in-line */
		memcpy(ctx->digest, digest->va,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* bit 31 signals AAD-length override in the FD command word */
	if (ses->auth_only_len)
		fd->cmd = 0x80000000 | ses->auth_only_len;

	return cf;
}
375fe6a8ee2SGagandeep Singh 
/*
 * Build a QMan compound frame for a chained cipher+auth raw operation.
 *
 * The auth region must enclose the cipher region; the auth-only prefix
 * (auth_hdr_len) and suffix (auth_tail_len) are packed into auth_only_len
 * (tail << 16 | head) and passed to SEC via the FD command word.
 * Output (sg[0], extension): cipher region of @dest_sgl (or @sgl when
 * in-place), minus cipher.tail, plus digest on encode.
 * Input (sg[1], extension): cipher IV, the auth region of @sgl, and on
 * decode the expected digest for in-SEC verification.
 *
 * Returns the job descriptor, or NULL on too many segments / pool empty.
 */
static inline struct dpaa_sec_job *
build_dpaa_raw_dp_chain_fd(uint8_t *drv_ctx,
			struct rte_crypto_sgl *sgl,
			struct rte_crypto_sgl *dest_sgl,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *auth_iv,
			union rte_crypto_sym_ofs ofs,
			void *userdata,
			struct qm_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa_sec_session *ses =
		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	uint8_t *IV_ptr = iv->va;
	unsigned int i;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;
	uint16_t auth_tail_len;
	uint32_t auth_only_len;
	int data_len = 0, auth_len = 0, cipher_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	auth_tail_len = auth_len - cipher_len - auth_hdr_len;
	/* pack (tail << 16 | head) for the SEC descriptor */
	auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	if (sgl->num > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->userdata = (void *)userdata;

	rte_prefetch0(cf->sg);

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	if (is_encode(ses))
		out_sg->length = cipher_len + ses->digest_length;
	else
		out_sg->length = cipher_len;

	/* output sg entries */
	sg = &cf->sg[2];
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(out_sg);

	/* 1st seg; out-of-place into dest_sgl, else in-place into sgl */
	if (dest_sgl) {
		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
		sg->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
		sg->offset = ofs.ofs.cipher.head;

		/* Successive segs */
		for (i = 1; i < dest_sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
			sg->length = dest_sgl->vec[i].len;
		}
		/* trim the cipher tail off the last segment */
		sg->length -= ofs.ofs.cipher.tail;
	} else {
		qm_sg_entry_set64(sg, sgl->vec[0].iova);
		sg->length = sgl->vec[0].len - ofs.ofs.cipher.head;
		sg->offset = ofs.ofs.cipher.head;

		/* Successive segs */
		for (i = 1; i < sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, sgl->vec[i].iova);
			sg->length = sgl->vec[i].len;
		}
		/* trim the cipher tail off the last segment */
		sg->length -= ofs.ofs.cipher.tail;
	}

	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, digest->iova);
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	/* decode input additionally carries the digest to verify */
	if (is_encode(ses))
		in_sg->length = ses->iv.length + auth_len;
	else
		in_sg->length = ses->iv.length + auth_len
						+ ses->digest_length;

	/* input sg entries */
	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* 1st seg IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 2 seg */
	sg++;
	qm_sg_entry_set64(sg, sgl->vec[0].iova);
	sg->length = sgl->vec[0].len - ofs.ofs.auth.head;
	sg->offset = ofs.ofs.auth.head;

	/* Successive segs */
	for (i = 1; i < sgl->num; i++) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, sgl->vec[i].iova);
		sg->length = sgl->vec[i].len;
	}

	if (is_decode(ses)) {
		cpu_to_hw_sg(sg);
		sg++;
		/* copy the expected digest so SEC can compare in-line */
		memcpy(ctx->digest, digest->va,
			ses->digest_length);
		qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
		sg->length = ses->digest_length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* bit 31 signals auth-only-length override in the FD command word */
	if (auth_only_len)
		fd->cmd = 0x80000000 | auth_only_len;

	return cf;
}
5279d5f73c2SGagandeep Singh 
/*
 * Build a QMan compound frame for a cipher-only raw operation.
 *
 * Output (sg[0], extension): the cipher region of @dest_sgl (or @sgl when
 * operating in place).
 * Input (sg[1], extension): the cipher IV followed by the cipher region
 * of @sgl. @digest, @auth_iv and @fd are unused for plain cipher.
 *
 * Returns the job descriptor, or NULL on too many segments / pool empty.
 */
static struct dpaa_sec_job *
build_dpaa_raw_dp_cipher_fd(uint8_t *drv_ctx,
			struct rte_crypto_sgl *sgl,
			struct rte_crypto_sgl *dest_sgl,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *auth_iv,
			union rte_crypto_sym_ofs ofs,
			void *userdata,
			struct qm_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(fd);

	dpaa_sec_session *ses =
		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	unsigned int i;
	uint8_t *IV_ptr = iv->va;
	int data_len, total_len = 0, data_offset;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* Support lengths in bits only for SNOW3G and ZUC */
	if (sgl->num > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	/* 2*num segs + 2 compound heads + IV seg */
	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 3);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->userdata = (void *)userdata;

	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	out_sg->length = data_len;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
	cpu_to_hw_sg(out_sg);

	/* out-of-place: write into dest_sgl; otherwise in-place into sgl */
	if (dest_sgl) {
		/* 1st seg */
		sg = &cf->sg[2];
		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
		sg->length = dest_sgl->vec[0].len - data_offset;
		sg->offset = data_offset;

		/* Successive segs */
		for (i = 1; i < dest_sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
			sg->length = dest_sgl->vec[i].len;
		}
	} else {
		/* 1st seg */
		sg = &cf->sg[2];
		qm_sg_entry_set64(sg, sgl->vec[0].iova);
		sg->length = sgl->vec[0].len - data_offset;
		sg->offset = data_offset;

		/* Successive segs */
		for (i = 1; i < sgl->num; i++) {
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, sgl->vec[i].iova);
			sg->length = sgl->vec[i].len;
		}

	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* input */
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	in_sg->length = data_len + ses->iv.length;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
	cpu_to_hw_sg(in_sg);

	/* IV */
	qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	/* 1st seg */
	sg++;
	qm_sg_entry_set64(sg, sgl->vec[0].iova);
	sg->length = sgl->vec[0].len - data_offset;
	sg->offset = data_offset;

	/* Successive segs */
	for (i = 1; i < sgl->num; i++) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, sgl->vec[i].iova);
		sg->length = sgl->vec[i].len;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
6459d5f73c2SGagandeep Singh 
/*
 * Build a SEC compound-frame job for a protocol-offload session
 * (IPsec or PDCP) on the raw data-path.
 *
 * The full frame is handed to SEC and the full protocol output is
 * written back, so the per-op iv/digest/auth_iv/ofs arguments of the
 * build callback are not used for this context type.
 *
 * cf->sg[0] is the (extension) output SG table, cf->sg[1] the input
 * one; both point at entries laid out from cf->sg[2] onwards.
 *
 * Returns the job descriptor, or NULL when the source SGL has more
 * than MAX_SG_ENTRIES segments or no op context could be allocated.
 */
static inline struct dpaa_sec_job *
build_dpaa_raw_proto_sg(uint8_t *drv_ctx,
			struct rte_crypto_sgl *sgl,
			struct rte_crypto_sgl *dest_sgl,
			struct rte_crypto_va_iova_ptr *iv,
			struct rte_crypto_va_iova_ptr *digest,
			struct rte_crypto_va_iova_ptr *auth_iv,
			union rte_crypto_sym_ofs ofs,
			void *userdata,
			struct qm_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa_sec_session *ses =
		((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg, *out_sg, *in_sg;
	uint32_t in_len = 0, out_len = 0;
	unsigned int i;

	if (sgl->num > MAX_SG_ENTRIES) {
		DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
				MAX_SG_ENTRIES);
		return NULL;
	}

	/* Worst case: one output + one input entry per segment, plus the
	 * two table heads and slack for the IV/meta entries.
	 */
	ctx = dpaa_sec_alloc_raw_ctx(ses, sgl->num * 2 + 4);
	if (!ctx)
		return NULL;
	cf = &ctx->job;
	ctx->userdata = (void *)userdata;
	/* output */
	out_sg = &cf->sg[0];
	out_sg->extension = 1;
	qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

	if (dest_sgl) {
		/* Out-of-place: result goes to the destination SGL. */
		/* 1st seg */
		sg = &cf->sg[2];
		qm_sg_entry_set64(sg, dest_sgl->vec[0].iova);
		sg->offset = 0;
		sg->length = dest_sgl->vec[0].len;
		out_len += sg->length;

		for (i = 1; i < dest_sgl->num; i++) {
		/* Successive segs */
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, dest_sgl->vec[i].iova);
			sg->offset = 0;
			sg->length = dest_sgl->vec[i].len;
			out_len += sg->length;
		}
		/* NOTE(review): the last segment's length is replaced with
		 * the vector's tot_len — presumably to expose the whole
		 * buffer for protocol-generated output growth (headers,
		 * ICV); confirm.  out_len below then adds this tot_len on
		 * top of the per-segment lengths already summed above, so
		 * out_sg->length exceeds the sum of the SG entry lengths —
		 * verify this is intended.
		 */
		sg->length = dest_sgl->vec[i - 1].tot_len;
	} else {
		/* In-place: output overlays the source SGL. */
		/* 1st seg */
		sg = &cf->sg[2];
		qm_sg_entry_set64(sg, sgl->vec[0].iova);
		sg->offset = 0;
		sg->length = sgl->vec[0].len;
		out_len += sg->length;

		for (i = 1; i < sgl->num; i++) {
		/* Successive segs */
			cpu_to_hw_sg(sg);
			sg++;
			qm_sg_entry_set64(sg, sgl->vec[i].iova);
			sg->offset = 0;
			sg->length = sgl->vec[i].len;
			out_len += sg->length;
		}
		/* NOTE(review): same tot_len overwrite as the dest_sgl
		 * branch above; see the note there.
		 */
		sg->length = sgl->vec[i - 1].tot_len;

	}
	out_len += sg->length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	out_sg->length = out_len;
	cpu_to_hw_sg(out_sg);

	/* input */
	in_sg = &cf->sg[1];
	in_sg->extension = 1;
	in_sg->final = 1;
	/* First segment's length is counted here; the loop below adds the
	 * rest.
	 */
	in_len = sgl->vec[0].len;

	sg++;
	qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

	/* 1st seg */
	qm_sg_entry_set64(sg, sgl->vec[0].iova);
	sg->length = sgl->vec[0].len;
	sg->offset = 0;

	/* Successive segs */
	for (i = 1; i < sgl->num; i++) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, sgl->vec[i].iova);
		sg->length = sgl->vec[i].len;
		sg->offset = 0;
		in_len += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	in_sg->length = in_len;
	cpu_to_hw_sg(in_sg);

	/* PDCP per-packet HFN override: bit 31 of fd->cmd enables the
	 * override, low bits carry the HFN read from the user-data area at
	 * the session-configured offset.
	 */
	if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
		fd->cmd = 0x80000000 |
			*((uint32_t *)((uint8_t *)userdata +
			ses->pdcp.hfn_ovd_offset));
		DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u",
			*((uint32_t *)((uint8_t *)userdata +
			ses->pdcp.hfn_ovd_offset)),
			ses->pdcp.hfn_ovd);
	}

	return cf;
}
772fe6a8ee2SGagandeep Singh 
7739d5f73c2SGagandeep Singh static uint32_t
7749d5f73c2SGagandeep Singh dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
7759d5f73c2SGagandeep Singh 	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
7769d5f73c2SGagandeep Singh 	void *user_data[], int *status)
7779d5f73c2SGagandeep Singh {
7789d5f73c2SGagandeep Singh 	/* Function to transmit the frames to given device and queuepair */
7799d5f73c2SGagandeep Singh 	uint32_t loop;
7809d5f73c2SGagandeep Singh 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
7819d5f73c2SGagandeep Singh 	uint16_t num_tx = 0;
7829d5f73c2SGagandeep Singh 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
7839d5f73c2SGagandeep Singh 	uint32_t frames_to_send;
7849d5f73c2SGagandeep Singh 	struct dpaa_sec_job *cf;
7859d5f73c2SGagandeep Singh 	dpaa_sec_session *ses =
7869d5f73c2SGagandeep Singh 			((struct dpaa_sec_raw_dp_ctx *)drv_ctx)->session;
7879d5f73c2SGagandeep Singh 	uint32_t flags[DPAA_SEC_BURST] = {0};
7889d5f73c2SGagandeep Singh 	struct qman_fq *inq[DPAA_SEC_BURST];
7899d5f73c2SGagandeep Singh 
7909d5f73c2SGagandeep Singh 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
7919d5f73c2SGagandeep Singh 		if (rte_dpaa_portal_init((void *)0)) {
7929d5f73c2SGagandeep Singh 			DPAA_SEC_ERR("Failure in affining portal");
7939d5f73c2SGagandeep Singh 			return 0;
7949d5f73c2SGagandeep Singh 		}
7959d5f73c2SGagandeep Singh 	}
7969d5f73c2SGagandeep Singh 
7979d5f73c2SGagandeep Singh 	while (vec->num) {
7989d5f73c2SGagandeep Singh 		frames_to_send = (vec->num > DPAA_SEC_BURST) ?
7999d5f73c2SGagandeep Singh 				DPAA_SEC_BURST : vec->num;
8009d5f73c2SGagandeep Singh 		for (loop = 0; loop < frames_to_send; loop++) {
8019d5f73c2SGagandeep Singh 			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
8029d5f73c2SGagandeep Singh 				if (dpaa_sec_attach_sess_q(dpaa_qp, ses)) {
8039d5f73c2SGagandeep Singh 					frames_to_send = loop;
8049d5f73c2SGagandeep Singh 					goto send_pkts;
8059d5f73c2SGagandeep Singh 				}
8069d5f73c2SGagandeep Singh 			} else if (unlikely(ses->qp[rte_lcore_id() %
8079d5f73c2SGagandeep Singh 						MAX_DPAA_CORES] != dpaa_qp)) {
8089d5f73c2SGagandeep Singh 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
809*f665790aSDavid Marchand 					" New qp = %p",
8109d5f73c2SGagandeep Singh 					ses->qp[rte_lcore_id() %
8119d5f73c2SGagandeep Singh 					MAX_DPAA_CORES], dpaa_qp);
8129d5f73c2SGagandeep Singh 				frames_to_send = loop;
8139d5f73c2SGagandeep Singh 				goto send_pkts;
8149d5f73c2SGagandeep Singh 			}
8159d5f73c2SGagandeep Singh 
8169d5f73c2SGagandeep Singh 			/*Clear the unused FD fields before sending*/
8179d5f73c2SGagandeep Singh 			fd = &fds[loop];
8189d5f73c2SGagandeep Singh 			memset(fd, 0, sizeof(struct qm_fd));
8199d5f73c2SGagandeep Singh 			cf = ses->build_raw_dp_fd(drv_ctx,
8209d5f73c2SGagandeep Singh 						&vec->src_sgl[loop],
8219d5f73c2SGagandeep Singh 						&vec->dest_sgl[loop],
8229d5f73c2SGagandeep Singh 						&vec->iv[loop],
8239d5f73c2SGagandeep Singh 						&vec->digest[loop],
8249d5f73c2SGagandeep Singh 						&vec->auth_iv[loop],
8259d5f73c2SGagandeep Singh 						ofs,
82678156d38SGagandeep Singh 						user_data[loop],
82778156d38SGagandeep Singh 						fd);
8289d5f73c2SGagandeep Singh 			if (!cf) {
8299d5f73c2SGagandeep Singh 				DPAA_SEC_ERR("error: Improper packet contents"
8309d5f73c2SGagandeep Singh 					" for crypto operation");
8319d5f73c2SGagandeep Singh 				goto skip_tx;
8329d5f73c2SGagandeep Singh 			}
8339d5f73c2SGagandeep Singh 			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
8349d5f73c2SGagandeep Singh 			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
8359d5f73c2SGagandeep Singh 			fd->_format1 = qm_fd_compound;
8369d5f73c2SGagandeep Singh 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
8379d5f73c2SGagandeep Singh 
8389d5f73c2SGagandeep Singh 			status[loop] = 1;
8399d5f73c2SGagandeep Singh 		}
8409d5f73c2SGagandeep Singh send_pkts:
8419d5f73c2SGagandeep Singh 		loop = 0;
8429d5f73c2SGagandeep Singh 		while (loop < frames_to_send) {
8439d5f73c2SGagandeep Singh 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
8449d5f73c2SGagandeep Singh 					&flags[loop], frames_to_send - loop);
8459d5f73c2SGagandeep Singh 		}
8469d5f73c2SGagandeep Singh 		vec->num -= frames_to_send;
8479d5f73c2SGagandeep Singh 		num_tx += frames_to_send;
8489d5f73c2SGagandeep Singh 	}
8499d5f73c2SGagandeep Singh 
8509d5f73c2SGagandeep Singh skip_tx:
8519d5f73c2SGagandeep Singh 	dpaa_qp->tx_pkts += num_tx;
8529d5f73c2SGagandeep Singh 	dpaa_qp->tx_errs += vec->num - num_tx;
8539d5f73c2SGagandeep Singh 
8549d5f73c2SGagandeep Singh 	return num_tx;
8559d5f73c2SGagandeep Singh }
8569d5f73c2SGagandeep Singh 
/*
 * Drain up to nb_ops completed frames from the queue pair's SEC output
 * frame queue using a QMan volatile dequeue (VDQCR) and hand each
 * result back to the application.
 *
 * @qp: queue pair whose outq is polled
 * @out_user_data: receives ctx->userdata per op; when
 *	is_user_data_array is 0 only slot [0] is (re)written
 * @is_user_data_array: selects array vs single-slot userdata reporting
 * @post_dequeue: per-op callback invoked with (op, index, is_success)
 * @nb_ops: maximum number of frames requested
 *
 * Returns the number of frames dequeued (0 if the volatile dequeue
 * command could not be issued).
 */
static int
dpaa_sec_deq_raw(struct dpaa_sec_qp *qp, void **out_user_data,
		uint8_t is_user_data_array,
		rte_cryptodev_raw_post_dequeue_t post_dequeue,
		int nb_ops)
{
	struct qman_fq *fq;
	unsigned int pkts = 0;
	int num_rx_bufs, ret;
	struct qm_dqrr_entry *dq;
	uint32_t vdqcr_flags = 0;
	uint8_t is_success = 0;

	fq = &qp->outq;
	/*
	 * Until request for four buffers, we provide exact number of buffers.
	 * Otherwise we do not set the QM_VDQCR_EXACT flag.
	 * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
	 * requested, so we request two less in this case.
	 */
	if (nb_ops < 4) {
		vdqcr_flags = QM_VDQCR_EXACT;
		num_rx_bufs = nb_ops;
	} else {
		num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
			(DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
	}
	/* Issue the volatile dequeue command; a non-zero return means a
	 * previous VDQCR is still in flight and nothing can be pulled now.
	 */
	ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
	if (ret)
		return 0;

	/* Consume DQRR entries until the VDQCR command completes. */
	do {
		const struct qm_fd *fd;
		struct dpaa_sec_job *job;
		struct dpaa_sec_op_ctx *ctx;

		dq = qman_dequeue(fq);
		if (!dq)
			continue;

		fd = &dq->fd;
		/* sg is embedded in an op ctx,
		 * sg[0] is for output
		 * sg[1] for input
		 */
		job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

		/* Recover the op context from the embedded job descriptor. */
		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
		ctx->fd_status = fd->status;
		if (is_user_data_array)
			out_user_data[pkts] = ctx->userdata;
		else
			out_user_data[0] = ctx->userdata;

		/* Zero FD status means SEC processed the frame cleanly. */
		if (!ctx->fd_status) {
			is_success = true;
		} else {
			is_success = false;
			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
		}
		/* NOTE(review): ctx->op is passed to post_dequeue while
		 * ctx->userdata is what is returned to the caller — confirm
		 * both fields hold the intended per-op handle for the raw
		 * data-path.
		 */
		post_dequeue(ctx->op, pkts, is_success);
		pkts++;

		/* report op status to sym->op and then free the ctx memory */
		rte_mempool_put(ctx->ctx_pool, (void *)ctx);

		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return pkts;
}
9289d5f73c2SGagandeep Singh 
9299d5f73c2SGagandeep Singh 
9309d5f73c2SGagandeep Singh static __rte_always_inline uint32_t
9319d5f73c2SGagandeep Singh dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
9329d5f73c2SGagandeep Singh 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
9339d5f73c2SGagandeep Singh 	uint32_t max_nb_to_dequeue,
9349d5f73c2SGagandeep Singh 	rte_cryptodev_raw_post_dequeue_t post_dequeue,
9359d5f73c2SGagandeep Singh 	void **out_user_data, uint8_t is_user_data_array,
9369d5f73c2SGagandeep Singh 	uint32_t *n_success, int *dequeue_status)
9379d5f73c2SGagandeep Singh {
9389d5f73c2SGagandeep Singh 	RTE_SET_USED(drv_ctx);
9399d5f73c2SGagandeep Singh 	RTE_SET_USED(get_dequeue_count);
9409d5f73c2SGagandeep Singh 	uint16_t num_rx;
9419d5f73c2SGagandeep Singh 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp_data;
9429d5f73c2SGagandeep Singh 	uint32_t nb_ops = max_nb_to_dequeue;
9439d5f73c2SGagandeep Singh 
9449d5f73c2SGagandeep Singh 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
9459d5f73c2SGagandeep Singh 		if (rte_dpaa_portal_init((void *)0)) {
9469d5f73c2SGagandeep Singh 			DPAA_SEC_ERR("Failure in affining portal");
9479d5f73c2SGagandeep Singh 			return 0;
9489d5f73c2SGagandeep Singh 		}
9499d5f73c2SGagandeep Singh 	}
9509d5f73c2SGagandeep Singh 
9519d5f73c2SGagandeep Singh 	num_rx = dpaa_sec_deq_raw(dpaa_qp, out_user_data,
9529d5f73c2SGagandeep Singh 			is_user_data_array, post_dequeue, nb_ops);
9539d5f73c2SGagandeep Singh 
9549d5f73c2SGagandeep Singh 	dpaa_qp->rx_pkts += num_rx;
9559d5f73c2SGagandeep Singh 	*dequeue_status = 1;
9569d5f73c2SGagandeep Singh 	*n_success = num_rx;
9579d5f73c2SGagandeep Singh 
958*f665790aSDavid Marchand 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets", num_rx);
9599d5f73c2SGagandeep Singh 
9609d5f73c2SGagandeep Singh 	return num_rx;
9619d5f73c2SGagandeep Singh }
9629d5f73c2SGagandeep Singh 
9639d5f73c2SGagandeep Singh static __rte_always_inline int
9649d5f73c2SGagandeep Singh dpaa_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
9659d5f73c2SGagandeep Singh 	struct rte_crypto_vec *data_vec,
9669d5f73c2SGagandeep Singh 	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
9679d5f73c2SGagandeep Singh 	struct rte_crypto_va_iova_ptr *iv,
9689d5f73c2SGagandeep Singh 	struct rte_crypto_va_iova_ptr *digest,
9699d5f73c2SGagandeep Singh 	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
9709d5f73c2SGagandeep Singh 	void *user_data)
9719d5f73c2SGagandeep Singh {
9729d5f73c2SGagandeep Singh 	RTE_SET_USED(qp_data);
9739d5f73c2SGagandeep Singh 	RTE_SET_USED(drv_ctx);
9749d5f73c2SGagandeep Singh 	RTE_SET_USED(data_vec);
9759d5f73c2SGagandeep Singh 	RTE_SET_USED(n_data_vecs);
9769d5f73c2SGagandeep Singh 	RTE_SET_USED(ofs);
9779d5f73c2SGagandeep Singh 	RTE_SET_USED(iv);
9789d5f73c2SGagandeep Singh 	RTE_SET_USED(digest);
9799d5f73c2SGagandeep Singh 	RTE_SET_USED(aad_or_auth_iv);
9809d5f73c2SGagandeep Singh 	RTE_SET_USED(user_data);
9819d5f73c2SGagandeep Singh 
9829d5f73c2SGagandeep Singh 	return 0;
9839d5f73c2SGagandeep Singh }
9849d5f73c2SGagandeep Singh 
9859d5f73c2SGagandeep Singh static __rte_always_inline void *
9869d5f73c2SGagandeep Singh dpaa_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
9879d5f73c2SGagandeep Singh 	enum rte_crypto_op_status *op_status)
9889d5f73c2SGagandeep Singh {
9899d5f73c2SGagandeep Singh 	RTE_SET_USED(qp_data);
9909d5f73c2SGagandeep Singh 	RTE_SET_USED(drv_ctx);
9919d5f73c2SGagandeep Singh 	RTE_SET_USED(dequeue_status);
9929d5f73c2SGagandeep Singh 	RTE_SET_USED(op_status);
9939d5f73c2SGagandeep Singh 
9949d5f73c2SGagandeep Singh 	return NULL;
9959d5f73c2SGagandeep Singh }
9969d5f73c2SGagandeep Singh 
9979d5f73c2SGagandeep Singh int
9989d5f73c2SGagandeep Singh dpaa_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
9999d5f73c2SGagandeep Singh 	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
10009d5f73c2SGagandeep Singh 	enum rte_crypto_op_sess_type sess_type,
10019d5f73c2SGagandeep Singh 	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
10029d5f73c2SGagandeep Singh {
10039d5f73c2SGagandeep Singh 	dpaa_sec_session *sess;
10049d5f73c2SGagandeep Singh 	struct dpaa_sec_raw_dp_ctx *dp_ctx;
10059d5f73c2SGagandeep Singh 	RTE_SET_USED(qp_id);
10069d5f73c2SGagandeep Singh 
10079d5f73c2SGagandeep Singh 	if (!is_update) {
10089d5f73c2SGagandeep Singh 		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
10099d5f73c2SGagandeep Singh 		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
10109d5f73c2SGagandeep Singh 	}
10119d5f73c2SGagandeep Singh 
10129d5f73c2SGagandeep Singh 	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
10133f3fc330SAkhil Goyal 		sess = SECURITY_GET_SESS_PRIV(session_ctx.sec_sess);
10149d5f73c2SGagandeep Singh 	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1015bdce2564SAkhil Goyal 		sess = (dpaa_sec_session *)
10162a440d6aSAkhil Goyal 			CRYPTODEV_GET_SYM_SESS_PRIV(session_ctx.crypto_sess);
10179d5f73c2SGagandeep Singh 	else
10189d5f73c2SGagandeep Singh 		return -ENOTSUP;
10199d5f73c2SGagandeep Singh 	raw_dp_ctx->dequeue_burst = dpaa_sec_raw_dequeue_burst;
10209d5f73c2SGagandeep Singh 	raw_dp_ctx->dequeue = dpaa_sec_raw_dequeue;
10219d5f73c2SGagandeep Singh 	raw_dp_ctx->dequeue_done = dpaa_sec_raw_dequeue_done;
10229d5f73c2SGagandeep Singh 	raw_dp_ctx->enqueue_burst = dpaa_sec_raw_enqueue_burst;
10239d5f73c2SGagandeep Singh 	raw_dp_ctx->enqueue = dpaa_sec_raw_enqueue;
10249d5f73c2SGagandeep Singh 	raw_dp_ctx->enqueue_done = dpaa_sec_raw_enqueue_done;
10259d5f73c2SGagandeep Singh 
10269d5f73c2SGagandeep Singh 	if (sess->ctxt == DPAA_SEC_CIPHER)
10279d5f73c2SGagandeep Singh 		sess->build_raw_dp_fd = build_dpaa_raw_dp_cipher_fd;
10289d5f73c2SGagandeep Singh 	else if (sess->ctxt == DPAA_SEC_AUTH)
10299d5f73c2SGagandeep Singh 		sess->build_raw_dp_fd = build_dpaa_raw_dp_auth_fd;
103078156d38SGagandeep Singh 	else if (sess->ctxt == DPAA_SEC_CIPHER_HASH)
103178156d38SGagandeep Singh 		sess->build_raw_dp_fd = build_dpaa_raw_dp_chain_fd;
1032fe6a8ee2SGagandeep Singh 	else if (sess->ctxt == DPAA_SEC_AEAD)
1033fe6a8ee2SGagandeep Singh 		sess->build_raw_dp_fd = build_raw_cipher_auth_gcm_sg;
1034fe6a8ee2SGagandeep Singh 	else if (sess->ctxt == DPAA_SEC_IPSEC ||
1035fe6a8ee2SGagandeep Singh 			sess->ctxt == DPAA_SEC_PDCP)
1036fe6a8ee2SGagandeep Singh 		sess->build_raw_dp_fd = build_dpaa_raw_proto_sg;
10379d5f73c2SGagandeep Singh 	else
10389d5f73c2SGagandeep Singh 		return -ENOTSUP;
10399d5f73c2SGagandeep Singh 	dp_ctx = (struct dpaa_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
10409d5f73c2SGagandeep Singh 	dp_ctx->session = sess;
10419d5f73c2SGagandeep Singh 
10429d5f73c2SGagandeep Singh 	return 0;
10439d5f73c2SGagandeep Singh }
10449d5f73c2SGagandeep Singh 
10459d5f73c2SGagandeep Singh int
10469d5f73c2SGagandeep Singh dpaa_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
10479d5f73c2SGagandeep Singh {
10489d5f73c2SGagandeep Singh 	return sizeof(struct dpaa_sec_raw_dp_ctx);
10499d5f73c2SGagandeep Singh }
1050