/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021-2022, 2024 NXP
 */

#include <cryptodev_pmd.h>
#include <bus_fslmc_driver.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

#include <desc/algo.h>

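/* Driver-private raw datapath context, stored in
 * rte_crypto_raw_dp_ctx::drv_ctx_data by dpaa2_sec_configure_raw_dp_ctx().
 * Only the session pointer is used by the enqueue/dequeue paths below; the
 * head/tail/cached counters are not referenced in this file.
 */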
struct dpaa2_sec_raw_dp_ctx {
	dpaa2_sec_session *session;
	uint32_t tail;
	uint32_t head;
	uint16_t cached_enqueue;
	uint16_t cached_dequeue;
};

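/* Build a compound frame descriptor (FD) for a cipher+auth chained operation.
 * The scratch buffer allocated here holds, in order: fle[0] to stash userdata
 * and the session context, op_fle (output frame list entry), ip_fle (input
 * frame list entry) and the scatter/gather entries that follow. The output
 * side covers the cipher range (plus the ICV on encrypt); the input side
 * carries IV + authentication range (plus the received ICV on decrypt). The
 * buffer is released in sec_fd_to_userdata() once the FD comes back from SEC.
 */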
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int data_len = 0, auth_len = 0, cipher_len = 0;
	unsigned int i = 0;
	uint16_t auth_hdr_len = ofs.ofs.cipher.head -
				ofs.ofs.auth.head;

	uint16_t auth_tail_len;
	uint32_t auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *iv_ptr = iv->va;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	cipher_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	auth_tail_len = auth_len - cipher_len - auth_hdr_len;
	auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	/* first FLE entry used to store session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(cipher_len + icv_len) :
			cipher_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
		}
		sge->length -= ofs.ofs.cipher.tail;
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
		sge->length -= ofs.ofs.cipher.tail;
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
			digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	ip_fle->length = (sess->dir == DIR_ENC) ?
			(auth_len + sess->iv.length) :
			(auth_len + sess->iv.length +
			icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.auth.head);
	sge->length = sgl->vec[0].len - ofs.ofs.auth.head;

	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va,
			icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

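/* Build a compound FD for an AEAD (e.g. GCM) operation. The layout matches
 * build_raw_dp_chain_fd(): fle[0] stashes userdata/context, followed by the
 * output and input FLEs and their SGEs. The AAD (auth_only_len bytes taken
 * from auth_iv) is placed on the input side after the IV, and the ICV is
 * appended to the output on encrypt or supplied as input on decrypt.
 */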
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	uint8_t *IV_ptr = iv->va;
	unsigned int i = 0;
	int data_len = 0, aead_len = 0;

	for (i = 0; i < sgl->num; i++)
		data_len += sgl->vec[i].len;

	aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(aead_len + icv_len) :
			aead_len;

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head);
		sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
	}

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, digest->iova);
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(aead_len + sess->iv.length + auth_only_len) :
		(aead_len + sess->iv.length + auth_only_len +
		icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + ofs.ofs.cipher.head);
	sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, digest->va, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

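/* Build a compound FD for an auth-only (hash/MAC) operation. The output FLE
 * points straight at the digest buffer; the input FLE gathers an optional
 * per-session IV (converted for SNOW 3G UIA2 / ZUC EIA3), the data to be
 * authenticated and, on verify (DIR_DEC), a copy of the expected digest.
 */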
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(dest_sgl);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	int total_len = 0, data_len = 0, data_offset;
	uint8_t *old_digest;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
	data_offset = ofs.ofs.auth.head;

	/* For SNOW3G and ZUC, only lengths in bits are supported */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
			DPAA2_VADDR_TO_IOVA(digest->va));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(userdata, uint8_t *,
						sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset);

	if (data_len <= (int)(sgl->vec[0].len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = sgl->vec[0].len - data_offset;
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
	}
	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, digest->va,
			sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

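/* Build a compound FD for protocol offload (IPsec or PDCP) sessions. The
 * whole source SGL is handed to SEC as input and the (possibly out-of-place)
 * destination SGL as output; SEC performs the full encap/decap. For PDCP
 * sessions with HFN override enabled, the per-packet HFN is read from the
 * user data area at sess->pdcp.hfn_ovd_offset and programmed into the
 * FLEs and the FD.
 */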
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);
	RTE_SET_USED(ofs);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t in_len = 0, out_len = 0, i;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	DPAA2_SET_FD_IVP(fd);
	DPAA2_SET_FLE_IVP(op_fle);
	DPAA2_SET_FLE_IVP(ip_fle);

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* OOP */
	if (dest_sgl) {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
		sge->length = dest_sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = dest_sgl->vec[i - 1].tot_len;

	} else {
		/* Configure Output SGE for Encap/Decap */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
		sge->length = sgl->vec[0].len;
		out_len += sge->length;
		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
			out_len += sge->length;
		}
		sge->length = sgl->vec[i - 1].tot_len;
	}
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
	sge->length = sgl->vec[0].len;
	in_len += sge->length;
	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
		in_len += sge->length;
	}

	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
				sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

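/* Build a compound FD for a cipher-only operation. The output FLE covers the
 * ciphered byte range of the (possibly out-of-place) destination SGL; the
 * input FLE gathers the IV followed by the same byte range of the source SGL.
 */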
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
		       struct rte_crypto_sgl *sgl,
		       struct rte_crypto_sgl *dest_sgl,
		       struct rte_crypto_va_iova_ptr *iv,
		       struct rte_crypto_va_iova_ptr *digest,
		       struct rte_crypto_va_iova_ptr *auth_iv,
		       union rte_crypto_sym_ofs ofs,
		       void *userdata,
		       struct qbman_fd *fd)
{
	RTE_SET_USED(digest);
	RTE_SET_USED(auth_iv);

	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int total_len = 0, data_len = 0, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	unsigned int i;

	for (i = 0; i < sgl->num; i++)
		total_len += sgl->vec[i].len;

	data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
	data_offset = ofs.ofs.cipher.head;

	/* For SNOW3G and ZUC, only lengths in bits are supported */
	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(2 * sgl->num),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("RAW CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
	/* first FLE entry used to store userdata and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d",
		data_offset,
		data_len,
		sess->iv.length);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* OOP */
	if (dest_sgl) {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova + data_offset);
		sge->length = dest_sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < dest_sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
			sge->length = dest_sgl->vec[i].len;
		}
	} else {
		/* o/p 1st seg */
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset);
		sge->length = sgl->vec[0].len - data_offset;

		/* o/p segs */
		for (i = 1; i < sgl->num; i++) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
			sge->length = sgl->vec[i].len;
		}
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, iv->iova);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova + data_offset);
	sge->length = sgl->vec[0].len - data_offset;

	/* i/p segs */
	for (i = 1; i < sgl->num; i++) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
		sge->length = sgl->vec[i].len;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

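/* Burst enqueue for the raw datapath: one FD is built per element of the
 * symmetric vector via the session's build_raw_dp_fd() callback, and the FDs
 * are pushed to the SEC TX queue through the per-lcore QBMAN portal, retrying
 * up to DPAA2_MAX_TX_RETRY_COUNT times when the enqueue ring is busy.
 */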
static __rte_always_inline uint32_t
dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void *user_data[], int *status)
{
	RTE_SET_USED(user_data);
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	dpaa2_sec_session *sess =
		((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(vec->num == 0))
		return 0;

	if (sess == NULL) {
		DPAA2_SEC_ERR("sessionless raw crypto not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (vec->num) {
		frames_to_send = (vec->num > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : vec->num;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			ret = sess->build_raw_dp_fd(drv_ctx,
						    &vec->src_sgl[loop],
						    &vec->dest_sgl[loop],
						    &vec->iv[loop],
						    &vec->digest[loop],
						    &vec->auth_iv[loop],
						    ofs,
						    user_data[loop],
						    &fd_arr[loop]);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			status[loop] = 1;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					vec->num -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		vec->num -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += vec->num;

	return num_tx;
}

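/* Single-operation enqueue is not implemented for this PMD: the stub below
 * only marks its parameters as used and returns 0 without queueing anything;
 * the burst path above is the functional enqueue entry point.
 */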
static __rte_always_inline int
dpaa2_sec_raw_enqueue(void *qp_data, uint8_t *drv_ctx,
	struct rte_crypto_vec *data_vec,
	uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
	void *user_data)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(data_vec);
	RTE_SET_USED(n_data_vecs);
	RTE_SET_USED(ofs);
	RTE_SET_USED(iv);
	RTE_SET_USED(digest);
	RTE_SET_USED(aad_or_auth_iv);
	RTE_SET_USED(user_data);

	return 0;
}

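/* Recover the caller's userdata pointer from a completed FD. The FD address
 * points at the output FLE (fle[1]); the userdata was stashed in fle[0] at
 * build time, so it is read from (fle - 1) and the FLE scratch buffer
 * allocated by the build_raw_dp_*_fd() helpers is freed here.
 */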
static inline void *
sec_fd_to_userdata(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	void *userdata;
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
	userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
	/* free the fle memory */
	rte_free((void *)(fle - 1));

	return userdata;
}

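/* Burst dequeue for the raw datapath: a volatile pull command is issued on
 * the queue pair's RX frame queue and completed FDs are drained from the
 * per-queue DQ storage. For each frame, the userdata is recovered (and the
 * FLE scratch buffer freed), the frame result code (FRC) decides success,
 * and post_dequeue() is invoked. Note that get_dequeue_count is ignored;
 * max_nb_to_dequeue bounds the pull.
 */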
static __rte_always_inline uint32_t
dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
	uint32_t max_nb_to_dequeue,
	rte_cryptodev_raw_post_dequeue_t post_dequeue,
	void **out_user_data, uint8_t is_user_data_array,
	uint32_t *n_success, int *dequeue_status)
{
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(get_dequeue_count);

8344a81d34aSGagandeep Singh 	/* Function is responsible to receive frames for a given device and VQ*/
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp_data;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status, is_success = 0;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	void *user_data;
	uint32_t nb_ops = max_nb_to_dequeue;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (uint64_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive packets until the Last Dequeue entry is found with
	 * respect to the PULL command issued above.
	 */
	while (!is_last) {
		/* Check whether the previously issued command has completed.
		 * Note that the SWP may be shared between the Ethernet and
		 * SEC drivers.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the Last Pull command has expired, which
		 * sets the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		user_data = sec_fd_to_userdata(fd);
		if (is_user_data_array)
			out_user_data[num_rx] = user_data;
		else
			out_user_data[0] = user_data;
		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			is_success = false;
		} else {
			is_success = true;
		}
		post_dequeue(user_data, num_rx, is_success);

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;
	*dequeue_status = 1;
	*n_success = num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

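/* Single-operation dequeue is likewise not implemented; only the burst
 * dequeue above is functional.
 */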
static __rte_always_inline void *
dpaa2_sec_raw_dequeue(void *qp_data, uint8_t *drv_ctx, int *dequeue_status,
		enum rte_crypto_op_status *op_status)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(dequeue_status);
	RTE_SET_USED(op_status);

	return NULL;
}

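/* The enqueue_done/dequeue_done callbacks below are no-ops for this PMD:
 * FDs are submitted and completions are consumed immediately inside the
 * burst calls, so there is nothing left to commit here.
 */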
static __rte_always_inline int
dpaa2_sec_raw_enqueue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

static __rte_always_inline int
dpaa2_sec_raw_dequeue_done(void *qp_data, uint8_t *drv_ctx, uint32_t n)
{
	RTE_SET_USED(qp_data);
	RTE_SET_USED(drv_ctx);
	RTE_SET_USED(n);

	return 0;
}

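/* Populate the raw datapath context for a queue pair: hook up the burst and
 * single-op callbacks and select the FD builder that matches the session's
 * context type (cipher, auth, cipher+auth chain, AEAD, or IPsec/PDCP protocol
 * offload). The session pointer is cached in drv_ctx_data for the hot path.
 *
 * Illustrative application-side usage (a sketch against the generic
 * rte_cryptodev raw datapath API, not part of this driver; dev_id, qp_id,
 * sess, vec, user_data, out_user_data and post_cb are assumed to be set up
 * by the application, and error handling is omitted):
 *
 *	struct rte_crypto_raw_dp_ctx *ctx;
 *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
 *	union rte_crypto_sym_ofs ofs = { .raw = 0 };
 *	int enq_status, deq_status;
 *	uint32_t n_success;
 *
 *	ctx = rte_zmalloc(NULL, rte_cryptodev_get_raw_dp_ctx_size(dev_id), 0);
 *	rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
 *			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
 *	rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, user_data,
 *			&enq_status);
 *	rte_cryptodev_raw_dequeue_burst(ctx, NULL, burst_size, post_cb,
 *			out_user_data, 1, &n_success, &deq_status);
 */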
int
dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
	enum rte_crypto_op_sess_type sess_type,
	union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
{
	dpaa2_sec_session *sess;
	struct dpaa2_sec_raw_dp_ctx *dp_ctx;
	RTE_SET_USED(qp_id);

	if (!is_update) {
		memset(raw_dp_ctx, 0, sizeof(*raw_dp_ctx));
		raw_dp_ctx->qp_data = dev->data->queue_pairs[qp_id];
	}

	if (sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = SECURITY_GET_SESS_PRIV(session_ctx.sec_sess);
	else if (sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = CRYPTODEV_GET_SYM_SESS_PRIV(session_ctx.crypto_sess);
	else
		return -ENOTSUP;
	raw_dp_ctx->dequeue_burst = dpaa2_sec_raw_dequeue_burst;
	raw_dp_ctx->dequeue = dpaa2_sec_raw_dequeue;
	raw_dp_ctx->dequeue_done = dpaa2_sec_raw_dequeue_done;
	raw_dp_ctx->enqueue_burst = dpaa2_sec_raw_enqueue_burst;
	raw_dp_ctx->enqueue = dpaa2_sec_raw_enqueue;
	raw_dp_ctx->enqueue_done = dpaa2_sec_raw_enqueue_done;

	if (sess->ctxt_type == DPAA2_SEC_CIPHER_HASH)
		sess->build_raw_dp_fd = build_raw_dp_chain_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AEAD)
		sess->build_raw_dp_fd = build_raw_dp_aead_fd;
	else if (sess->ctxt_type == DPAA2_SEC_AUTH)
		sess->build_raw_dp_fd = build_raw_dp_auth_fd;
	else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
		sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
	else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
		sess->ctxt_type == DPAA2_SEC_PDCP)
		sess->build_raw_dp_fd = build_raw_dp_proto_fd;
	else
		return -ENOTSUP;
	dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
	dp_ctx->session = sess;

	return 0;
}

int
dpaa2_sec_get_dp_ctx_size(__rte_unused struct rte_cryptodev *dev)
{
	return sizeof(struct dpaa2_sec_raw_dp_ctx);
}