xref: /dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2023 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_udp.h>
14 #include <rte_mbuf.h>
15 #include <rte_cryptodev.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_string_fns.h>
19 #include <rte_cycles.h>
20 #include <rte_kvargs.h>
21 #include <dev_driver.h>
22 #include <cryptodev_pmd.h>
23 #include <rte_common.h>
24 #include <bus_fslmc_driver.h>
25 #include <fslmc_vfio.h>
26 #include <dpaa2_hw_pvt.h>
27 #include <dpaa2_hw_dpio.h>
28 #include <dpaa2_hw_mempool.h>
29 #include <fsl_dpopr.h>
30 #include <fsl_dpseci.h>
31 #include <fsl_mc_sys.h>
32 #include <rte_hexdump.h>
33 
34 #include "dpaa2_sec_priv.h"
35 #include "dpaa2_sec_event.h"
36 #include "dpaa2_sec_logs.h"
37 
38 /* RTA header files */
39 #include <desc/ipsec.h>
40 #include <desc/pdcp.h>
41 #include <desc/sdap.h>
42 #include <desc/algo.h>
43 
44 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
45  * a pointer to the shared descriptor
46  */
47 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52 
53 #define NO_PREFETCH 0
54 
55 #define DRIVER_DUMP_MODE "drv_dump_mode"
56 #define DRIVER_STRICT_ORDER "drv_strict_order"
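/* Device argument keys parsed by this PMD. An illustrative example
 * (assuming the usual fslmc bus allow-list notation and a dpseci.1 object):
 *   -a fslmc:dpseci.1,drv_dump_mode=2,drv_strict_order=1
 * drv_dump_mode selects one of the DPAA2_SEC_DP_DUMP levels defined below.
 */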
57 
58 /* DPAA2_SEC_DP_DUMP levels */
59 enum dpaa2_sec_dump_levels {
60 	DPAA2_SEC_DP_NO_DUMP,
61 	DPAA2_SEC_DP_ERR_DUMP,
62 	DPAA2_SEC_DP_FULL_DUMP
63 };
64 
65 uint8_t cryptodev_driver_id;
66 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
67 
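/* Free the FLE memory attached to a compound FD that could not be
 * enqueued. Single-format FDs carry no driver-allocated FLE and are
 * skipped. The op pointer lives in the bookkeeping FLE at (fle - 1);
 * pool-backed FLEs (contiguous mbufs) return to qp->fle_pool, while FLEs
 * for segmented mbufs were rte_malloc'd and are released with rte_free.
 */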
68 static inline void
69 free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
70 {
71 	struct qbman_fle *fle;
72 	struct rte_crypto_op *op;
73 
74 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
75 		return;
76 
77 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
78 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
79 	/* free the fle memory */
80 	if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
81 		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
82 	else
83 		rte_free((void *)(fle-1));
84 }
85 
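/* Build a compound FD for protocol (IPsec/PDCP) offload when either mbuf
 * is scatter/gather. Per the pointer arithmetic below, the rte_malloc'd
 * FLE area is laid out as: fle[0] for driver bookkeeping (op pointer and
 * session ctxt), fle[1] as the output frame-list entry, fle[2] as the
 * input entry, and fle[3] onwards the output SGEs followed by input SGEs.
 */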
86 static inline int
87 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
88 			   struct rte_crypto_op *op,
89 			   struct qbman_fd *fd, uint16_t bpid)
90 {
91 	struct rte_crypto_sym_op *sym_op = op->sym;
92 	struct ctxt_priv *priv = sess->ctxt;
93 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
94 	struct sec_flow_context *flc;
95 	struct rte_mbuf *mbuf;
96 	uint32_t in_len = 0, out_len = 0;
97 
98 	if (sym_op->m_dst)
99 		mbuf = sym_op->m_dst;
100 	else
101 		mbuf = sym_op->m_src;
102 
103 	/* first FLE entry used to store mbuf and session ctxt */
104 	fle = (struct qbman_fle *)rte_malloc(NULL,
105 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
106 			RTE_CACHE_LINE_SIZE);
107 	if (unlikely(!fle)) {
108 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
109 		return -ENOMEM;
110 	}
111 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
112 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
113 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
114 
115 	/* Save the shared descriptor */
116 	flc = &priv->flc_desc[0].flc;
117 
118 	op_fle = fle + 1;
119 	ip_fle = fle + 2;
120 	sge = fle + 3;
121 
122 	if (likely(bpid < MAX_BPID)) {
123 		DPAA2_SET_FD_BPID(fd, bpid);
124 		DPAA2_SET_FLE_BPID(op_fle, bpid);
125 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
126 	} else {
127 		DPAA2_SET_FD_IVP(fd);
128 		DPAA2_SET_FLE_IVP(op_fle);
129 		DPAA2_SET_FLE_IVP(ip_fle);
130 	}
131 
132 	/* Configure FD as a FRAME LIST */
133 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
134 	DPAA2_SET_FD_COMPOUND_FMT(fd);
135 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
136 
137 	/* Configure Output FLE with Scatter/Gather Entry */
138 	DPAA2_SET_FLE_SG_EXT(op_fle);
139 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
140 
141 	/* Configure Output SGE for Encap/Decap */
142 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
143 	/* o/p segs */
144 	while (mbuf->next) {
145 		sge->length = mbuf->data_len;
146 		out_len += sge->length;
147 		sge++;
148 		mbuf = mbuf->next;
149 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
150 	}
151 	/* use buf_len for the last buffer so that extra data can be appended */
152 	sge->length = mbuf->buf_len - mbuf->data_off;
153 	out_len += sge->length;
154 
155 	DPAA2_SET_FLE_FIN(sge);
156 	op_fle->length = out_len;
157 
158 	sge++;
159 	mbuf = sym_op->m_src;
160 
161 	/* Configure Input FLE with Scatter/Gather Entry */
162 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
163 	DPAA2_SET_FLE_SG_EXT(ip_fle);
164 	DPAA2_SET_FLE_FIN(ip_fle);
165 
166 	/* Configure input SGE for Encap/Decap */
167 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
168 	sge->length = mbuf->data_len;
169 	in_len += sge->length;
170 
171 	mbuf = mbuf->next;
172 	/* i/p segs */
173 	while (mbuf) {
174 		sge++;
175 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
176 		sge->length = mbuf->data_len;
177 		in_len += sge->length;
178 		mbuf = mbuf->next;
179 	}
180 	ip_fle->length = in_len;
181 	DPAA2_SET_FLE_FIN(sge);
182 
183 	/* In case of PDCP, the per-packet HFN is stored in
184 	 * the mbuf private area, after sym_op.
185 	 */
186 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
187 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
188 					sess->pdcp.hfn_ovd_offset);
189 		/* enable HFN override */
190 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
191 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
192 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
193 	}
194 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
195 
196 	return 0;
197 }
198 
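/* Contiguous-mbuf variant of the protocol compound FD: the bookkeeping,
 * output and input FLEs come from the per-queue-pair FLE pool, and the
 * output/input entries point directly at the dst/src mbuf data.
 */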
199 static inline int
200 build_proto_compound_fd(dpaa2_sec_session *sess,
201 	       struct rte_crypto_op *op,
202 	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
203 {
204 	struct rte_crypto_sym_op *sym_op = op->sym;
205 	struct ctxt_priv *priv = sess->ctxt;
206 	struct qbman_fle *fle, *ip_fle, *op_fle;
207 	struct sec_flow_context *flc;
208 	struct rte_mbuf *src_mbuf = sym_op->m_src;
209 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
210 	int retval;
211 
212 	if (!dst_mbuf)
213 		dst_mbuf = src_mbuf;
214 
215 	/* Save the shared descriptor */
216 	flc = &priv->flc_desc[0].flc;
217 
218 	/* we are using the first FLE entry to store Mbuf */
219 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
220 	if (retval) {
221 		DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
222 		return -ENOMEM;
223 	}
224 	memset(fle, 0, FLE_POOL_BUF_SIZE);
225 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
226 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
227 
228 	op_fle = fle + 1;
229 	ip_fle = fle + 2;
230 
231 	if (likely(bpid < MAX_BPID)) {
232 		DPAA2_SET_FD_BPID(fd, bpid);
233 		DPAA2_SET_FLE_BPID(op_fle, bpid);
234 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
235 	} else {
236 		DPAA2_SET_FD_IVP(fd);
237 		DPAA2_SET_FLE_IVP(op_fle);
238 		DPAA2_SET_FLE_IVP(ip_fle);
239 	}
240 
241 	/* Configure FD as a FRAME LIST */
242 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
243 	DPAA2_SET_FD_COMPOUND_FMT(fd);
244 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
245 
246 	/* Configure Output FLE with dst mbuf data  */
247 	DPAA2_SET_FLE_ADDR(op_fle, rte_pktmbuf_iova(dst_mbuf));
248 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
249 
250 	/* Configure Input FLE with src mbuf data */
251 	DPAA2_SET_FLE_ADDR(ip_fle, rte_pktmbuf_iova(src_mbuf));
252 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
253 
254 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
255 	DPAA2_SET_FLE_FIN(ip_fle);
256 
257 	/* In case of PDCP, the per-packet HFN is stored in
258 	 * the mbuf private area, after sym_op.
259 	 */
260 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
261 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
262 					sess->pdcp.hfn_ovd_offset);
263 		/* enable HFN override */
264 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
265 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
266 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
267 	}
268 
269 	return 0;
270 
271 }
272 
273 static inline int
274 build_proto_fd(dpaa2_sec_session *sess,
275 	       struct rte_crypto_op *op,
276 	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
277 {
278 	struct rte_crypto_sym_op *sym_op = op->sym;
279 	if (sym_op->m_dst)
280 		return build_proto_compound_fd(sess, op, fd, bpid, qp);
281 
282 	struct ctxt_priv *priv = sess->ctxt;
283 	struct sec_flow_context *flc;
284 	struct rte_mbuf *mbuf = sym_op->m_src;
285 
286 	if (likely(bpid < MAX_BPID))
287 		DPAA2_SET_FD_BPID(fd, bpid);
288 	else
289 		DPAA2_SET_FD_IVP(fd);
290 
291 	/* Save the shared descriptor */
292 	flc = &priv->flc_desc[0].flc;
293 
294 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
295 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
296 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
297 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
298 
299 	/* park the mbuf IOVA in digest.phys_addr and stash the op pointer in buf_iova */
300 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
301 	mbuf->buf_iova = (size_t)op;
302 
303 	return 0;
304 }
305 
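/* Build a compound FD for AEAD (GCM) over scatter/gather mbufs. The
 * output frame list covers the payload (plus the ICV when encrypting);
 * the input frame list is IV + optional AAD + payload, with a copy of the
 * received ICV appended for verification when decrypting.
 */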
306 static inline int
307 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
308 		 struct rte_crypto_op *op,
309 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
310 {
311 	struct rte_crypto_sym_op *sym_op = op->sym;
312 	struct ctxt_priv *priv = sess->ctxt;
313 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
314 	struct sec_flow_context *flc;
315 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
316 	int icv_len = sess->digest_length;
317 	uint8_t *old_icv;
318 	struct rte_mbuf *mbuf;
319 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
320 			sess->iv.offset);
321 
322 	if (sym_op->m_dst)
323 		mbuf = sym_op->m_dst;
324 	else
325 		mbuf = sym_op->m_src;
326 
327 	/* first FLE entry used to store mbuf and session ctxt */
328 	fle = (struct qbman_fle *)rte_malloc(NULL,
329 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
330 			RTE_CACHE_LINE_SIZE);
331 	if (unlikely(!fle)) {
332 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
333 		return -ENOMEM;
334 	}
335 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
336 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
337 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
338 
339 	op_fle = fle + 1;
340 	ip_fle = fle + 2;
341 	sge = fle + 3;
342 
343 	/* Save the shared descriptor */
344 	flc = &priv->flc_desc[0].flc;
345 
346 	/* Configure FD as a FRAME LIST */
347 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
348 	DPAA2_SET_FD_COMPOUND_FMT(fd);
349 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
350 
351 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
352 		   "iv-len=%d data_off: 0x%x\n",
353 		   sym_op->aead.data.offset,
354 		   sym_op->aead.data.length,
355 		   sess->digest_length,
356 		   sess->iv.length,
357 		   sym_op->m_src->data_off);
358 
359 	/* Configure Output FLE with Scatter/Gather Entry */
360 	DPAA2_SET_FLE_SG_EXT(op_fle);
361 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
362 
363 	if (auth_only_len)
364 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
365 
366 	op_fle->length = (sess->dir == DIR_ENC) ?
367 			(sym_op->aead.data.length + icv_len) :
368 			sym_op->aead.data.length;
369 
370 	/* Configure Output SGE for Encap/Decap */
371 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset);
372 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
373 
374 	mbuf = mbuf->next;
375 	/* o/p segs */
376 	while (mbuf) {
377 		sge++;
378 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
379 		sge->length = mbuf->data_len;
380 		mbuf = mbuf->next;
381 	}
382 	sge->length -= icv_len;
383 
384 	if (sess->dir == DIR_ENC) {
385 		sge++;
386 		DPAA2_SET_FLE_ADDR(sge,
387 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
388 		sge->length = icv_len;
389 	}
390 	DPAA2_SET_FLE_FIN(sge);
391 
392 	sge++;
393 	mbuf = sym_op->m_src;
394 
395 	/* Configure Input FLE with Scatter/Gather Entry */
396 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
397 	DPAA2_SET_FLE_SG_EXT(ip_fle);
398 	DPAA2_SET_FLE_FIN(ip_fle);
399 	ip_fle->length = (sess->dir == DIR_ENC) ?
400 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
401 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
402 		 icv_len);
403 
404 	/* Configure Input SGE for Encap/Decap */
405 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
406 	sge->length = sess->iv.length;
407 
408 	sge++;
409 	if (auth_only_len) {
410 		DPAA2_SET_FLE_ADDR(sge,
411 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
412 		sge->length = auth_only_len;
413 		sge++;
414 	}
415 
416 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset);
417 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
418 
419 	mbuf = mbuf->next;
420 	/* i/p segs */
421 	while (mbuf) {
422 		sge++;
423 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
424 		sge->length = mbuf->data_len;
425 		mbuf = mbuf->next;
426 	}
427 
428 	if (sess->dir == DIR_DEC) {
429 		sge++;
430 		old_icv = (uint8_t *)(sge + 1);
431 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
432 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
433 		sge->length = icv_len;
434 	}
435 
436 	DPAA2_SET_FLE_FIN(sge);
437 	if (auth_only_len) {
438 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
439 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
440 	}
441 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
442 
443 	return 0;
444 }
445 
446 static inline int
447 build_authenc_gcm_fd(dpaa2_sec_session *sess,
448 		     struct rte_crypto_op *op,
449 		     struct qbman_fd *fd, uint16_t bpid,
450 		     struct dpaa2_sec_qp *qp)
451 {
452 	struct rte_crypto_sym_op *sym_op = op->sym;
453 	struct ctxt_priv *priv = sess->ctxt;
454 	struct qbman_fle *fle, *sge;
455 	struct sec_flow_context *flc;
456 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
457 	int icv_len = sess->digest_length, retval;
458 	uint8_t *old_icv;
459 	struct rte_mbuf *dst;
460 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
461 			sess->iv.offset);
462 
463 	if (sym_op->m_dst)
464 		dst = sym_op->m_dst;
465 	else
466 		dst = sym_op->m_src;
467 
468 	/* TODO: we are using the first FLE entry to store the mbuf and session
469 	 * ctxt. Currently we do not know which FLE has the mbuf stored,
470 	 * so while retrieving we go back 1 FLE from the FD ADDR
471 	 * to get the mbuf address from the previous FLE.
472 	 * A better approach would be to use the inline mbuf.
473 	 */
474 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
475 	if (retval) {
476 		DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
477 		return -ENOMEM;
478 	}
479 	memset(fle, 0, FLE_POOL_BUF_SIZE);
480 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
481 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
482 	fle = fle + 1;
483 	sge = fle + 2;
484 	if (likely(bpid < MAX_BPID)) {
485 		DPAA2_SET_FD_BPID(fd, bpid);
486 		DPAA2_SET_FLE_BPID(fle, bpid);
487 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
488 		DPAA2_SET_FLE_BPID(sge, bpid);
489 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
490 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
491 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
492 	} else {
493 		DPAA2_SET_FD_IVP(fd);
494 		DPAA2_SET_FLE_IVP(fle);
495 		DPAA2_SET_FLE_IVP((fle + 1));
496 		DPAA2_SET_FLE_IVP(sge);
497 		DPAA2_SET_FLE_IVP((sge + 1));
498 		DPAA2_SET_FLE_IVP((sge + 2));
499 		DPAA2_SET_FLE_IVP((sge + 3));
500 	}
501 
502 	/* Save the shared descriptor */
503 	flc = &priv->flc_desc[0].flc;
504 	/* Configure FD as a FRAME LIST */
505 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
506 	DPAA2_SET_FD_COMPOUND_FMT(fd);
507 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
508 
509 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
510 		   "iv-len=%d data_off: 0x%x\n",
511 		   sym_op->aead.data.offset,
512 		   sym_op->aead.data.length,
513 		   sess->digest_length,
514 		   sess->iv.length,
515 		   sym_op->m_src->data_off);
516 
517 	/* Configure Output FLE with Scatter/Gather Entry */
518 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
519 	if (auth_only_len)
520 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
521 	fle->length = (sess->dir == DIR_ENC) ?
522 			(sym_op->aead.data.length + icv_len) :
523 			sym_op->aead.data.length;
524 
525 	DPAA2_SET_FLE_SG_EXT(fle);
526 
527 	/* Configure Output SGE for Encap/Decap */
528 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->aead.data.offset);
529 	sge->length = sym_op->aead.data.length;
530 
531 	if (sess->dir == DIR_ENC) {
532 		sge++;
533 		DPAA2_SET_FLE_ADDR(sge,
534 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
535 		sge->length = sess->digest_length;
536 	}
537 	DPAA2_SET_FLE_FIN(sge);
538 
539 	sge++;
540 	fle++;
541 
542 	/* Configure Input FLE with Scatter/Gather Entry */
543 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
544 	DPAA2_SET_FLE_SG_EXT(fle);
545 	DPAA2_SET_FLE_FIN(fle);
546 	fle->length = (sess->dir == DIR_ENC) ?
547 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
548 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
549 		 sess->digest_length);
550 
551 	/* Configure Input SGE for Encap/Decap */
552 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
553 	sge->length = sess->iv.length;
554 	sge++;
555 	if (auth_only_len) {
556 		DPAA2_SET_FLE_ADDR(sge,
557 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
558 		sge->length = auth_only_len;
559 		DPAA2_SET_FLE_BPID(sge, bpid);
560 		sge++;
561 	}
562 
563 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->aead.data.offset);
564 	sge->length = sym_op->aead.data.length;
565 	if (sess->dir == DIR_DEC) {
566 		sge++;
567 		old_icv = (uint8_t *)(sge + 1);
568 		memcpy(old_icv,	sym_op->aead.digest.data,
569 		       sess->digest_length);
570 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
571 		sge->length = sess->digest_length;
572 	}
573 	DPAA2_SET_FLE_FIN(sge);
574 
575 	if (auth_only_len) {
576 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
577 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
578 	}
579 
580 	DPAA2_SET_FD_LEN(fd, fle->length);
581 	return 0;
582 }
583 
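/* Build a compound FD for chained cipher+auth over scatter/gather mbufs.
 * auth_only_len packs the authenticate-only bytes surrounding the cipher
 * region into one 32-bit word: tail length (auth bytes after the cipher
 * region) in the upper 16 bits, header length (auth bytes before it) in
 * the lower 16 bits.
 */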
584 static inline int
585 build_authenc_sg_fd(dpaa2_sec_session *sess,
586 		 struct rte_crypto_op *op,
587 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
588 {
589 	struct rte_crypto_sym_op *sym_op = op->sym;
590 	struct ctxt_priv *priv = sess->ctxt;
591 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
592 	struct sec_flow_context *flc;
593 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
594 				sym_op->auth.data.offset;
595 	uint16_t auth_tail_len = sym_op->auth.data.length -
596 				sym_op->cipher.data.length - auth_hdr_len;
597 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
598 	int icv_len = sess->digest_length;
599 	uint8_t *old_icv;
600 	struct rte_mbuf *mbuf;
601 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
602 			sess->iv.offset);
603 
604 	if (sym_op->m_dst)
605 		mbuf = sym_op->m_dst;
606 	else
607 		mbuf = sym_op->m_src;
608 
609 	/* first FLE entry used to store mbuf and session ctxt */
610 	fle = (struct qbman_fle *)rte_malloc(NULL,
611 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
612 			RTE_CACHE_LINE_SIZE);
613 	if (unlikely(!fle)) {
614 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
615 		return -ENOMEM;
616 	}
617 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
618 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
619 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
620 
621 	op_fle = fle + 1;
622 	ip_fle = fle + 2;
623 	sge = fle + 3;
624 
625 	/* Save the shared descriptor */
626 	flc = &priv->flc_desc[0].flc;
627 
628 	/* Configure FD as a FRAME LIST */
629 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
630 	DPAA2_SET_FD_COMPOUND_FMT(fd);
631 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
632 
633 	DPAA2_SEC_DP_DEBUG(
634 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
635 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
636 		sym_op->auth.data.offset,
637 		sym_op->auth.data.length,
638 		sess->digest_length,
639 		sym_op->cipher.data.offset,
640 		sym_op->cipher.data.length,
641 		sess->iv.length,
642 		sym_op->m_src->data_off);
643 
644 	/* Configure Output FLE with Scatter/Gather Entry */
645 	DPAA2_SET_FLE_SG_EXT(op_fle);
646 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
647 
648 	if (auth_only_len)
649 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
650 
651 	op_fle->length = (sess->dir == DIR_ENC) ?
652 			(sym_op->cipher.data.length + icv_len) :
653 			sym_op->cipher.data.length;
654 
655 	/* Configure Output SGE for Encap/Decap */
656 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset);
657 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
658 
659 	mbuf = mbuf->next;
660 	/* o/p segs */
661 	while (mbuf) {
662 		sge++;
663 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
664 		sge->length = mbuf->data_len;
665 		mbuf = mbuf->next;
666 	}
667 	sge->length -= icv_len;
668 
669 	if (sess->dir == DIR_ENC) {
670 		sge++;
671 		DPAA2_SET_FLE_ADDR(sge,
672 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
673 		sge->length = icv_len;
674 	}
675 	DPAA2_SET_FLE_FIN(sge);
676 
677 	sge++;
678 	mbuf = sym_op->m_src;
679 
680 	/* Configure Input FLE with Scatter/Gather Entry */
681 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
682 	DPAA2_SET_FLE_SG_EXT(ip_fle);
683 	DPAA2_SET_FLE_FIN(ip_fle);
684 	ip_fle->length = (sess->dir == DIR_ENC) ?
685 			(sym_op->auth.data.length + sess->iv.length) :
686 			(sym_op->auth.data.length + sess->iv.length +
687 			 icv_len);
688 
689 	/* Configure Input SGE for Encap/Decap */
690 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
691 	sge->length = sess->iv.length;
692 
693 	sge++;
694 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset);
695 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
696 
697 	mbuf = mbuf->next;
698 	/* i/p segs */
699 	while (mbuf) {
700 		sge++;
701 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
702 		sge->length = mbuf->data_len;
703 		mbuf = mbuf->next;
704 	}
705 	sge->length -= icv_len;
706 
707 	if (sess->dir == DIR_DEC) {
708 		sge++;
709 		old_icv = (uint8_t *)(sge + 1);
710 		memcpy(old_icv,	sym_op->auth.digest.data,
711 		       icv_len);
712 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
713 		sge->length = icv_len;
714 	}
715 
716 	DPAA2_SET_FLE_FIN(sge);
717 	if (auth_only_len) {
718 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
719 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
720 	}
721 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
722 
723 	return 0;
724 }
725 
726 static inline int
727 build_authenc_fd(dpaa2_sec_session *sess,
728 		 struct rte_crypto_op *op,
729 		 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
730 {
731 	struct rte_crypto_sym_op *sym_op = op->sym;
732 	struct ctxt_priv *priv = sess->ctxt;
733 	struct qbman_fle *fle, *sge;
734 	struct sec_flow_context *flc;
735 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
736 				sym_op->auth.data.offset;
737 	uint16_t auth_tail_len = sym_op->auth.data.length -
738 				sym_op->cipher.data.length - auth_hdr_len;
739 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
740 
741 	int icv_len = sess->digest_length, retval;
742 	uint8_t *old_icv;
743 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
744 			sess->iv.offset);
745 	struct rte_mbuf *dst;
746 
747 	if (sym_op->m_dst)
748 		dst = sym_op->m_dst;
749 	else
750 		dst = sym_op->m_src;
751 
752 	/* We are using the first FLE entry to store the mbuf.
753 	 * Currently we do not know which FLE has the mbuf stored,
754 	 * so while retrieving we go back 1 FLE from the FD ADDR
755 	 * to get the mbuf address from the previous FLE.
756 	 * A better approach would be to use the inline mbuf.
757 	 */
758 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
759 	if (retval) {
760 		DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
761 		return -ENOMEM;
762 	}
763 	memset(fle, 0, FLE_POOL_BUF_SIZE);
764 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
765 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
766 	fle = fle + 1;
767 	sge = fle + 2;
768 	if (likely(bpid < MAX_BPID)) {
769 		DPAA2_SET_FD_BPID(fd, bpid);
770 		DPAA2_SET_FLE_BPID(fle, bpid);
771 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
772 		DPAA2_SET_FLE_BPID(sge, bpid);
773 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
774 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
775 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
776 	} else {
777 		DPAA2_SET_FD_IVP(fd);
778 		DPAA2_SET_FLE_IVP(fle);
779 		DPAA2_SET_FLE_IVP((fle + 1));
780 		DPAA2_SET_FLE_IVP(sge);
781 		DPAA2_SET_FLE_IVP((sge + 1));
782 		DPAA2_SET_FLE_IVP((sge + 2));
783 		DPAA2_SET_FLE_IVP((sge + 3));
784 	}
785 
786 	/* Save the shared descriptor */
787 	flc = &priv->flc_desc[0].flc;
788 	/* Configure FD as a FRAME LIST */
789 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
790 	DPAA2_SET_FD_COMPOUND_FMT(fd);
791 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
792 
793 	DPAA2_SEC_DP_DEBUG(
794 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
795 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
796 		sym_op->auth.data.offset,
797 		sym_op->auth.data.length,
798 		sess->digest_length,
799 		sym_op->cipher.data.offset,
800 		sym_op->cipher.data.length,
801 		sess->iv.length,
802 		sym_op->m_src->data_off);
803 
804 	/* Configure Output FLE with Scatter/Gather Entry */
805 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
806 	if (auth_only_len)
807 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
808 	fle->length = (sess->dir == DIR_ENC) ?
809 			(sym_op->cipher.data.length + icv_len) :
810 			sym_op->cipher.data.length;
811 
812 	DPAA2_SET_FLE_SG_EXT(fle);
813 
814 	/* Configure Output SGE for Encap/Decap */
815 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->cipher.data.offset);
816 	sge->length = sym_op->cipher.data.length;
817 
818 	if (sess->dir == DIR_ENC) {
819 		sge++;
820 		DPAA2_SET_FLE_ADDR(sge,
821 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
822 		sge->length = sess->digest_length;
823 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
824 					sess->iv.length));
825 	}
826 	DPAA2_SET_FLE_FIN(sge);
827 
828 	sge++;
829 	fle++;
830 
831 	/* Configure Input FLE with Scatter/Gather Entry */
832 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
833 	DPAA2_SET_FLE_SG_EXT(fle);
834 	DPAA2_SET_FLE_FIN(fle);
835 	fle->length = (sess->dir == DIR_ENC) ?
836 			(sym_op->auth.data.length + sess->iv.length) :
837 			(sym_op->auth.data.length + sess->iv.length +
838 			 sess->digest_length);
839 
840 	/* Configure Input SGE for Encap/Decap */
841 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
842 	sge->length = sess->iv.length;
843 	sge++;
844 
845 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->auth.data.offset);
846 	sge->length = sym_op->auth.data.length;
847 	if (sess->dir == DIR_DEC) {
848 		sge++;
849 		old_icv = (uint8_t *)(sge + 1);
850 		memcpy(old_icv,	sym_op->auth.digest.data,
851 		       sess->digest_length);
852 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
853 		sge->length = sess->digest_length;
854 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
855 				 sess->digest_length +
856 				 sess->iv.length));
857 	}
858 	DPAA2_SET_FLE_FIN(sge);
859 	if (auth_only_len) {
860 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
861 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
862 	}
863 	return 0;
864 }
865 
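/* Build a compound FD for auth-only (digest generate/verify) over
 * scatter/gather mbufs. SNOW 3G UIA2 and ZUC EIA3 express length/offset
 * in bits, so both must be byte aligned here and are converted to bytes.
 * The output FLE is just the digest; the input is the optional
 * (algorithm-specific) IV plus the data, plus a copy of the received
 * digest when verifying.
 */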
866 static inline int build_auth_sg_fd(
867 		dpaa2_sec_session *sess,
868 		struct rte_crypto_op *op,
869 		struct qbman_fd *fd,
870 		__rte_unused uint16_t bpid)
871 {
872 	struct rte_crypto_sym_op *sym_op = op->sym;
873 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
874 	struct sec_flow_context *flc;
875 	struct ctxt_priv *priv = sess->ctxt;
876 	int data_len, data_offset;
877 	uint8_t *old_digest;
878 	struct rte_mbuf *mbuf;
879 
880 	data_len = sym_op->auth.data.length;
881 	data_offset = sym_op->auth.data.offset;
882 
883 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
884 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
885 		if ((data_len & 7) || (data_offset & 7)) {
886 			DPAA2_SEC_ERR("AUTH: len/offset must be byte aligned");
887 			return -ENOTSUP;
888 		}
889 
890 		data_len = data_len >> 3;
891 		data_offset = data_offset >> 3;
892 	}
893 
894 	mbuf = sym_op->m_src;
895 	fle = (struct qbman_fle *)rte_malloc(NULL,
896 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
897 			RTE_CACHE_LINE_SIZE);
898 	if (unlikely(!fle)) {
899 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
900 		return -ENOMEM;
901 	}
902 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
903 	/* first FLE entry used to store mbuf and session ctxt */
904 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
905 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
906 	op_fle = fle + 1;
907 	ip_fle = fle + 2;
908 	sge = fle + 3;
909 
910 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
911 	/* sg FD */
912 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
913 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
914 	DPAA2_SET_FD_COMPOUND_FMT(fd);
915 
916 	/* o/p fle */
917 	DPAA2_SET_FLE_ADDR(op_fle,
918 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
919 	op_fle->length = sess->digest_length;
920 
921 	/* i/p fle */
922 	DPAA2_SET_FLE_SG_EXT(ip_fle);
923 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
924 	ip_fle->length = data_len;
925 
926 	if (sess->iv.length) {
927 		uint8_t *iv_ptr;
928 
929 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
930 						   sess->iv.offset);
931 
932 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
933 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
934 			sge->length = 12;
935 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
936 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
937 			sge->length = 8;
938 		} else {
939 			sge->length = sess->iv.length;
940 		}
941 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
942 		ip_fle->length += sge->length;
943 		sge++;
944 	}
945 	/* i/p 1st seg */
946 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
947 
948 	if (data_len <= (mbuf->data_len - data_offset)) {
949 		sge->length = data_len;
950 		data_len = 0;
951 	} else {
952 		sge->length = mbuf->data_len - data_offset;
953 
954 		/* remaining i/p segs */
955 		while ((data_len = data_len - sge->length) &&
956 		       (mbuf = mbuf->next)) {
957 			sge++;
958 			DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
959 			if (data_len > mbuf->data_len)
960 				sge->length = mbuf->data_len;
961 			else
962 				sge->length = data_len;
963 		}
964 	}
965 
966 	if (sess->dir == DIR_DEC) {
967 		/* Digest verification case */
968 		sge++;
969 		old_digest = (uint8_t *)(sge + 1);
970 		rte_memcpy(old_digest, sym_op->auth.digest.data,
971 			   sess->digest_length);
972 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
973 		sge->length = sess->digest_length;
974 		ip_fle->length += sess->digest_length;
975 	}
976 	DPAA2_SET_FLE_FIN(sge);
977 	DPAA2_SET_FLE_FIN(ip_fle);
978 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
979 
980 	return 0;
981 }
982 
983 static inline int
984 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
985 	      struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
986 {
987 	struct rte_crypto_sym_op *sym_op = op->sym;
988 	struct qbman_fle *fle, *sge;
989 	struct sec_flow_context *flc;
990 	struct ctxt_priv *priv = sess->ctxt;
991 	int data_len, data_offset;
992 	uint8_t *old_digest;
993 	int retval;
994 
995 	data_len = sym_op->auth.data.length;
996 	data_offset = sym_op->auth.data.offset;
997 
998 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
999 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1000 		if ((data_len & 7) || (data_offset & 7)) {
1001 			DPAA2_SEC_ERR("AUTH: len/offset must be byte aligned");
1002 			return -ENOTSUP;
1003 		}
1004 
1005 		data_len = data_len >> 3;
1006 		data_offset = data_offset >> 3;
1007 	}
1008 
1009 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
1010 	if (retval) {
1011 		DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
1012 		return -ENOMEM;
1013 	}
1014 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1015 	/* TODO: we are using the first FLE entry to store the mbuf.
1016 	 * Currently we do not know which FLE has the mbuf stored,
1017 	 * so while retrieving we go back 1 FLE from the FD ADDR
1018 	 * to get the mbuf address from the previous FLE.
1019 	 * A better approach would be to use the inline mbuf.
1020 	 */
1021 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1022 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1023 	fle = fle + 1;
1024 	sge = fle + 2;
1025 
1026 	if (likely(bpid < MAX_BPID)) {
1027 		DPAA2_SET_FD_BPID(fd, bpid);
1028 		DPAA2_SET_FLE_BPID(fle, bpid);
1029 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1030 		DPAA2_SET_FLE_BPID(sge, bpid);
1031 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1032 	} else {
1033 		DPAA2_SET_FD_IVP(fd);
1034 		DPAA2_SET_FLE_IVP(fle);
1035 		DPAA2_SET_FLE_IVP((fle + 1));
1036 		DPAA2_SET_FLE_IVP(sge);
1037 		DPAA2_SET_FLE_IVP((sge + 1));
1038 	}
1039 
1040 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1041 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1042 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1043 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1044 
1045 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1046 	fle->length = sess->digest_length;
1047 	fle++;
1048 
1049 	/* Setting input FLE */
1050 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1051 	DPAA2_SET_FLE_SG_EXT(fle);
1052 	fle->length = data_len;
1053 
1054 	if (sess->iv.length) {
1055 		uint8_t *iv_ptr;
1056 
1057 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1058 						   sess->iv.offset);
1059 
1060 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1061 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1062 			sge->length = 12;
1063 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1064 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1065 			sge->length = 8;
1066 		} else {
1067 			sge->length = sess->iv.length;
1068 		}
1069 
1070 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1071 		fle->length = fle->length + sge->length;
1072 		sge++;
1073 	}
1074 
1075 	/* Setting data to authenticate */
1076 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset);
1077 	sge->length = data_len;
1078 
1079 	if (sess->dir == DIR_DEC) {
1080 		sge++;
1081 		old_digest = (uint8_t *)(sge + 1);
1082 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1083 			   sess->digest_length);
1084 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1085 		sge->length = sess->digest_length;
1086 		fle->length = fle->length + sess->digest_length;
1087 	}
1088 
1089 	DPAA2_SET_FLE_FIN(sge);
1090 	DPAA2_SET_FLE_FIN(fle);
1091 	DPAA2_SET_FD_LEN(fd, fle->length);
1092 
1093 	return 0;
1094 }
1095 
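/* Build a compound FD for cipher-only over scatter/gather mbufs: the
 * output frame list covers the cipher region of the destination, and the
 * input frame list is the IV followed by the cipher region of the source.
 * SNOW/ZUC bit lengths are converted to bytes as in the auth path.
 */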
1096 static int
1097 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1098 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1099 {
1100 	struct rte_crypto_sym_op *sym_op = op->sym;
1101 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1102 	int data_len, data_offset;
1103 	struct sec_flow_context *flc;
1104 	struct ctxt_priv *priv = sess->ctxt;
1105 	struct rte_mbuf *mbuf;
1106 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1107 			sess->iv.offset);
1108 
1109 	data_len = sym_op->cipher.data.length;
1110 	data_offset = sym_op->cipher.data.offset;
1111 
1112 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1113 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1114 		if ((data_len & 7) || (data_offset & 7)) {
1115 			DPAA2_SEC_ERR("CIPHER: len/offset must be byte aligned");
1116 			return -ENOTSUP;
1117 		}
1118 
1119 		data_len = data_len >> 3;
1120 		data_offset = data_offset >> 3;
1121 	}
1122 
1123 	if (sym_op->m_dst)
1124 		mbuf = sym_op->m_dst;
1125 	else
1126 		mbuf = sym_op->m_src;
1127 
1128 	/* first FLE entry used to store mbuf and session ctxt */
1129 	fle = (struct qbman_fle *)rte_malloc(NULL,
1130 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1131 			RTE_CACHE_LINE_SIZE);
1132 	if (!fle) {
1133 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1134 		return -ENOMEM;
1135 	}
1136 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1137 	/* first FLE entry used to store mbuf and session ctxt */
1138 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1139 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1140 
1141 	op_fle = fle + 1;
1142 	ip_fle = fle + 2;
1143 	sge = fle + 3;
1144 
1145 	flc = &priv->flc_desc[0].flc;
1146 
1147 	DPAA2_SEC_DP_DEBUG(
1148 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1149 		" data_off: 0x%x\n",
1150 		data_offset,
1151 		data_len,
1152 		sess->iv.length,
1153 		sym_op->m_src->data_off);
1154 
1155 	/* o/p fle */
1156 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1157 	op_fle->length = data_len;
1158 	DPAA2_SET_FLE_SG_EXT(op_fle);
1159 
1160 	/* o/p 1st seg */
1161 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
1162 	sge->length = mbuf->data_len - data_offset;
1163 
1164 	mbuf = mbuf->next;
1165 	/* o/p segs */
1166 	while (mbuf) {
1167 		sge++;
1168 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
1169 		sge->length = mbuf->data_len;
1170 		mbuf = mbuf->next;
1171 	}
1172 	DPAA2_SET_FLE_FIN(sge);
1173 
1174 	DPAA2_SEC_DP_DEBUG(
1175 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1176 		flc, fle, fle->addr_hi, fle->addr_lo,
1177 		fle->length);
1178 
1179 	/* i/p fle */
1180 	mbuf = sym_op->m_src;
1181 	sge++;
1182 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1183 	ip_fle->length = sess->iv.length + data_len;
1184 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1185 
1186 	/* i/p IV */
1187 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1188 	sge->length = sess->iv.length;
1189 
1190 	sge++;
1191 
1192 	/* i/p 1st seg */
1193 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
1194 	sge->length = mbuf->data_len - data_offset;
1195 
1196 	mbuf = mbuf->next;
1197 	/* i/p segs */
1198 	while (mbuf) {
1199 		sge++;
1200 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
1201 		sge->length = mbuf->data_len;
1202 		mbuf = mbuf->next;
1203 	}
1204 	DPAA2_SET_FLE_FIN(sge);
1205 	DPAA2_SET_FLE_FIN(ip_fle);
1206 
1207 	/* sg fd */
1208 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1209 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1210 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1211 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1212 
1213 	DPAA2_SEC_DP_DEBUG(
1214 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1215 		" off =%d, len =%d\n",
1216 		DPAA2_GET_FD_ADDR(fd),
1217 		DPAA2_GET_FD_BPID(fd),
1218 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1219 		DPAA2_GET_FD_OFFSET(fd),
1220 		DPAA2_GET_FD_LEN(fd));
1221 	return 0;
1222 }
1223 
1224 static int
1225 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1226 		struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
1227 {
1228 	struct rte_crypto_sym_op *sym_op = op->sym;
1229 	struct qbman_fle *fle, *sge;
1230 	int retval, data_len, data_offset;
1231 	struct sec_flow_context *flc;
1232 	struct ctxt_priv *priv = sess->ctxt;
1233 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1234 			sess->iv.offset);
1235 	struct rte_mbuf *dst;
1236 
1237 	data_len = sym_op->cipher.data.length;
1238 	data_offset = sym_op->cipher.data.offset;
1239 
1240 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1241 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1242 		if ((data_len & 7) || (data_offset & 7)) {
1243 			DPAA2_SEC_ERR("CIPHER: len/offset must be byte aligned");
1244 			return -ENOTSUP;
1245 		}
1246 
1247 		data_len = data_len >> 3;
1248 		data_offset = data_offset >> 3;
1249 	}
1250 
1251 	if (sym_op->m_dst)
1252 		dst = sym_op->m_dst;
1253 	else
1254 		dst = sym_op->m_src;
1255 
1256 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
1257 	if (retval) {
1258 		DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
1259 		return -ENOMEM;
1260 	}
1261 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1262 	/* TODO: we are using the first FLE entry to store the mbuf.
1263 	 * Currently we do not know which FLE has the mbuf stored,
1264 	 * so while retrieving we go back 1 FLE from the FD ADDR
1265 	 * to get the mbuf address from the previous FLE.
1266 	 * A better approach would be to use the inline mbuf.
1267 	 */
1268 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1269 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1270 	fle = fle + 1;
1271 	sge = fle + 2;
1272 
1273 	if (likely(bpid < MAX_BPID)) {
1274 		DPAA2_SET_FD_BPID(fd, bpid);
1275 		DPAA2_SET_FLE_BPID(fle, bpid);
1276 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1277 		DPAA2_SET_FLE_BPID(sge, bpid);
1278 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1279 	} else {
1280 		DPAA2_SET_FD_IVP(fd);
1281 		DPAA2_SET_FLE_IVP(fle);
1282 		DPAA2_SET_FLE_IVP((fle + 1));
1283 		DPAA2_SET_FLE_IVP(sge);
1284 		DPAA2_SET_FLE_IVP((sge + 1));
1285 	}
1286 
1287 	flc = &priv->flc_desc[0].flc;
1288 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1289 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1290 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1291 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1292 
1293 	DPAA2_SEC_DP_DEBUG(
1294 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1295 		" data_off: 0x%x\n",
1296 		data_offset,
1297 		data_len,
1298 		sess->iv.length,
1299 		sym_op->m_src->data_off);
1300 
1301 	DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset);
1302 
1303 	fle->length = data_len + sess->iv.length;
1304 
1305 	DPAA2_SEC_DP_DEBUG(
1306 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1307 		flc, fle, fle->addr_hi, fle->addr_lo,
1308 		fle->length);
1309 
1310 	fle++;
1311 
1312 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1313 	fle->length = data_len + sess->iv.length;
1314 
1315 	DPAA2_SET_FLE_SG_EXT(fle);
1316 
1317 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1318 	sge->length = sess->iv.length;
1319 
1320 	sge++;
1321 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset);
1322 
1323 	sge->length = data_len;
1324 	DPAA2_SET_FLE_FIN(sge);
1325 	DPAA2_SET_FLE_FIN(fle);
1326 
1327 	DPAA2_SEC_DP_DEBUG(
1328 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1329 		" off =%d, len =%d\n",
1330 		DPAA2_GET_FD_ADDR(fd),
1331 		DPAA2_GET_FD_BPID(fd),
1332 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1333 		DPAA2_GET_FD_OFFSET(fd),
1334 		DPAA2_GET_FD_LEN(fd));
1335 
1336 	return 0;
1337 }
1338 
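/* Dispatch to the per-context FD builder. The scatter/gather variants are
 * used whenever src or dst is a multi-segment mbuf; they rte_malloc the
 * FLE area, whereas the contiguous variants draw it from qp->fle_pool.
 */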
1339 static inline int
1340 build_sec_fd(struct rte_crypto_op *op,
1341 	     struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
1342 {
1343 	int ret = -1;
1344 	dpaa2_sec_session *sess;
1345 
1346 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
1347 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1348 	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1349 		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
1350 	} else {
1351 		DPAA2_SEC_DP_ERR("Session type invalid\n");
1352 		return -ENOTSUP;
1353 	}
1354 
1355 	if (!sess) {
1356 		DPAA2_SEC_DP_ERR("Session not available\n");
1357 		return -EINVAL;
1358 	}
1359 
1360 	/* Either of the buffers is segmented */
1361 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1362 		  ((op->sym->m_dst != NULL) &&
1363 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1364 		switch (sess->ctxt_type) {
1365 		case DPAA2_SEC_CIPHER:
1366 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1367 			break;
1368 		case DPAA2_SEC_AUTH:
1369 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1370 			break;
1371 		case DPAA2_SEC_AEAD:
1372 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1373 			break;
1374 		case DPAA2_SEC_CIPHER_HASH:
1375 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1376 			break;
1377 		case DPAA2_SEC_IPSEC:
1378 		case DPAA2_SEC_PDCP:
1379 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1380 			break;
1381 		case DPAA2_SEC_HASH_CIPHER:
1382 		default:
1383 			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
1384 		}
1385 	} else {
1386 		switch (sess->ctxt_type) {
1387 		case DPAA2_SEC_CIPHER:
1388 			ret = build_cipher_fd(sess, op, fd, bpid, qp);
1389 			break;
1390 		case DPAA2_SEC_AUTH:
1391 			ret = build_auth_fd(sess, op, fd, bpid, qp);
1392 			break;
1393 		case DPAA2_SEC_AEAD:
1394 			ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
1395 			break;
1396 		case DPAA2_SEC_CIPHER_HASH:
1397 			ret = build_authenc_fd(sess, op, fd, bpid, qp);
1398 			break;
1399 		case DPAA2_SEC_IPSEC:
1400 			ret = build_proto_fd(sess, op, fd, bpid, qp);
1401 			break;
1402 		case DPAA2_SEC_PDCP:
1403 			ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
1404 			break;
1405 		case DPAA2_SEC_HASH_CIPHER:
1406 		default:
1407 			DPAA2_SEC_ERR("error: Unsupported session");
1408 			ret = -ENOTSUP;
1409 		}
1410 	}
1411 	return ret;
1412 }
1413 
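/* Enqueue path: one FD is built per op and pushed to QBMAN in batches of
 * at most dpaa2_eqcr_size frames. On EQCR back-pressure the enqueue is
 * retried up to DPAA2_MAX_TX_RETRY_COUNT times, after which the remaining
 * FLEs are freed and the ops accounted as errors.
 */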
1414 static uint16_t
1415 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1416 			uint16_t nb_ops)
1417 {
1418 	/* Transmit the frames to the given device and VQ */
1419 	uint32_t loop;
1420 	int32_t ret;
1421 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1422 	uint32_t frames_to_send, retry_count;
1423 	struct qbman_eq_desc eqdesc;
1424 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1425 	struct qbman_swp *swp;
1426 	uint16_t num_tx = 0;
1427 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1428 	/* TODO: need to support multiple buffer pools */
1429 	uint16_t bpid;
1430 	struct rte_mempool *mb_pool;
1431 
1432 	if (unlikely(nb_ops == 0))
1433 		return 0;
1434 
1435 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1436 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1437 		return 0;
1438 	}
1439 	/* Prepare the enqueue descriptor */
1440 	qbman_eq_desc_clear(&eqdesc);
1441 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1442 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1443 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1444 
1445 	if (!DPAA2_PER_LCORE_DPIO) {
1446 		ret = dpaa2_affine_qbman_swp();
1447 		if (ret) {
1448 			DPAA2_SEC_ERR(
1449 				"Failed to allocate IO portal, tid: %d\n",
1450 				rte_gettid());
1451 			return 0;
1452 		}
1453 	}
1454 	swp = DPAA2_PER_LCORE_PORTAL;
1455 
1456 	while (nb_ops) {
1457 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1458 			dpaa2_eqcr_size : nb_ops;
1459 
1460 		for (loop = 0; loop < frames_to_send; loop++) {
1461 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1462 				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
1463 					DPAA2_PER_LCORE_DQRR_SIZE--;
1464 				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
1465 					(*dpaa2_seqn((*ops)->sym->m_src) &
1466 					QBMAN_EQCR_DCA_IDXMASK));
1467 				}
1468 				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
1469 				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
1470 			}
1471 
1472 			/* Clear the unused FD fields before sending */
1473 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1474 			mb_pool = (*ops)->sym->m_src->pool;
1475 			bpid = mempool_to_bpid(mb_pool);
1476 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
1477 			if (ret) {
1478 				DPAA2_SEC_DP_DEBUG("FD build failed\n");
1479 				goto skip_tx;
1480 			}
1481 			ops++;
1482 		}
1483 
1484 		loop = 0;
1485 		retry_count = 0;
1486 		while (loop < frames_to_send) {
1487 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1488 							 &fd_arr[loop],
1489 							 &flags[loop],
1490 							 frames_to_send - loop);
1491 			if (unlikely(ret < 0)) {
1492 				retry_count++;
1493 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1494 					num_tx += loop;
1495 					nb_ops -= loop;
1496 					DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
1497 					/* freeing the fle buffers */
1498 					while (loop < frames_to_send) {
1499 						free_fle(&fd_arr[loop],
1500 								dpaa2_qp);
1501 						loop++;
1502 					}
1503 					goto skip_tx;
1504 				}
1505 			} else {
1506 				loop += ret;
1507 				retry_count = 0;
1508 			}
1509 		}
1510 
1511 		num_tx += loop;
1512 		nb_ops -= loop;
1513 	}
1514 skip_tx:
1515 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1516 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1517 	return num_tx;
1518 }
1519 
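/* Recover the op from a single-format FD (protocol offload path): undo
 * the swap done in build_proto_fd, where the op pointer was stashed in
 * mbuf->buf_iova and the original IOVA parked in aead.digest.phys_addr.
 * data_off is then advanced by the SEC_FLC_DHR_* default headroom for the
 * session direction.
 */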
1520 static inline struct rte_crypto_op *
1521 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1522 {
1523 	struct rte_crypto_op *op;
1524 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1525 	int16_t diff = 0;
1526 	dpaa2_sec_session *sess_priv __rte_unused;
1527 
1528 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1529 		DPAA2_SEC_ERR("error: non inline buffer");
1530 		return NULL;
1531 	}
1532 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1533 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1534 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1535 
1536 	diff = len - mbuf->pkt_len;
1537 	mbuf->pkt_len += diff;
1538 	mbuf->data_len += diff;
1539 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1540 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1541 	op->sym->aead.digest.phys_addr = 0L;
1542 
1543 	sess_priv = SECURITY_GET_SESS_PRIV(op->sym->session);
1544 	if (sess_priv->dir == DIR_ENC)
1545 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1546 	else
1547 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1548 
1549 	if (unlikely(fd->simple.frc)) {
1550 		DPAA2_SEC_ERR("SEC returned Error - %x",
1551 				fd->simple.frc);
1552 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1553 	} else {
1554 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1555 	}
1556 
1557 	return op;
1558 }
1559 
1560 static inline struct rte_crypto_op *
1561 sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
1562 {
1563 	struct qbman_fle *fle;
1564 	struct rte_crypto_op *op;
1565 	struct rte_mbuf *dst, *src;
1566 
1567 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1568 		return sec_simple_fd_to_mbuf(fd);
1569 
1570 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1571 
1572 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1573 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1574 
1575 	/* We are using the first FLE entry to store the mbuf.
1576 	 * Currently we do not know which FLE has the mbuf stored,
1577 	 * so while retrieving we go back 1 FLE from the FD ADDR
1578 	 * to get the mbuf address from the previous FLE.
1579 	 * A better approach would be to use the inline mbuf.
1580 	 */
1581 
1582 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1583 
1584 	/* Prefetch op */
1585 	src = op->sym->m_src;
1586 	rte_prefetch0(src);
1587 
1588 	if (op->sym->m_dst) {
1589 		dst = op->sym->m_dst;
1590 		rte_prefetch0(dst);
1591 	} else
1592 		dst = src;
1593 
1594 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1595 		uint16_t len = DPAA2_GET_FD_LEN(fd);
1596 		dst->pkt_len = len;
1597 		while (dst->next != NULL) {
1598 			len -= dst->data_len;
1599 			dst = dst->next;
1600 		}
1601 		dst->data_len = len;
1602 	}
1603 
1604 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1605 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1606 		(void *)dst,
1607 		dst->buf_addr,
1608 		DPAA2_GET_FD_ADDR(fd),
1609 		DPAA2_GET_FD_BPID(fd),
1610 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1611 		DPAA2_GET_FD_OFFSET(fd),
1612 		DPAA2_GET_FD_LEN(fd));
1613 
1614 	/* free the fle memory */
1615 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1616 		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
1617 	} else
1618 		rte_free((void *)(fle-1));
1619 
1620 	return op;
1621 }
1622 
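/* Debug helper: dump the session parameters, the shared descriptor words
 * and the source/destination mbufs of a failed op. Presumably driven from
 * the dequeue error path according to the dpaa2_sec_dp_dump level (see
 * the DRIVER_DUMP_MODE devarg).
 */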
1623 static void
1624 dpaa2_sec_dump(struct rte_crypto_op *op)
1625 {
1626 	int i;
1627 	dpaa2_sec_session *sess = NULL;
1628 	struct ctxt_priv *priv;
1629 	uint8_t bufsize;
1630 	struct rte_crypto_sym_op *sym_op;
1631 
1632 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1633 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1634 #ifdef RTE_LIB_SECURITY
1635 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1636 		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
1637 #endif
1638 
1639 	if (sess == NULL)
1640 		goto mbuf_dump;
1641 
1642 	priv = (struct ctxt_priv *)sess->ctxt;
1643 	printf("\n****************************************\n"
1644 		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
1645 		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
1646 		"\tCipher key len:\t%zd\n", sess->ctxt_type,
1647 		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
1648 		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
1649 		sess->cipher_key.length);
1650 	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
1651 		    sess->cipher_key.length);
1652 	rte_hexdump(stdout, "auth key", sess->auth_key.data,
1653 		    sess->auth_key.length);
1654 	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
1655 		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
1656 		" len:\t%d\n\taead cipher text:\t%d\n",
1657 		sess->auth_key.length, sess->iv.length, sess->iv.offset,
1658 		sess->digest_length, sess->status,
1659 		sess->ext_params.aead_ctxt.auth_only_len,
1660 		sess->ext_params.aead_ctxt.auth_cipher_text);
1661 #ifdef RTE_LIB_SECURITY
1662 	printf("PDCP session params:\n"
1663 		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
1664 		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
1665 		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
1666 		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
1667 		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
1668 		sess->pdcp.hfn_threshold);
1669 
1670 #endif
1671 	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
1672 	printf("Descriptor Dump:\n");
1673 	for (i = 0; i < bufsize; i++)
1674 		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
1675 
1676 	printf("\n");
1677 mbuf_dump:
1678 	sym_op = op->sym;
1679 	if (sym_op->m_src) {
1680 		printf("Source mbuf:\n");
1681 		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
1682 	}
1683 	if (sym_op->m_dst) {
1684 		printf("Destination mbuf:\n");
1685 		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
1686 	}
1687 
1688 	printf("Session address = %p\ncipher offset: %d, length: %d\n"
1689 		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
1690 		, sym_op->session,
1691 		sym_op->cipher.data.offset, sym_op->cipher.data.length,
1692 		sym_op->auth.data.offset, sym_op->auth.data.length,
1693 		sym_op->aead.data.offset, sym_op->aead.data.length);
1694 	printf("\n");
1695 
1696 }
1697 
1698 static void
1699 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
1700 			  struct dpaa2_queue *dpaa2_q)
1701 {
1702 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1703 	struct rte_crypto_op *op;
1704 	struct qbman_fd *fd;
1705 	struct dpaa2_sec_qp *dpaa2_qp;
1706 
1707 	dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
1708 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1709 	op = sec_fd_to_mbuf(fd, dpaa2_qp);
1710 	/* Alternative: enqueue the op back to the SEC tx queue (sec->core)
1711 	 * with an error set in the FD instead of freeing, at a performance cost.
1712 	 */
1713 	rte_pktmbuf_free(op->sym->m_src);
1714 }
1715 
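/* Fill the enqueue descriptor for ordered traffic. ORP-flagged mbufs get
 * an order-restoration descriptor; with strict ordering an enqueue
 * response entry is also attached so dpaa2_sec_free_eqresp_buf() can
 * reclaim rejected frames. Otherwise the sequence number encodes a DQRR
 * index + 1 and the held entry is consumed via DCA.
 */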
1716 static void
1717 dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1718 			     struct rte_mbuf *m,
1719 			     struct qbman_eq_desc *eqdesc)
1720 {
1721 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1722 	struct eqresp_metadata *eqresp_meta;
1723 	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
1724 	uint16_t orpid, seqnum;
1725 	uint8_t dq_idx;
1726 
1727 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1728 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1729 			DPAA2_EQCR_OPRID_SHIFT;
1730 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1731 			DPAA2_EQCR_SEQNUM_SHIFT;
1732 
1734 		if (!priv->en_loose_ordered) {
1735 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1736 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1737 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1738 				dpio_dev->eqresp_pi]), 1);
1739 			qbman_eq_desc_set_token(eqdesc, 1);
1740 
1741 			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
1742 			eqresp_meta->dpaa2_q = dpaa2_q;
1743 			eqresp_meta->mp = m->pool;
1744 
1745 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1746 				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
1747 		} else {
1748 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1749 		}
1750 	} else {
1751 		dq_idx = *dpaa2_seqn(m) - 1;
1752 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1753 		DPAA2_PER_LCORE_DQRR_SIZE--;
1754 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1755 	}
1756 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1757 }
1758 
1759 
1760 static uint16_t
1761 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
1762 			uint16_t nb_ops)
1763 {
1764 	/* Transmit frames to the given device and VQ. */
1765 	uint32_t loop;
1766 	int32_t ret;
1767 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1768 	uint32_t frames_to_send, num_free_eq_desc, retry_count;
1769 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1770 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1771 	struct qbman_swp *swp;
1772 	uint16_t num_tx = 0;
1773 	uint16_t bpid;
1774 	struct rte_mempool *mb_pool;
1775 	struct dpaa2_sec_dev_private *priv =
1776 				dpaa2_qp->tx_vq.crypto_data->dev_private;
1777 
1778 	if (unlikely(nb_ops == 0))
1779 		return 0;
1780 
1781 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1782 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1783 		return 0;
1784 	}
1785 
1786 	if (!DPAA2_PER_LCORE_DPIO) {
1787 		ret = dpaa2_affine_qbman_swp();
1788 		if (ret) {
1789 			DPAA2_SEC_ERR("Failure in affining portal");
1790 			return 0;
1791 		}
1792 	}
1793 	swp = DPAA2_PER_LCORE_PORTAL;
1794 
1795 	while (nb_ops) {
1796 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1797 			dpaa2_eqcr_size : nb_ops;
1798 
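		/* Without loose ordering, every in-flight frame needs a free
		 * enqueue-response descriptor; cap the burst to what is free.
		 */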
1799 		if (!priv->en_loose_ordered) {
1800 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1801 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1802 				if (num_free_eq_desc < frames_to_send)
1803 					frames_to_send = num_free_eq_desc;
1804 			}
1805 		}
1806 
1807 		for (loop = 0; loop < frames_to_send; loop++) {
1808 			/* Prepare the enqueue descriptor */
1809 			qbman_eq_desc_clear(&eqdesc[loop]);
1810 			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
1811 
1812 			if (*dpaa2_seqn((*ops)->sym->m_src))
1813 				dpaa2_sec_set_enqueue_descriptor(
1814 						&dpaa2_qp->tx_vq,
1815 						(*ops)->sym->m_src,
1816 						&eqdesc[loop]);
1817 			else
1818 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1819 							 DPAA2_EQ_RESP_ERR_FQ);
1820 
1821 			/* Clear the unused FD fields before sending */
1822 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1823 			mb_pool = (*ops)->sym->m_src->pool;
1824 			bpid = mempool_to_bpid(mb_pool);
1825 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
1826 			if (ret) {
1827 				DPAA2_SEC_DP_DEBUG("FD build failed\n");
1828 				goto skip_tx;
1829 			}
1830 			ops++;
1831 		}
1832 
1833 		loop = 0;
1834 		retry_count = 0;
1835 		while (loop < frames_to_send) {
1836 			ret = qbman_swp_enqueue_multiple_desc(swp,
1837 					&eqdesc[loop], &fd_arr[loop],
1838 					frames_to_send - loop);
1839 			if (unlikely(ret < 0)) {
1840 				retry_count++;
1841 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1842 					num_tx += loop;
1843 					nb_ops -= loop;
1844 					DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
1845 					/* freeing the fle buffers */
1846 					while (loop < frames_to_send) {
1847 						free_fle(&fd_arr[loop],
1848 								dpaa2_qp);
1849 						loop++;
1850 					}
1851 					goto skip_tx;
1852 				}
1853 			} else {
1854 				loop += ret;
1855 				retry_count = 0;
1856 			}
1857 		}
1858 
1859 		num_tx += loop;
1860 		nb_ops -= loop;
1861 	}
1862 
1863 skip_tx:
1864 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1865 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1866 	return num_tx;
1867 }
1868 
1869 static uint16_t
1870 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1871 			uint16_t nb_ops)
1872 {
1873 	/* Receive frames for the given device and VQ. */
1874 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1875 	struct qbman_result *dq_storage;
1876 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1877 	int ret, num_rx = 0;
1878 	uint8_t is_last = 0, status;
1879 	struct qbman_swp *swp;
1880 	const struct qbman_fd *fd;
1881 	struct qbman_pull_desc pulldesc;
1882 
1883 	if (!DPAA2_PER_LCORE_DPIO) {
1884 		ret = dpaa2_affine_qbman_swp();
1885 		if (ret) {
1886 			DPAA2_SEC_ERR(
1887 				"Failed to allocate IO portal, tid: %d",
1888 				rte_gettid());
1889 			return 0;
1890 		}
1891 	}
1892 	swp = DPAA2_PER_LCORE_PORTAL;
1893 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1894 
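	/* Prepare a volatile dequeue (pull) command for up to one DQRR
	 * ring's worth of frames, with results written to dq_storage.
	 */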
1895 	qbman_pull_desc_clear(&pulldesc);
1896 	qbman_pull_desc_set_numframes(&pulldesc,
1897 				      (nb_ops > dpaa2_dqrr_size) ?
1898 				      dpaa2_dqrr_size : nb_ops);
1899 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1900 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1901 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1902 				    1);
1903 
1904 	/* Issue a volatile dequeue command. */
1905 	while (1) {
1906 		if (qbman_swp_pull(swp, &pulldesc)) {
1907 			DPAA2_SEC_WARN(
1908 				"SEC VDQ command could not be issued: QBMAN busy");
1909 			/* Portal was busy, try again */
1910 			continue;
1911 		}
1912 		break;
1913 	}
1914 
1915 	/* Receive packets until the Last Dequeue entry is found for the
1916 	 * PULL command issued above.
1917 	 */
1918 	while (!is_last) {
1919 		/* Check if the previously issued command has completed.
1920 		 * Note that the SWP may be shared between the Ethernet driver
1921 		 * and the SEC driver.
1922 		 */
1923 		while (!qbman_check_command_complete(dq_storage))
1924 			;
1925 
1926 		/* Loop until the dq_storage is updated with
1927 		 * new token by QBMAN
1928 		 */
1929 		while (!qbman_check_new_result(dq_storage))
1930 			;
1931 		/* Check whether the last pull command has expired and
1932 		 * set the loop-termination condition accordingly.
1933 		 */
1934 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1935 			is_last = 1;
1936 			/* Check for valid frame. */
1937 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1938 			if (unlikely(
1939 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1940 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1941 				continue;
1942 			}
1943 		}
1944 
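		/* Convert the frame descriptor back into the originating
		 * crypto op; a nonzero FRC indicates a SEC error.
		 */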
1945 		fd = qbman_result_DQ_fd(dq_storage);
1946 		ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);
1947 
1948 		if (unlikely(fd->simple.frc)) {
1949 			/* TODO Parse SEC errors */
1950 			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
1951 				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
1952 						 fd->simple.frc);
1953 				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
1954 					dpaa2_sec_dump(ops[num_rx]);
1955 			}
1956 
1957 			dpaa2_qp->rx_vq.err_pkts += 1;
1958 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1959 		} else {
1960 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1961 		}
1962 
1963 		num_rx++;
1964 		dq_storage++;
1965 	} /* End of Packet Rx loop */
1966 
1967 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1968 
1969 	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
1970 				dpaa2_qp->rx_vq.err_pkts);
1971 	/* Return the total number of packets received to the DPAA2 app */
1972 	return num_rx;
1973 }
1974 
1975 /** Release queue pair */
1976 static int
1977 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1978 {
1979 	struct dpaa2_sec_qp *qp =
1980 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1981 
1982 	PMD_INIT_FUNC_TRACE();
1983 
1984 	if (qp->rx_vq.q_storage) {
1985 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1986 		rte_free(qp->rx_vq.q_storage);
1987 	}
1988 	rte_mempool_free(qp->fle_pool);
1989 	rte_free(qp);
1990 
1991 	dev->data->queue_pairs[queue_pair_id] = NULL;
1992 
1993 	return 0;
1994 }
1995 
1996 /** Setup a queue pair */
1997 static int
1998 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1999 		const struct rte_cryptodev_qp_conf *qp_conf,
2000 		__rte_unused int socket_id)
2001 {
2002 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2003 	struct dpaa2_sec_qp *qp;
2004 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2005 	struct dpseci_rx_queue_cfg cfg;
2006 	int32_t retcode;
2007 	char str[30];
2008 
2009 	PMD_INIT_FUNC_TRACE();
2010 
2011 	/* If the qp is already set up, nothing more needs to be done. */
2012 	if (dev->data->queue_pairs[qp_id] != NULL) {
2013 		DPAA2_SEC_INFO("QP already setup");
2014 		return 0;
2015 	}
2016 
2017 	DPAA2_SEC_DEBUG("dev=%p, queue=%d, conf=%p",
2018 		    dev, qp_id, qp_conf);
2019 
2020 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
2021 
2022 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
2023 			RTE_CACHE_LINE_SIZE);
2024 	if (!qp) {
2025 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
2026 		return -ENOMEM;
2027 	}
2028 
2029 	qp->rx_vq.crypto_data = dev->data;
2030 	qp->tx_vq.crypto_data = dev->data;
2031 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
2032 		sizeof(struct queue_storage_info_t),
2033 		RTE_CACHE_LINE_SIZE);
2034 	if (!qp->rx_vq.q_storage) {
2035 		DPAA2_SEC_ERR("malloc failed for q_storage");
2036 		rte_free(qp);
2037 		return -ENOMEM;
2038 	}
2038 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
2039 
2040 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
2041 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
2042 		rte_free(qp->rx_vq.q_storage);
2043 		rte_free(qp);
2044 		return -ENOMEM;
2045 	}
2044 
2045 	dev->data->queue_pairs[qp_id] = qp;
2046 
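	/* Per-queue-pair pool for FLE (frame list entry) annotations; flagged
	 * single-producer/single-consumer since a qp is expected to be driven
	 * by one lcore at a time.
	 */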
2047 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
2048 			getpid(), dev->data->dev_id, qp_id);
2049 	qp->fle_pool = rte_mempool_create((const char *)str,
2050 			qp_conf->nb_descriptors,
2051 			FLE_POOL_BUF_SIZE,
2052 			FLE_POOL_CACHE_SIZE, 0,
2053 			NULL, NULL, NULL, NULL,
2054 			SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
2055 	if (!qp->fle_pool) {
2056 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2057 		dev->data->queue_pairs[qp_id] = NULL;
2058 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
2059 		rte_free(qp->rx_vq.q_storage);
2060 		rte_free(qp);
2061 		return -ENOMEM;
2062 	}
2059 
2060 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
2061 	cfg.user_ctx = (size_t)(&qp->rx_vq);
2062 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
2063 				      qp_id, &cfg);
2064 	return retcode;
2065 }
2066 
2067 /** Returns the size of the DPAA2 SEC session structure */
2068 static unsigned int
2069 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2070 {
2071 	PMD_INIT_FUNC_TRACE();
2072 
2073 	return sizeof(dpaa2_sec_session);
2074 }
2075 
2076 static int
2077 dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
2078 		      dpaa2_sec_session *session)
2079 {
2080 	struct alginfo cipherdata;
2081 	int bufsize, ret = 0;
2082 	struct ctxt_priv *priv;
2083 	struct sec_flow_context *flc;
2084 
2085 	PMD_INIT_FUNC_TRACE();
2086 
2087 	/* For SEC CIPHER only one descriptor is required. */
2088 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2089 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2090 			RTE_CACHE_LINE_SIZE);
2091 	if (priv == NULL) {
2092 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2093 		return -ENOMEM;
2094 	}
2095 
2096 	flc = &priv->flc_desc[0].flc;
2097 
2098 	session->ctxt_type = DPAA2_SEC_CIPHER;
2099 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2100 			RTE_CACHE_LINE_SIZE);
2101 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2102 		DPAA2_SEC_ERR("No Memory for cipher key");
2103 		rte_free(priv);
2104 		return -ENOMEM;
2105 	}
2106 	session->cipher_key.length = xform->cipher.key.length;
2107 
2108 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2109 	       xform->cipher.key.length);
2110 	cipherdata.key = (size_t)session->cipher_key.data;
2111 	cipherdata.keylen = session->cipher_key.length;
2112 	cipherdata.key_enc_flags = 0;
2113 	cipherdata.key_type = RTA_DATA_IMM;
2114 
2115 	/* Set IV parameters */
2116 	session->iv.offset = xform->cipher.iv.offset;
2117 	session->iv.length = xform->cipher.iv.length;
2118 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2119 				DIR_ENC : DIR_DEC;
2120 
2121 	switch (xform->cipher.algo) {
2122 	case RTE_CRYPTO_CIPHER_AES_CBC:
2123 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2124 		cipherdata.algmode = OP_ALG_AAI_CBC;
2125 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2126 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2127 						SHR_NEVER, &cipherdata,
2128 						session->iv.length,
2129 						session->dir);
2130 		break;
2131 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2132 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2133 		cipherdata.algmode = OP_ALG_AAI_CBC;
2134 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2135 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2136 						SHR_NEVER, &cipherdata,
2137 						session->iv.length,
2138 						session->dir);
2139 		break;
2140 	case RTE_CRYPTO_CIPHER_DES_CBC:
2141 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2142 		cipherdata.algmode = OP_ALG_AAI_CBC;
2143 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2144 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2145 						SHR_NEVER, &cipherdata,
2146 						session->iv.length,
2147 						session->dir);
2148 		break;
2149 	case RTE_CRYPTO_CIPHER_AES_CTR:
2150 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2151 		cipherdata.algmode = OP_ALG_AAI_CTR;
2152 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2153 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2154 						SHR_NEVER, &cipherdata,
2155 						session->iv.length,
2156 						session->dir);
2157 		break;
2158 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2159 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
2160 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
2161 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
2162 					      &cipherdata,
2163 					      session->dir);
2164 		break;
2165 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2166 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
2167 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
2168 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
2169 					      &cipherdata,
2170 					      session->dir);
2171 		break;
2172 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2173 	case RTE_CRYPTO_CIPHER_AES_F8:
2174 	case RTE_CRYPTO_CIPHER_AES_ECB:
2175 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2176 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2177 	case RTE_CRYPTO_CIPHER_AES_XTS:
2178 	case RTE_CRYPTO_CIPHER_ARC4:
2179 	case RTE_CRYPTO_CIPHER_NULL:
2180 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2181 			xform->cipher.algo);
2182 		ret = -ENOTSUP;
2183 		goto error_out;
2184 	default:
2185 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2186 			xform->cipher.algo);
2187 		ret = -ENOTSUP;
2188 		goto error_out;
2189 	}
2190 
2191 	if (bufsize < 0) {
2192 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2193 		ret = -EINVAL;
2194 		goto error_out;
2195 	}
2196 
2197 	flc->word1_sdl = (uint8_t)bufsize;
2198 	session->ctxt = priv;
2199 
2200 #ifdef CAAM_DESC_DEBUG
2201 	int i;
2202 	for (i = 0; i < bufsize; i++)
2203 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
2204 #endif
2205 	return ret;
2206 
2207 error_out:
2208 	rte_free(session->cipher_key.data);
2209 	rte_free(priv);
2210 	return ret;
2211 }
2212 
2213 static int
2214 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
2215 		    dpaa2_sec_session *session)
2216 {
2217 	struct alginfo authdata;
2218 	int bufsize, ret = 0;
2219 	struct ctxt_priv *priv;
2220 	struct sec_flow_context *flc;
2221 
2222 	PMD_INIT_FUNC_TRACE();
2223 
2224 	/* For SEC AUTH three descriptors are required for various stages */
2225 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2226 			sizeof(struct ctxt_priv) + 3 *
2227 			sizeof(struct sec_flc_desc),
2228 			RTE_CACHE_LINE_SIZE);
2229 	if (priv == NULL) {
2230 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2231 		return -ENOMEM;
2232 	}
2233 
2234 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
2235 
2236 	session->ctxt_type = DPAA2_SEC_AUTH;
2237 	session->auth_key.length = xform->auth.key.length;
2238 	if (xform->auth.key.length) {
2239 		session->auth_key.data = rte_zmalloc(NULL,
2240 			xform->auth.key.length,
2241 			RTE_CACHE_LINE_SIZE);
2242 		if (session->auth_key.data == NULL) {
2243 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
2244 			rte_free(priv);
2245 			return -ENOMEM;
2246 		}
2247 		memcpy(session->auth_key.data, xform->auth.key.data,
2248 		       xform->auth.key.length);
2249 		authdata.key = (size_t)session->auth_key.data;
2250 		authdata.key_enc_flags = 0;
2251 		authdata.key_type = RTA_DATA_IMM;
2252 	}
2253 	authdata.keylen = session->auth_key.length;
2254 
2255 	session->digest_length = xform->auth.digest_length;
2256 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2257 				DIR_ENC : DIR_DEC;
2258 
2259 	switch (xform->auth.algo) {
2260 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2261 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2262 		authdata.algmode = OP_ALG_AAI_HMAC;
2263 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2264 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2265 					   1, 0, SHR_NEVER, &authdata,
2266 					   !session->dir,
2267 					   session->digest_length);
2268 		break;
2269 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2270 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2271 		authdata.algmode = OP_ALG_AAI_HMAC;
2272 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2273 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2274 					   1, 0, SHR_NEVER, &authdata,
2275 					   !session->dir,
2276 					   session->digest_length);
2277 		break;
2278 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2279 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2280 		authdata.algmode = OP_ALG_AAI_HMAC;
2281 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2282 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2283 					   1, 0, SHR_NEVER, &authdata,
2284 					   !session->dir,
2285 					   session->digest_length);
2286 		break;
2287 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2288 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2289 		authdata.algmode = OP_ALG_AAI_HMAC;
2290 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2291 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2292 					   1, 0, SHR_NEVER, &authdata,
2293 					   !session->dir,
2294 					   session->digest_length);
2295 		break;
2296 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2297 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2298 		authdata.algmode = OP_ALG_AAI_HMAC;
2299 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2300 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2301 					   1, 0, SHR_NEVER, &authdata,
2302 					   !session->dir,
2303 					   session->digest_length);
2304 		break;
2305 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2306 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2307 		authdata.algmode = OP_ALG_AAI_HMAC;
2308 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2309 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2310 					   1, 0, SHR_NEVER, &authdata,
2311 					   !session->dir,
2312 					   session->digest_length);
2313 		break;
2314 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2315 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2316 		authdata.algmode = OP_ALG_AAI_F9;
2317 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2318 		session->iv.offset = xform->auth.iv.offset;
2319 		session->iv.length = xform->auth.iv.length;
2320 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2321 					      1, 0, &authdata,
2322 					      !session->dir,
2323 					      session->digest_length);
2324 		break;
2325 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2326 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2327 		authdata.algmode = OP_ALG_AAI_F9;
2328 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2329 		session->iv.offset = xform->auth.iv.offset;
2330 		session->iv.length = xform->auth.iv.length;
2331 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2332 					   1, 0, &authdata,
2333 					   !session->dir,
2334 					   session->digest_length);
2335 		break;
2336 	case RTE_CRYPTO_AUTH_SHA1:
2337 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2338 		authdata.algmode = OP_ALG_AAI_HASH;
2339 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2340 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2341 					   1, 0, SHR_NEVER, &authdata,
2342 					   !session->dir,
2343 					   session->digest_length);
2344 		break;
2345 	case RTE_CRYPTO_AUTH_MD5:
2346 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2347 		authdata.algmode = OP_ALG_AAI_HASH;
2348 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2349 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2350 					   1, 0, SHR_NEVER, &authdata,
2351 					   !session->dir,
2352 					   session->digest_length);
2353 		break;
2354 	case RTE_CRYPTO_AUTH_SHA256:
2355 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2356 		authdata.algmode = OP_ALG_AAI_HASH;
2357 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2358 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2359 					   1, 0, SHR_NEVER, &authdata,
2360 					   !session->dir,
2361 					   session->digest_length);
2362 		break;
2363 	case RTE_CRYPTO_AUTH_SHA384:
2364 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2365 		authdata.algmode = OP_ALG_AAI_HASH;
2366 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2367 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2368 					   1, 0, SHR_NEVER, &authdata,
2369 					   !session->dir,
2370 					   session->digest_length);
2371 		break;
2372 	case RTE_CRYPTO_AUTH_SHA512:
2373 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2374 		authdata.algmode = OP_ALG_AAI_HASH;
2375 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2376 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2377 					   1, 0, SHR_NEVER, &authdata,
2378 					   !session->dir,
2379 					   session->digest_length);
2380 		break;
2381 	case RTE_CRYPTO_AUTH_SHA224:
2382 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2383 		authdata.algmode = OP_ALG_AAI_HASH;
2384 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2385 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2386 					   1, 0, SHR_NEVER, &authdata,
2387 					   !session->dir,
2388 					   session->digest_length);
2389 		break;
2390 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2391 		authdata.algtype = OP_ALG_ALGSEL_AES;
2392 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2393 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2394 		bufsize = cnstr_shdsc_aes_mac(
2395 					priv->flc_desc[DESC_INITFINAL].desc,
2396 					1, 0, SHR_NEVER, &authdata,
2397 					!session->dir,
2398 					session->digest_length);
2399 		break;
2400 	case RTE_CRYPTO_AUTH_AES_CMAC:
2401 		authdata.algtype = OP_ALG_ALGSEL_AES;
2402 		authdata.algmode = OP_ALG_AAI_CMAC;
2403 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2404 		bufsize = cnstr_shdsc_aes_mac(
2405 					   priv->flc_desc[DESC_INITFINAL].desc,
2406 					   1, 0, SHR_NEVER, &authdata,
2407 					   !session->dir,
2408 					   session->digest_length);
2409 		break;
2410 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2411 	case RTE_CRYPTO_AUTH_AES_GMAC:
2412 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2413 	case RTE_CRYPTO_AUTH_NULL:
2414 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2415 			      xform->auth.algo);
2416 		ret = -ENOTSUP;
2417 		goto error_out;
2418 	default:
2419 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2420 			      xform->auth.algo);
2421 		ret = -ENOTSUP;
2422 		goto error_out;
2423 	}
2424 
2425 	if (bufsize < 0) {
2426 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2427 		ret = -EINVAL;
2428 		goto error_out;
2429 	}
2430 
2431 	flc->word1_sdl = (uint8_t)bufsize;
2432 	session->ctxt = priv;
2433 #ifdef CAAM_DESC_DEBUG
2434 	int i;
2435 	for (i = 0; i < bufsize; i++)
2436 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2437 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2438 #endif
2439 
2440 	return ret;
2441 
2442 error_out:
2443 	rte_free(session->auth_key.data);
2444 	rte_free(priv);
2445 	return ret;
2446 }
2447 
2448 static int
2449 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
2450 		    dpaa2_sec_session *session)
2451 {
2452 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2453 	struct alginfo aeaddata;
2454 	int bufsize;
2455 	struct ctxt_priv *priv;
2456 	struct sec_flow_context *flc;
2457 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2458 	int err, ret = 0;
2459 
2460 	PMD_INIT_FUNC_TRACE();
2461 
2462 	/* Set IV parameters */
2463 	session->iv.offset = aead_xform->iv.offset;
2464 	session->iv.length = aead_xform->iv.length;
2465 	session->ctxt_type = DPAA2_SEC_AEAD;
2466 
2467 	/* For SEC AEAD only one descriptor is required */
2468 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2469 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2470 			RTE_CACHE_LINE_SIZE);
2471 	if (priv == NULL) {
2472 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2473 		return -ENOMEM;
2474 	}
2475 
2476 	flc = &priv->flc_desc[0].flc;
2477 
2478 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2479 					       RTE_CACHE_LINE_SIZE);
2480 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2481 		DPAA2_SEC_ERR("No Memory for aead key");
2482 		rte_free(priv);
2483 		return -ENOMEM;
2484 	}
2485 	memcpy(session->aead_key.data, aead_xform->key.data,
2486 	       aead_xform->key.length);
2487 
2488 	session->digest_length = aead_xform->digest_length;
2489 	session->aead_key.length = aead_xform->key.length;
2490 	ctxt->auth_only_len = aead_xform->aad_length;
2491 
2492 	aeaddata.key = (size_t)session->aead_key.data;
2493 	aeaddata.keylen = session->aead_key.length;
2494 	aeaddata.key_enc_flags = 0;
2495 	aeaddata.key_type = RTA_DATA_IMM;
2496 
2497 	switch (aead_xform->algo) {
2498 	case RTE_CRYPTO_AEAD_AES_GCM:
2499 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2500 		aeaddata.algmode = OP_ALG_AAI_GCM;
2501 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2502 		break;
2503 	case RTE_CRYPTO_AEAD_AES_CCM:
2504 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2505 			      aead_xform->algo);
2506 		ret = -ENOTSUP;
2507 		goto error_out;
2508 	default:
2509 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2510 			      aead_xform->algo);
2511 		ret = -ENOTSUP;
2512 		goto error_out;
2513 	}
2514 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2515 				DIR_ENC : DIR_DEC;
2516 
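	/* Query whether the key fits inline in the shared descriptor:
	 * desc[0] passes the key length in, desc[1] returns the inline
	 * mask (bit 0 set means the key may stay immediate).
	 */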
2517 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2518 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2519 			       DESC_JOB_IO_LEN,
2520 			       (unsigned int *)priv->flc_desc[0].desc,
2521 			       &priv->flc_desc[0].desc[1], 1);
2522 
2523 	if (err < 0) {
2524 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2525 		ret = -EINVAL;
2526 		goto error_out;
2527 	}
2528 	if (priv->flc_desc[0].desc[1] & 1) {
2529 		aeaddata.key_type = RTA_DATA_IMM;
2530 	} else {
2531 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2532 		aeaddata.key_type = RTA_DATA_PTR;
2533 	}
2534 	priv->flc_desc[0].desc[0] = 0;
2535 	priv->flc_desc[0].desc[1] = 0;
2536 
2537 	if (session->dir == DIR_ENC)
2538 		bufsize = cnstr_shdsc_gcm_encap(
2539 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2540 				&aeaddata, session->iv.length,
2541 				session->digest_length);
2542 	else
2543 		bufsize = cnstr_shdsc_gcm_decap(
2544 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2545 				&aeaddata, session->iv.length,
2546 				session->digest_length);
2547 	if (bufsize < 0) {
2548 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2549 		ret = -EINVAL;
2550 		goto error_out;
2551 	}
2552 
2553 	flc->word1_sdl = (uint8_t)bufsize;
2554 	session->ctxt = priv;
2555 #ifdef CAAM_DESC_DEBUG
2556 	int i;
2557 	for (i = 0; i < bufsize; i++)
2558 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2559 			    i, priv->flc_desc[0].desc[i]);
2560 #endif
2561 	return ret;
2562 
2563 error_out:
2564 	rte_free(session->aead_key.data);
2565 	rte_free(priv);
2566 	return ret;
2567 }
2568 
2569 
2570 static int
2571 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
2572 		    dpaa2_sec_session *session)
2573 {
2574 	struct alginfo authdata, cipherdata;
2575 	int bufsize;
2576 	struct ctxt_priv *priv;
2577 	struct sec_flow_context *flc;
2578 	struct rte_crypto_cipher_xform *cipher_xform;
2579 	struct rte_crypto_auth_xform *auth_xform;
2580 	int err, ret = 0;
2581 
2582 	PMD_INIT_FUNC_TRACE();
2583 
2584 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2585 		cipher_xform = &xform->cipher;
2586 		auth_xform = &xform->next->auth;
2587 		session->ctxt_type =
2588 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2589 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2590 	} else {
2591 		cipher_xform = &xform->next->cipher;
2592 		auth_xform = &xform->auth;
2593 		session->ctxt_type =
2594 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2595 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2596 	}
2597 
2598 	/* Set IV parameters */
2599 	session->iv.offset = cipher_xform->iv.offset;
2600 	session->iv.length = cipher_xform->iv.length;
2601 
2602 	/* For SEC cipher/auth chaining only one descriptor is required */
2603 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2604 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2605 			RTE_CACHE_LINE_SIZE);
2606 	if (priv == NULL) {
2607 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2608 		return -ENOMEM;
2609 	}
2610 
2611 	flc = &priv->flc_desc[0].flc;
2612 
2613 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2614 					       RTE_CACHE_LINE_SIZE);
2615 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2616 		DPAA2_SEC_ERR("No Memory for cipher key");
2617 		rte_free(priv);
2618 		return -ENOMEM;
2619 	}
2620 	session->cipher_key.length = cipher_xform->key.length;
2621 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2622 					     RTE_CACHE_LINE_SIZE);
2623 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2624 		DPAA2_SEC_ERR("No Memory for auth key");
2625 		rte_free(session->cipher_key.data);
2626 		rte_free(priv);
2627 		return -ENOMEM;
2628 	}
2629 	session->auth_key.length = auth_xform->key.length;
2630 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2631 	       cipher_xform->key.length);
2632 	memcpy(session->auth_key.data, auth_xform->key.data,
2633 	       auth_xform->key.length);
2634 
2635 	authdata.key = (size_t)session->auth_key.data;
2636 	authdata.keylen = session->auth_key.length;
2637 	authdata.key_enc_flags = 0;
2638 	authdata.key_type = RTA_DATA_IMM;
2639 
2640 	session->digest_length = auth_xform->digest_length;
2641 
2642 	switch (auth_xform->algo) {
2643 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2644 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2645 		authdata.algmode = OP_ALG_AAI_HMAC;
2646 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2647 		break;
2648 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2649 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2650 		authdata.algmode = OP_ALG_AAI_HMAC;
2651 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2652 		break;
2653 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2654 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2655 		authdata.algmode = OP_ALG_AAI_HMAC;
2656 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2657 		break;
2658 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2659 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2660 		authdata.algmode = OP_ALG_AAI_HMAC;
2661 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2662 		break;
2663 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2664 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2665 		authdata.algmode = OP_ALG_AAI_HMAC;
2666 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2667 		break;
2668 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2669 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2670 		authdata.algmode = OP_ALG_AAI_HMAC;
2671 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2672 		break;
2673 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2674 		authdata.algtype = OP_ALG_ALGSEL_AES;
2675 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2676 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2677 		break;
2678 	case RTE_CRYPTO_AUTH_AES_CMAC:
2679 		authdata.algtype = OP_ALG_ALGSEL_AES;
2680 		authdata.algmode = OP_ALG_AAI_CMAC;
2681 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2682 		break;
2683 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2684 	case RTE_CRYPTO_AUTH_AES_GMAC:
2685 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2686 	case RTE_CRYPTO_AUTH_NULL:
2687 	case RTE_CRYPTO_AUTH_SHA1:
2688 	case RTE_CRYPTO_AUTH_SHA256:
2689 	case RTE_CRYPTO_AUTH_SHA512:
2690 	case RTE_CRYPTO_AUTH_SHA224:
2691 	case RTE_CRYPTO_AUTH_SHA384:
2692 	case RTE_CRYPTO_AUTH_MD5:
2693 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2694 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2695 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2696 			      auth_xform->algo);
2697 		ret = -ENOTSUP;
2698 		goto error_out;
2699 	default:
2700 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2701 			      auth_xform->algo);
2702 		ret = -ENOTSUP;
2703 		goto error_out;
2704 	}
2705 	cipherdata.key = (size_t)session->cipher_key.data;
2706 	cipherdata.keylen = session->cipher_key.length;
2707 	cipherdata.key_enc_flags = 0;
2708 	cipherdata.key_type = RTA_DATA_IMM;
2709 
2710 	switch (cipher_xform->algo) {
2711 	case RTE_CRYPTO_CIPHER_AES_CBC:
2712 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2713 		cipherdata.algmode = OP_ALG_AAI_CBC;
2714 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2715 		break;
2716 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2717 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2718 		cipherdata.algmode = OP_ALG_AAI_CBC;
2719 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2720 		break;
2721 	case RTE_CRYPTO_CIPHER_DES_CBC:
2722 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2723 		cipherdata.algmode = OP_ALG_AAI_CBC;
2724 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2725 		break;
2726 	case RTE_CRYPTO_CIPHER_AES_CTR:
2727 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2728 		cipherdata.algmode = OP_ALG_AAI_CTR;
2729 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2730 		break;
2731 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2732 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2733 	case RTE_CRYPTO_CIPHER_NULL:
2734 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2735 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2736 	case RTE_CRYPTO_CIPHER_AES_ECB:
2737 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2738 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2739 			      cipher_xform->algo);
2740 		ret = -ENOTSUP;
2741 		goto error_out;
2742 	default:
2743 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2744 			      cipher_xform->algo);
2745 		ret = -ENOTSUP;
2746 		goto error_out;
2747 	}
2748 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2749 				DIR_ENC : DIR_DEC;
2750 
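	/* Query which of the cipher and auth keys fit inline in the shared
	 * descriptor; keys that do not fit are passed by pointer instead.
	 */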
2751 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2752 	priv->flc_desc[0].desc[1] = authdata.keylen;
2753 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2754 			       DESC_JOB_IO_LEN,
2755 			       (unsigned int *)priv->flc_desc[0].desc,
2756 			       &priv->flc_desc[0].desc[2], 2);
2757 
2758 	if (err < 0) {
2759 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2760 		ret = -EINVAL;
2761 		goto error_out;
2762 	}
2763 	if (priv->flc_desc[0].desc[2] & 1) {
2764 		cipherdata.key_type = RTA_DATA_IMM;
2765 	} else {
2766 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2767 		cipherdata.key_type = RTA_DATA_PTR;
2768 	}
2769 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2770 		authdata.key_type = RTA_DATA_IMM;
2771 	} else {
2772 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2773 		authdata.key_type = RTA_DATA_PTR;
2774 	}
2775 	priv->flc_desc[0].desc[0] = 0;
2776 	priv->flc_desc[0].desc[1] = 0;
2777 	priv->flc_desc[0].desc[2] = 0;
2778 
2779 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2780 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2781 					      0, SHR_SERIAL,
2782 					      &cipherdata, &authdata,
2783 					      session->iv.length,
2784 					      session->digest_length,
2785 					      session->dir);
2786 		if (bufsize < 0) {
2787 			DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2788 			ret = -EINVAL;
2789 			goto error_out;
2790 		}
2791 	} else {
2792 		DPAA2_SEC_ERR("Hash before cipher not supported");
2793 		ret = -ENOTSUP;
2794 		goto error_out;
2795 	}
2796 
2797 	flc->word1_sdl = (uint8_t)bufsize;
2798 	session->ctxt = priv;
2799 #ifdef CAAM_DESC_DEBUG
2800 	int i;
2801 	for (i = 0; i < bufsize; i++)
2802 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2803 			    i, priv->flc_desc[0].desc[i]);
2804 #endif
2805 
2806 	return ret;
2807 
2808 error_out:
2809 	rte_free(session->cipher_key.data);
2810 	rte_free(session->auth_key.data);
2811 	rte_free(priv);
2812 	return ret;
2813 }
2814 
2815 static int
2816 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
2817 {
2818 	dpaa2_sec_session *session = sess;
2819 	int ret;
2820 
2821 	PMD_INIT_FUNC_TRACE();
2822 
2823 	if (unlikely(sess == NULL)) {
2824 		DPAA2_SEC_ERR("Invalid session struct");
2825 		return -EINVAL;
2826 	}
2827 
2828 	memset(session, 0, sizeof(dpaa2_sec_session));
2829 	/* Default IV length = 0 */
2830 	session->iv.length = 0;
2831 
2832 	/* Cipher Only */
2833 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2834 		ret = dpaa2_sec_cipher_init(xform, session);
2835 
2836 	/* Authentication Only */
2837 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2838 		   xform->next == NULL) {
2839 		ret = dpaa2_sec_auth_init(xform, session);
2840 
2841 	/* Cipher then Authenticate */
2842 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2843 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2844 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2845 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2846 			ret = dpaa2_sec_auth_init(xform, session);
2847 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2848 			ret = dpaa2_sec_cipher_init(xform, session);
2849 		else
2850 			ret = dpaa2_sec_aead_chain_init(xform, session);
2851 	/* Authenticate then Cipher */
2852 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2853 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2854 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2855 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2856 			ret = dpaa2_sec_cipher_init(xform, session);
2857 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2858 			ret = dpaa2_sec_auth_init(xform, session);
2859 		else
2860 			ret = dpaa2_sec_aead_chain_init(xform, session);
2861 	/* AEAD operation for AES-GCM kind of Algorithms */
2862 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2863 		   xform->next == NULL) {
2864 		ret = dpaa2_sec_aead_init(xform, session);
2865 
2866 	} else {
2867 		DPAA2_SEC_ERR("Invalid crypto type");
2868 		return -EINVAL;
2869 	}
2870 
2871 	return ret;
2872 }
2873 
2874 static int
2875 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2876 			dpaa2_sec_session *session,
2877 			struct alginfo *aeaddata)
2878 {
2879 	PMD_INIT_FUNC_TRACE();
2880 
2881 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2882 					       RTE_CACHE_LINE_SIZE);
2883 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2884 		DPAA2_SEC_ERR("No Memory for aead key");
2885 		return -ENOMEM;
2886 	}
2887 	memcpy(session->aead_key.data, aead_xform->key.data,
2888 	       aead_xform->key.length);
2889 
2890 	session->digest_length = aead_xform->digest_length;
2891 	session->aead_key.length = aead_xform->key.length;
2892 
2893 	aeaddata->key = (size_t)session->aead_key.data;
2894 	aeaddata->keylen = session->aead_key.length;
2895 	aeaddata->key_enc_flags = 0;
2896 	aeaddata->key_type = RTA_DATA_IMM;
2897 
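	/* The IPsec protocol descriptor encodes the ICV length in the
	 * algorithm selector, so pick the GCM/CCM variant matching the
	 * requested digest length.
	 */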
2898 	switch (aead_xform->algo) {
2899 	case RTE_CRYPTO_AEAD_AES_GCM:
2900 		switch (session->digest_length) {
2901 		case 8:
2902 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2903 			break;
2904 		case 12:
2905 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2906 			break;
2907 		case 16:
2908 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2909 			break;
2910 		default:
2911 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2912 				      session->digest_length);
2913 			return -EINVAL;
2914 		}
2915 		aeaddata->algmode = OP_ALG_AAI_GCM;
2916 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2917 		break;
2918 	case RTE_CRYPTO_AEAD_AES_CCM:
2919 		switch (session->digest_length) {
2920 		case 8:
2921 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2922 			break;
2923 		case 12:
2924 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2925 			break;
2926 		case 16:
2927 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2928 			break;
2929 		default:
2930 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2931 				      session->digest_length);
2932 			return -EINVAL;
2933 		}
2934 		aeaddata->algmode = OP_ALG_AAI_CCM;
2935 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2936 		break;
2937 	default:
2938 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2939 			      aead_xform->algo);
2940 		return -ENOTSUP;
2941 	}
2942 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2943 				DIR_ENC : DIR_DEC;
2944 
2945 	return 0;
2946 }
2947 
2948 static int
2949 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2950 	struct rte_crypto_auth_xform *auth_xform,
2951 	dpaa2_sec_session *session,
2952 	struct alginfo *cipherdata,
2953 	struct alginfo *authdata)
2954 {
2955 	if (cipher_xform) {
2956 		session->cipher_key.data = rte_zmalloc(NULL,
2957 						       cipher_xform->key.length,
2958 						       RTE_CACHE_LINE_SIZE);
2959 		if (session->cipher_key.data == NULL &&
2960 				cipher_xform->key.length > 0) {
2961 			DPAA2_SEC_ERR("No Memory for cipher key");
2962 			return -ENOMEM;
2963 		}
2964 
2965 		session->cipher_key.length = cipher_xform->key.length;
2966 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2967 				cipher_xform->key.length);
2968 		session->cipher_alg = cipher_xform->algo;
2969 	} else {
2970 		session->cipher_key.data = NULL;
2971 		session->cipher_key.length = 0;
2972 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2973 	}
2974 
2975 	if (auth_xform) {
2976 		session->auth_key.data = rte_zmalloc(NULL,
2977 						auth_xform->key.length,
2978 						RTE_CACHE_LINE_SIZE);
2979 		if (session->auth_key.data == NULL &&
2980 				auth_xform->key.length > 0) {
2981 			DPAA2_SEC_ERR("No Memory for auth key");
2982 			return -ENOMEM;
2983 		}
2984 		session->auth_key.length = auth_xform->key.length;
2985 		memcpy(session->auth_key.data, auth_xform->key.data,
2986 				auth_xform->key.length);
2987 		session->auth_alg = auth_xform->algo;
2988 		session->digest_length = auth_xform->digest_length;
2989 	} else {
2990 		session->auth_key.data = NULL;
2991 		session->auth_key.length = 0;
2992 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2993 	}
2994 
2995 	authdata->key = (size_t)session->auth_key.data;
2996 	authdata->keylen = session->auth_key.length;
2997 	authdata->key_enc_flags = 0;
2998 	authdata->key_type = RTA_DATA_IMM;
2999 	switch (session->auth_alg) {
3000 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
3001 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
3002 		authdata->algmode = OP_ALG_AAI_HMAC;
3003 		break;
3004 	case RTE_CRYPTO_AUTH_MD5_HMAC:
3005 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
3006 		authdata->algmode = OP_ALG_AAI_HMAC;
3007 		break;
3008 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
3009 		authdata->algmode = OP_ALG_AAI_HMAC;
3010 		if (session->digest_length == 6)
3011 			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_96;
3012 		else if (session->digest_length == 14)
3013 			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_224;
3014 		else
3015 			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_112;
3016 		break;
3017 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
3018 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
3019 		authdata->algmode = OP_ALG_AAI_HMAC;
3020 		if (session->digest_length != 16)
3021 			DPAA2_SEC_WARN(
3022 			"+++Using sha256-hmac truncated len is non-standard, "
3023 			"it will not work with lookaside proto");
3024 		break;
3025 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
3026 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
3027 		authdata->algmode = OP_ALG_AAI_HMAC;
3028 		break;
3029 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
3030 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
3031 		authdata->algmode = OP_ALG_AAI_HMAC;
3032 		break;
3033 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
3034 		authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
3035 		authdata->algmode = OP_ALG_AAI_XCBC_MAC;
3036 		break;
3037 	case RTE_CRYPTO_AUTH_AES_CMAC:
3038 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
3039 		authdata->algmode = OP_ALG_AAI_CMAC;
3040 		break;
3041 	case RTE_CRYPTO_AUTH_NULL:
3042 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
3043 		break;
3044 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3045 	case RTE_CRYPTO_AUTH_SHA1:
3046 	case RTE_CRYPTO_AUTH_SHA256:
3047 	case RTE_CRYPTO_AUTH_SHA512:
3048 	case RTE_CRYPTO_AUTH_SHA224:
3049 	case RTE_CRYPTO_AUTH_SHA384:
3050 	case RTE_CRYPTO_AUTH_MD5:
3051 	case RTE_CRYPTO_AUTH_AES_GMAC:
3052 	case RTE_CRYPTO_AUTH_KASUMI_F9:
3053 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
3054 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
3055 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3056 			      session->auth_alg);
3057 		return -ENOTSUP;
3058 	default:
3059 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
3060 			      session->auth_alg);
3061 		return -ENOTSUP;
3062 	}
3063 	cipherdata->key = (size_t)session->cipher_key.data;
3064 	cipherdata->keylen = session->cipher_key.length;
3065 	cipherdata->key_enc_flags = 0;
3066 	cipherdata->key_type = RTA_DATA_IMM;
3067 
3068 	switch (session->cipher_alg) {
3069 	case RTE_CRYPTO_CIPHER_AES_CBC:
3070 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
3071 		cipherdata->algmode = OP_ALG_AAI_CBC;
3072 		break;
3073 	case RTE_CRYPTO_CIPHER_3DES_CBC:
3074 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
3075 		cipherdata->algmode = OP_ALG_AAI_CBC;
3076 		break;
3077 	case RTE_CRYPTO_CIPHER_DES_CBC:
3078 		cipherdata->algtype = OP_PCL_IPSEC_DES;
3079 		cipherdata->algmode = OP_ALG_AAI_CBC;
3080 		break;
3081 	case RTE_CRYPTO_CIPHER_AES_CTR:
3082 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
3083 		cipherdata->algmode = OP_ALG_AAI_CTR;
3084 		break;
3085 	case RTE_CRYPTO_CIPHER_NULL:
3086 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
3087 		break;
3088 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3089 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3090 	case RTE_CRYPTO_CIPHER_3DES_ECB:
3091 	case RTE_CRYPTO_CIPHER_3DES_CTR:
3092 	case RTE_CRYPTO_CIPHER_AES_ECB:
3093 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
3094 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
3095 			      session->cipher_alg);
3096 		return -ENOTSUP;
3097 	default:
3098 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3099 			      session->cipher_alg);
3100 		return -ENOTSUP;
3101 	}
3102 
3103 	return 0;
3104 }
3105 
3106 static int
3107 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3108 			    struct rte_security_session_conf *conf,
3109 			    void *sess)
3110 {
3111 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
3112 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3113 	struct rte_crypto_auth_xform *auth_xform = NULL;
3114 	struct rte_crypto_aead_xform *aead_xform = NULL;
3115 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3116 	struct ctxt_priv *priv;
3117 	struct alginfo authdata, cipherdata;
3118 	int bufsize;
3119 	struct sec_flow_context *flc;
3120 	int ret = -1;
3121 
3122 	PMD_INIT_FUNC_TRACE();
3123 
3124 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3125 				sizeof(struct ctxt_priv) +
3126 				sizeof(struct sec_flc_desc),
3127 				RTE_CACHE_LINE_SIZE);
3128 
3129 	if (priv == NULL) {
3130 		DPAA2_SEC_ERR("No memory for priv CTXT");
3131 		return -ENOMEM;
3132 	}
3133 
3134 	flc = &priv->flc_desc[0].flc;
3135 
3136 	if (ipsec_xform->life.bytes_hard_limit != 0 ||
3137 	    ipsec_xform->life.bytes_soft_limit != 0 ||
3138 	    ipsec_xform->life.packets_hard_limit != 0 ||
3139 	    ipsec_xform->life.packets_soft_limit != 0) {
3140 		/* SA lifetime limits are not supported; free priv to avoid a leak */
3141 		rte_free(priv);
3142 		return -ENOTSUP;
3143 	}
3141 
3142 	memset(session, 0, sizeof(dpaa2_sec_session));
3143 
3144 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3145 		cipher_xform = &conf->crypto_xform->cipher;
3146 		if (conf->crypto_xform->next)
3147 			auth_xform = &conf->crypto_xform->next->auth;
3148 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3149 					session, &cipherdata, &authdata);
3150 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3151 		auth_xform = &conf->crypto_xform->auth;
3152 		if (conf->crypto_xform->next)
3153 			cipher_xform = &conf->crypto_xform->next->cipher;
3154 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3155 					session, &cipherdata, &authdata);
3156 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
3157 		aead_xform = &conf->crypto_xform->aead;
3158 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
3159 					session, &cipherdata);
3160 		authdata.keylen = 0;
3161 		authdata.algtype = 0;
3162 	} else {
3163 		DPAA2_SEC_ERR("XFORM not specified");
3164 		ret = -EINVAL;
3165 		goto out;
3166 	}
3167 	if (ret) {
3168 		DPAA2_SEC_ERR("Failed to process xform");
3169 		goto out;
3170 	}
3171 
3172 	session->ctxt_type = DPAA2_SEC_IPSEC;
3173 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3174 		uint8_t hdr[48] = {};
3175 		struct rte_ipv4_hdr *ip4_hdr;
3176 		struct rte_ipv6_hdr *ip6_hdr;
3177 		struct ipsec_encap_pdb encap_pdb;
3178 
3179 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
3180 		/* For Sec Proto only one descriptor is required. */
3181 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
3182 
3183 		/* copy algo specific data to PDB */
3184 		switch (cipherdata.algtype) {
3185 		case OP_PCL_IPSEC_AES_CTR:
3186 			encap_pdb.ctr.ctr_initial = 0x00000001;
3187 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3188 			break;
3189 		case OP_PCL_IPSEC_AES_GCM8:
3190 		case OP_PCL_IPSEC_AES_GCM12:
3191 		case OP_PCL_IPSEC_AES_GCM16:
3192 			memcpy(encap_pdb.gcm.salt,
3193 				(uint8_t *)&(ipsec_xform->salt), 4);
3194 			break;
3195 		}
3196 
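		/* Build the encapsulation PDB: the outer IP header is
		 * supplied inline in the PDB (OIHI_PDB_INL) and sequence
		 * number rollover (SNR) is permitted.
		 */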
3197 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3198 			PDBOPTS_ESP_OIHI_PDB_INL |
3199 			PDBHMO_ESP_SNR;
3200 
3201 		if (ipsec_xform->options.iv_gen_disable == 0)
3202 			encap_pdb.options |= PDBOPTS_ESP_IVSRC;
3203 		if (ipsec_xform->options.esn)
3204 			encap_pdb.options |= PDBOPTS_ESP_ESN;
3205 		if (ipsec_xform->options.copy_dscp)
3206 			encap_pdb.options |= PDBOPTS_ESP_DIFFSERV;
3207 		if (ipsec_xform->options.ecn)
3208 			encap_pdb.options |= PDBOPTS_ESP_TECN;
3209 		encap_pdb.spi = ipsec_xform->spi;
3210 		session->dir = DIR_ENC;
3211 		if (ipsec_xform->tunnel.type ==
3212 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3213 			if (ipsec_xform->options.dec_ttl)
3214 				encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3215 			if (ipsec_xform->options.copy_df)
3216 				encap_pdb.options |= PDBHMO_ESP_DFBIT;
3217 			ip4_hdr = (struct rte_ipv4_hdr *)hdr;
3218 
3219 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv4_hdr);
3220 			ip4_hdr->version_ihl = RTE_IPV4_VHL_DEF;
3221 			ip4_hdr->time_to_live = ipsec_xform->tunnel.ipv4.ttl ?
3222 						ipsec_xform->tunnel.ipv4.ttl : 0x40;
3223 			ip4_hdr->type_of_service = (ipsec_xform->tunnel.ipv4.dscp << 2);
3224 
3225 			ip4_hdr->hdr_checksum = 0;
3226 			ip4_hdr->packet_id = 0;
3227 			if (ipsec_xform->tunnel.ipv4.df) {
3228 				uint16_t frag_off = 0;
3229 
3230 				frag_off |= RTE_IPV4_HDR_DF_FLAG;
3231 				ip4_hdr->fragment_offset = rte_cpu_to_be_16(frag_off);
3232 			} else {
3233 				ip4_hdr->fragment_offset = 0;
3234 			}
3234 
3235 			memcpy(&ip4_hdr->src_addr, &ipsec_xform->tunnel.ipv4.src_ip,
3236 			       sizeof(struct in_addr));
3237 			memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip,
3238 			       sizeof(struct in_addr));
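			/* NAT traversal: insert a UDP header (default port
			 * 4500) between the outer IPv4 header and ESP.
			 */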
3239 			if (ipsec_xform->options.udp_encap) {
3240 				uint16_t sport, dport;
3241 				struct rte_udp_hdr *uh =
3242 					(struct rte_udp_hdr *) (hdr +
3243 						sizeof(struct rte_ipv4_hdr));
3244 
3245 				sport = ipsec_xform->udp.sport ?
3246 					ipsec_xform->udp.sport : 4500;
3247 				dport = ipsec_xform->udp.dport ?
3248 					ipsec_xform->udp.dport : 4500;
3249 				uh->src_port = rte_cpu_to_be_16(sport);
3250 				uh->dst_port = rte_cpu_to_be_16(dport);
3251 				uh->dgram_len = 0;
3252 				uh->dgram_cksum = 0;
3253 
3254 				ip4_hdr->next_proto_id = IPPROTO_UDP;
3255 				ip4_hdr->total_length =
3256 					rte_cpu_to_be_16(
3257 						sizeof(struct rte_ipv4_hdr) +
3258 						sizeof(struct rte_udp_hdr));
3259 				encap_pdb.ip_hdr_len +=
3260 					sizeof(struct rte_udp_hdr);
3261 				encap_pdb.options |=
3262 					PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC;
3263 			} else {
3264 				ip4_hdr->total_length =
3265 					rte_cpu_to_be_16(
3266 						sizeof(struct rte_ipv4_hdr));
3267 				ip4_hdr->next_proto_id = IPPROTO_ESP;
3268 			}
3269 
3270 			ip4_hdr->hdr_checksum = calc_chksum((uint16_t *)
3271 				(void *)ip4_hdr, sizeof(struct rte_ipv4_hdr));
3272 
3273 		} else if (ipsec_xform->tunnel.type ==
3274 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3275 			ip6_hdr = (struct rte_ipv6_hdr *)hdr;
3276 
3277 			ip6_hdr->vtc_flow = rte_cpu_to_be_32(
3278 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
3279 				((ipsec_xform->tunnel.ipv6.dscp <<
3280 					RTE_IPV6_HDR_TC_SHIFT) &
3281 					RTE_IPV6_HDR_TC_MASK) |
3282 				((ipsec_xform->tunnel.ipv6.flabel <<
3283 					RTE_IPV6_HDR_FL_SHIFT) &
3284 					RTE_IPV6_HDR_FL_MASK));
3285 			/* Payload length will be updated by HW */
3286 			ip6_hdr->payload_len = 0;
3287 			ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ?
3288 					ipsec_xform->tunnel.ipv6.hlimit : 0x40;
3289 			ip6_hdr->proto = (ipsec_xform->proto ==
3290 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3291 					IPPROTO_ESP : IPPROTO_AH;
3292 			memcpy(&ip6_hdr->src_addr,
3293 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
3294 			memcpy(&ip6_hdr->dst_addr,
3295 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
3296 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
3297 		}
3298 
3299 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
3300 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3301 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
3302 				hdr, &cipherdata, &authdata);
3303 	} else if (ipsec_xform->direction ==
3304 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3305 		struct ipsec_decap_pdb decap_pdb;
3306 
3307 		flc->dhr = SEC_FLC_DHR_INBOUND;
3308 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3309 		/* copy algo specific data to PDB */
3310 		switch (cipherdata.algtype) {
3311 		case OP_PCL_IPSEC_AES_CTR:
3312 			decap_pdb.ctr.ctr_initial = 0x00000001;
3313 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3314 			break;
3315 		case OP_PCL_IPSEC_AES_GCM8:
3316 		case OP_PCL_IPSEC_AES_GCM12:
3317 		case OP_PCL_IPSEC_AES_GCM16:
3318 			memcpy(decap_pdb.gcm.salt,
3319 				(uint8_t *)&(ipsec_xform->salt), 4);
3320 			break;
3321 		}
3322 
3323 		if (ipsec_xform->tunnel.type ==
3324 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3325 			decap_pdb.options = sizeof(struct ip) << 16;
3326 			if (ipsec_xform->options.copy_df)
3327 				decap_pdb.options |= PDBHMO_ESP_DFV;
3328 			if (ipsec_xform->options.dec_ttl)
3329 				decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL;
3330 		} else {
3331 			decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16;
3332 		}
3333 		if (ipsec_xform->options.esn)
3334 			decap_pdb.options |= PDBOPTS_ESP_ESN;
3335 		if (ipsec_xform->options.copy_dscp)
3336 			decap_pdb.options |= PDBOPTS_ESP_DIFFSERV;
3337 		if (ipsec_xform->options.ecn)
3338 			decap_pdb.options |= PDBOPTS_ESP_TECN;
3339 
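		/* Anti-replay window: round the requested size up to a
		 * power of two; hardware before SEC era 10 caps it at 128.
		 */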
3340 		if (ipsec_xform->replay_win_sz) {
3341 			uint32_t win_sz;
3342 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3343 
3344 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3345 				DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
3346 				win_sz = 128;
3347 			}
3348 			switch (win_sz) {
3349 			case 1:
3350 			case 2:
3351 			case 4:
3352 			case 8:
3353 			case 16:
3354 			case 32:
3355 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3356 				break;
3357 			case 64:
3358 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3359 				break;
3360 			case 256:
3361 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3362 				break;
3363 			case 512:
3364 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3365 				break;
3366 			case 1024:
3367 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3368 				break;
3369 			case 128:
3370 			default:
3371 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3372 			}
3373 		}
3374 		session->dir = DIR_DEC;
3375 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3376 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3377 				SHR_WAIT : SHR_SERIAL,
3378 				&decap_pdb, &cipherdata, &authdata);
3379 	} else {
3380 		goto out;
3381 	}
3381 
3382 	if (bufsize < 0) {
3383 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
3384 		goto out;
3385 	}
3386 
3387 	flc->word1_sdl = (uint8_t)bufsize;
3388 
3389 	/* Enable the stashing control bit */
3390 	DPAA2_SET_FLC_RSC(flc);
3391 	flc->word2_rflc_31_0 = lower_32_bits(
3392 			(size_t)&(((struct dpaa2_sec_qp *)
3393 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3394 	flc->word3_rflc_63_32 = upper_32_bits(
3395 			(size_t)&(((struct dpaa2_sec_qp *)
3396 			dev->data->queue_pairs[0])->rx_vq));
3397 
3398 	/* Set EWS bit i.e. enable write-safe */
3399 	DPAA2_SET_FLC_EWS(flc);
3400 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3401 	DPAA2_SET_FLC_REUSE_BS(flc);
3402 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3403 	DPAA2_SET_FLC_REUSE_FF(flc);
3404 
3405 	session->ctxt = priv;
3406 
3407 	return 0;
3408 out:
3409 	rte_free(session->auth_key.data);
3410 	rte_free(session->cipher_key.data);
3411 	rte_free(priv);
3412 	return ret;
3413 }
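
/*
 * Illustrative sketch (not part of the driver): the IPsec path above is
 * normally reached through the generic rte_security API. The field
 * values below are assumptions; the xform chain and session mempool are
 * application-provided.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 0x1000,
 *			.salt = 0x12345678,
 *			.options = { .esn = 1, .copy_dscp = 1 },
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *			.replay_win_sz = 128,
 *		},
 *		.crypto_xform = &aead_xform,
 *	};
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 */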
3414 
3415 static int
3416 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3417 			   struct rte_security_session_conf *conf,
3418 			   void *sess)
3419 {
3420 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3421 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3422 	struct rte_crypto_auth_xform *auth_xform = NULL;
3423 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3424 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3425 	struct ctxt_priv *priv;
3426 	struct alginfo authdata, cipherdata;
3427 	struct alginfo *p_authdata = NULL;
3428 	int bufsize = -1;
3429 	struct sec_flow_context *flc;
3430 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3431 	int swap = true;
3432 #else
3433 	int swap = false;
3434 #endif
3435 
3436 	PMD_INIT_FUNC_TRACE();
3437 
3438 	memset(session, 0, sizeof(dpaa2_sec_session));
3439 
3440 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3441 				sizeof(struct ctxt_priv) +
3442 				sizeof(struct sec_flc_desc),
3443 				RTE_CACHE_LINE_SIZE);
3444 
3445 	if (priv == NULL) {
3446 		DPAA2_SEC_ERR("No memory for priv CTXT");
3447 		return -ENOMEM;
3448 	}
3449 
3450 	flc = &priv->flc_desc[0].flc;
3451 
3452 	/* find xfrm types */
3453 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3454 		cipher_xform = &xform->cipher;
3455 		if (xform->next != NULL &&
3456 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3457 			session->ext_params.aead_ctxt.auth_cipher_text = true;
3458 			auth_xform = &xform->next->auth;
3459 		}
3460 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3461 		auth_xform = &xform->auth;
3462 		if (xform->next != NULL &&
3463 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3464 			session->ext_params.aead_ctxt.auth_cipher_text = false;
3465 			cipher_xform = &xform->next->cipher;
3466 		}
3467 	} else {
3468 		DPAA2_SEC_ERR("Invalid crypto type");
3469 		return -EINVAL;
3470 	}
3471 
3472 	session->ctxt_type = DPAA2_SEC_PDCP;
3473 	if (cipher_xform) {
3474 		session->cipher_key.data = rte_zmalloc(NULL,
3475 					       cipher_xform->key.length,
3476 					       RTE_CACHE_LINE_SIZE);
3477 		if (session->cipher_key.data == NULL &&
3478 				cipher_xform->key.length > 0) {
3479 			DPAA2_SEC_ERR("No Memory for cipher key");
3480 			rte_free(priv);
3481 			return -ENOMEM;
3482 		}
3483 		session->cipher_key.length = cipher_xform->key.length;
3484 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3485 			cipher_xform->key.length);
3486 		session->dir =
3487 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3488 					DIR_ENC : DIR_DEC;
3489 		session->cipher_alg = cipher_xform->algo;
3490 	} else {
3491 		session->cipher_key.data = NULL;
3492 		session->cipher_key.length = 0;
3493 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3494 		session->dir = DIR_ENC;
3495 	}
3496 
3497 	session->pdcp.domain = pdcp_xform->domain;
3498 	session->pdcp.bearer = pdcp_xform->bearer;
3499 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3500 	session->pdcp.sn_size = pdcp_xform->sn_size;
3501 	session->pdcp.hfn = pdcp_xform->hfn;
3502 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3503 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3504 	/* HFN override (hfn_ovd) offset location is stored in the iv.offset value */
3505 	if (cipher_xform)
3506 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3507 
3508 	cipherdata.key = (size_t)session->cipher_key.data;
3509 	cipherdata.keylen = session->cipher_key.length;
3510 	cipherdata.key_enc_flags = 0;
3511 	cipherdata.key_type = RTA_DATA_IMM;
3512 
3513 	switch (session->cipher_alg) {
3514 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3515 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3516 		break;
3517 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3518 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3519 		break;
3520 	case RTE_CRYPTO_CIPHER_AES_CTR:
3521 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3522 		break;
3523 	case RTE_CRYPTO_CIPHER_NULL:
3524 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3525 		break;
3526 	default:
3527 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3528 			      session->cipher_alg);
3529 		goto out;
3530 	}
3531 
3532 	if (auth_xform) {
3533 		session->auth_key.data = rte_zmalloc(NULL,
3534 						     auth_xform->key.length,
3535 						     RTE_CACHE_LINE_SIZE);
3536 		if (!session->auth_key.data &&
3537 		    auth_xform->key.length > 0) {
3538 			DPAA2_SEC_ERR("No Memory for auth key");
3539 			rte_free(session->cipher_key.data);
3540 			rte_free(priv);
3541 			return -ENOMEM;
3542 		}
3543 		session->auth_key.length = auth_xform->key.length;
3544 		memcpy(session->auth_key.data, auth_xform->key.data,
3545 		       auth_xform->key.length);
3546 		session->auth_alg = auth_xform->algo;
3547 	} else {
3548 		session->auth_key.data = NULL;
3549 		session->auth_key.length = 0;
3550 		session->auth_alg = 0;
3551 		authdata.algtype = PDCP_AUTH_TYPE_NULL;
3552 	}
3553 	authdata.key = (size_t)session->auth_key.data;
3554 	authdata.keylen = session->auth_key.length;
3555 	authdata.key_enc_flags = 0;
3556 	authdata.key_type = RTA_DATA_IMM;
3557 
3558 	if (session->auth_alg) {
3559 		switch (session->auth_alg) {
3560 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3561 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3562 			break;
3563 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3564 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3565 			break;
3566 		case RTE_CRYPTO_AUTH_AES_CMAC:
3567 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3568 			break;
3569 		case RTE_CRYPTO_AUTH_NULL:
3570 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3571 			break;
3572 		default:
3573 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3574 				      session->auth_alg);
3575 			goto out;
3576 		}
3577 		p_authdata = &authdata;
3578 	} else {
3579 		if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3580 			DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3581 			goto out;
3582 		}
3586 	}
3591 
3592 	if (pdcp_xform->sdap_enabled) {
3593 		int nb_keys_to_inline =
3594 			rta_inline_pdcp_sdap_query(authdata.algtype,
3595 					cipherdata.algtype,
3596 					session->pdcp.sn_size,
3597 					session->pdcp.hfn_ovd);
3598 		if (nb_keys_to_inline >= 1) {
3599 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3600 			cipherdata.key_type = RTA_DATA_PTR;
3601 		}
3602 		if (nb_keys_to_inline >= 2) {
3603 			authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3604 			authdata.key_type = RTA_DATA_PTR;
3605 		}
3606 	} else {
3607 		if (rta_inline_pdcp_query(authdata.algtype,
3608 					cipherdata.algtype,
3609 					session->pdcp.sn_size,
3610 					session->pdcp.hfn_ovd)) {
3611 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3612 			cipherdata.key_type = RTA_DATA_PTR;
3613 		}
3614 	}
3615 
3616 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3617 		if (session->dir == DIR_ENC)
3618 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3619 					priv->flc_desc[0].desc, 1, swap,
3620 					pdcp_xform->hfn,
3621 					session->pdcp.sn_size,
3622 					pdcp_xform->bearer,
3623 					pdcp_xform->pkt_dir,
3624 					pdcp_xform->hfn_threshold,
3625 					&cipherdata, &authdata);
3626 		else if (session->dir == DIR_DEC)
3627 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3628 					priv->flc_desc[0].desc, 1, swap,
3629 					pdcp_xform->hfn,
3630 					session->pdcp.sn_size,
3631 					pdcp_xform->bearer,
3632 					pdcp_xform->pkt_dir,
3633 					pdcp_xform->hfn_threshold,
3634 					&cipherdata, &authdata);
3635 
3636 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3637 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3638 						     1, swap, &authdata);
3639 	} else {
3640 		if (session->dir == DIR_ENC) {
3641 			if (pdcp_xform->sdap_enabled)
3642 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3643 					priv->flc_desc[0].desc, 1, swap,
3644 					session->pdcp.sn_size,
3645 					pdcp_xform->hfn,
3646 					pdcp_xform->bearer,
3647 					pdcp_xform->pkt_dir,
3648 					pdcp_xform->hfn_threshold,
3649 					&cipherdata, p_authdata);
3650 			else
3651 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3652 					priv->flc_desc[0].desc, 1, swap,
3653 					session->pdcp.sn_size,
3654 					pdcp_xform->hfn,
3655 					pdcp_xform->bearer,
3656 					pdcp_xform->pkt_dir,
3657 					pdcp_xform->hfn_threshold,
3658 					&cipherdata, p_authdata);
3659 		} else if (session->dir == DIR_DEC) {
3660 			if (pdcp_xform->sdap_enabled)
3661 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3662 					priv->flc_desc[0].desc, 1, swap,
3663 					session->pdcp.sn_size,
3664 					pdcp_xform->hfn,
3665 					pdcp_xform->bearer,
3666 					pdcp_xform->pkt_dir,
3667 					pdcp_xform->hfn_threshold,
3668 					&cipherdata, p_authdata);
3669 			else
3670 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3671 					priv->flc_desc[0].desc, 1, swap,
3672 					session->pdcp.sn_size,
3673 					pdcp_xform->hfn,
3674 					pdcp_xform->bearer,
3675 					pdcp_xform->pkt_dir,
3676 					pdcp_xform->hfn_threshold,
3677 					&cipherdata, p_authdata);
3678 		}
3679 	}
3680 
3681 	if (bufsize < 0) {
3682 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3683 		goto out;
3684 	}
3685 
3686 	/* Enable the stashing control bit */
3687 	DPAA2_SET_FLC_RSC(flc);
3688 	flc->word2_rflc_31_0 = lower_32_bits(
3689 			(size_t)&(((struct dpaa2_sec_qp *)
3690 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3691 	flc->word3_rflc_63_32 = upper_32_bits(
3692 			(size_t)&(((struct dpaa2_sec_qp *)
3693 			dev->data->queue_pairs[0])->rx_vq));
3694 
3695 	flc->word1_sdl = (uint8_t)bufsize;
3696 
3697 	/* TODO: check the performance impact, or align as per the
3698 	 * descriptor type, before setting the EWS bit
3699 	 * (i.e. enable write-safe):
3700 	 * DPAA2_SET_FLC_EWS(flc);
3701 	 */
3702 
3703 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3704 	DPAA2_SET_FLC_REUSE_BS(flc);
3705 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3706 	DPAA2_SET_FLC_REUSE_FF(flc);
3707 
3708 	session->ctxt = priv;
3709 
3710 	return 0;
3711 out:
3712 	rte_free(session->auth_key.data);
3713 	rte_free(session->cipher_key.data);
3714 	rte_free(priv);
3715 	return -EINVAL;
3716 }
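
/*
 * Illustrative sketch (not part of the driver): a PDCP u-plane session
 * request as an application would submit it. The values are example
 * assumptions; the cipher (and optional auth) keys arrive through the
 * crypto_xform chain handled above.
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.bearer = 0x3,
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *			.hfn = 0x1,
 *			.hfn_threshold = 0xfffff,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 */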
3717 
3718 static int
3719 dpaa2_sec_security_session_create(void *dev,
3720 				  struct rte_security_session_conf *conf,
3721 				  struct rte_security_session *sess)
3722 {
3723 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
3724 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3725 	int ret;
3726 
3727 	switch (conf->protocol) {
3728 	case RTE_SECURITY_PROTOCOL_IPSEC:
3729 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3730 				sess_private_data);
3731 		break;
3732 	case RTE_SECURITY_PROTOCOL_MACSEC:
3733 		return -ENOTSUP;
3734 	case RTE_SECURITY_PROTOCOL_PDCP:
3735 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3736 				sess_private_data);
3737 		break;
3738 	default:
3739 		return -EINVAL;
3740 	}
3741 	if (ret != 0) {
3742 		DPAA2_SEC_ERR("Failed to configure session parameters");
3743 		return ret;
3744 	}
3745 
3746 	return 0;
3747 }
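
/*
 * Sketch (names assumed): the security context that dispatches to the
 * callback above is fetched from the cryptodev by the application:
 *
 *	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *s =
 *		rte_security_session_create(sec_ctx, &conf, sess_mp);
 */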
3748 
3749 /** Clear the memory of session so it doesn't leave key material behind */
3750 static int
3751 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3752 		struct rte_security_session *sess)
3753 {
3754 	PMD_INIT_FUNC_TRACE();
3755 	void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
3756 
3757 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3758 
3759 	if (sess_priv) {
3760 		rte_free(s->ctxt);
3761 		rte_free(s->cipher_key.data);
3762 		rte_free(s->auth_key.data);
3763 		memset(s, 0, sizeof(dpaa2_sec_session));
3764 	}
3765 	return 0;
3766 }
3767 
3768 static unsigned int
3769 dpaa2_sec_security_session_get_size(void *device __rte_unused)
3770 {
3771 	return sizeof(dpaa2_sec_session);
3772 }
3773 
3774 static int
3775 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
3776 		struct rte_crypto_sym_xform *xform,
3777 		struct rte_cryptodev_sym_session *sess)
3778 {
3779 	void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
3780 	int ret;
3781 
3782 	ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
3783 	if (ret != 0) {
3784 		DPAA2_SEC_ERR("Failed to configure session parameters");
3785 		/* Return session to mempool */
3786 		return ret;
3787 	}
3788 
3789 	return 0;
3790 }
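
/*
 * Sketch (application side, names assumed): symmetric sessions arrive
 * here through the generic cryptodev API:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(dev_id, &xform, sess_mp);
 */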
3791 
3792 /** Clear the memory of session so it doesn't leave key material behind */
3793 static void
3794 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
3795 		struct rte_cryptodev_sym_session *sess)
3796 {
3797 	PMD_INIT_FUNC_TRACE();
3798 	dpaa2_sec_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
3799 
3800 	if (s) {
3801 		rte_free(s->ctxt);
3802 		rte_free(s->cipher_key.data);
3803 		rte_free(s->auth_key.data);
3804 	}
3805 }
3806 
3807 static int
3808 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3809 			struct rte_cryptodev_config *config __rte_unused)
3810 {
3811 	PMD_INIT_FUNC_TRACE();
3812 
3813 	return 0;
3814 }
3815 
3816 static int
3817 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3818 {
3819 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3820 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3821 	struct dpseci_attr attr;
3822 	struct dpaa2_queue *dpaa2_q;
3823 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3824 					dev->data->queue_pairs;
3825 	struct dpseci_rx_queue_attr rx_attr;
3826 	struct dpseci_tx_queue_attr tx_attr;
3827 	int ret, i;
3828 
3829 	PMD_INIT_FUNC_TRACE();
3830 
3831 	/* Change the tx burst function if ordered queues are used */
3832 	if (priv->en_ordered)
3833 		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
3834 
3835 	memset(&attr, 0, sizeof(struct dpseci_attr));
3836 
3837 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3838 	if (ret) {
3839 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3840 			      priv->hw_id);
3841 		goto get_attr_failure;
3842 	}
3843 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3844 	if (ret) {
3845 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3846 		goto get_attr_failure;
3847 	}
3848 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3849 		dpaa2_q = &qp[i]->rx_vq;
3850 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3851 				    &rx_attr);
3852 		dpaa2_q->fqid = rx_attr.fqid;
3853 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3854 	}
3855 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3856 		dpaa2_q = &qp[i]->tx_vq;
3857 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3858 				    &tx_attr);
3859 		dpaa2_q->fqid = tx_attr.fqid;
3860 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3861 	}
3862 
3863 	return 0;
3864 get_attr_failure:
3865 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3866 	return -1;
3867 }
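
/*
 * Sketch of the standard bring-up that ends in dpaa2_sec_dev_start()
 * (application side; the values are assumptions):
 *
 *	struct rte_cryptodev_config cfg = {
 *		.socket_id = (int)rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *	};
 *	rte_cryptodev_configure(dev_id, &cfg);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 *	rte_cryptodev_start(dev_id);
 */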
3868 
3869 static void
3870 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3871 {
3872 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3873 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3874 	int ret;
3875 
3876 	PMD_INIT_FUNC_TRACE();
3877 
3878 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3879 	if (ret) {
3880 		DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3881 			     priv->hw_id);
3882 		return;
3883 	}
3884 
3885 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3886 	if (ret < 0) {
3887 		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3888 		return;
3889 	}
3890 }
3891 
3892 static int
3893 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3894 {
3895 	PMD_INIT_FUNC_TRACE();
3896 
3897 	return 0;
3898 }
3899 
3900 static void
3901 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3902 			struct rte_cryptodev_info *info)
3903 {
3904 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3905 
3906 	PMD_INIT_FUNC_TRACE();
3907 	if (info != NULL) {
3908 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3909 		info->feature_flags = dev->feature_flags;
3910 		info->capabilities = dpaa2_sec_capabilities;
3911 		/* No limit on the number of sessions */
3912 		info->sym.max_nb_sessions = 0;
3913 		info->driver_id = cryptodev_driver_id;
3914 	}
3915 }
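
/*
 * Sketch: applications query these values with
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *
 * max_nb_sessions == 0 advertises that the PMD imposes no session limit.
 */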
3916 
3917 static
3918 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3919 			 struct rte_cryptodev_stats *stats)
3920 {
3921 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3922 	struct fsl_mc_io dpseci;
3923 	struct dpseci_sec_counters counters = {0};
3924 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3925 					dev->data->queue_pairs;
3926 	int ret, i;
3927 
3928 	PMD_INIT_FUNC_TRACE();
3929 	if (stats == NULL) {
3930 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3931 		return;
3932 	}
3933 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3934 		if (qp == NULL || qp[i] == NULL) {
3935 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3936 			continue;
3937 		}
3938 
3939 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3940 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3941 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3942 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3943 	}
3944 
3945 	/* If a secondary process accesses the stats, the MCP portal in
3946 	 * priv->hw may hold the primary process address; use the secondary
3947 	 * process based MCP portal address for this object instead.
3948 	 */
3949 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3950 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3951 				      &counters);
3952 	if (ret) {
3953 		DPAA2_SEC_ERR("Reading SEC counters failed");
3954 	} else {
3955 		DPAA2_SEC_INFO("dpseci hardware stats:"
3956 			    "\n\tNum of Requests Dequeued = %" PRIu64
3957 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3958 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3959 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3960 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3961 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3962 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3963 			    counters.dequeued_requests,
3964 			    counters.ob_enc_requests,
3965 			    counters.ib_dec_requests,
3966 			    counters.ob_enc_bytes,
3967 			    counters.ob_prot_bytes,
3968 			    counters.ib_dec_bytes,
3969 			    counters.ib_valid_bytes);
3970 	}
3971 }
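
/*
 * Sketch: the per-queue software counters accumulated above surface
 * through the generic API,
 *
 *	struct rte_cryptodev_stats stats;
 *	rte_cryptodev_stats_get(dev_id, &stats);
 *
 * while the DPSECI hardware counters are only printed to the log.
 */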
3972 
3973 static
3974 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3975 {
3976 	int i;
3977 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3978 				   (dev->data->queue_pairs);
3979 
3980 	PMD_INIT_FUNC_TRACE();
3981 
3982 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3983 		if (qp[i] == NULL) {
3984 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3985 			continue;
3986 		}
3987 		qp[i]->tx_vq.rx_pkts = 0;
3988 		qp[i]->tx_vq.tx_pkts = 0;
3989 		qp[i]->tx_vq.err_pkts = 0;
3990 		qp[i]->rx_vq.rx_pkts = 0;
3991 		qp[i]->rx_vq.tx_pkts = 0;
3992 		qp[i]->rx_vq.err_pkts = 0;
3993 	}
3994 }
3995 
3996 static void __rte_hot
3997 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3998 				 const struct qbman_fd *fd,
3999 				 const struct qbman_result *dq,
4000 				 struct dpaa2_queue *rxq,
4001 				 struct rte_event *ev)
4002 {
4003 	struct dpaa2_sec_qp *qp;
4004 	/* Prefetching mbuf */
4005 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4006 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4007 
4008 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
4009 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4010 
4011 	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4012 	ev->flow_id = rxq->ev.flow_id;
4013 	ev->sub_event_type = rxq->ev.sub_event_type;
4014 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4015 	ev->op = RTE_EVENT_OP_NEW;
4016 	ev->sched_type = rxq->ev.sched_type;
4017 	ev->queue_id = rxq->ev.queue_id;
4018 	ev->priority = rxq->ev.priority;
4019 	ev->event_ptr = sec_fd_to_mbuf(fd, qp);
4020 
4021 	qbman_swp_dqrr_consume(swp, dq);
4022 }

4023 static void __rte_hot
4024 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
4025 				 const struct qbman_fd *fd,
4026 				 const struct qbman_result *dq,
4027 				 struct dpaa2_queue *rxq,
4028 				 struct rte_event *ev)
4029 {
4030 	uint8_t dqrr_index;
4031 	struct dpaa2_sec_qp *qp;
4032 	struct rte_crypto_op *crypto_op;
4033 	/* Prefetching mbuf */
4034 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4035 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4036 
4037 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
4038 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4039 
4040 	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4041 	ev->flow_id = rxq->ev.flow_id;
4042 	ev->sub_event_type = rxq->ev.sub_event_type;
4043 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4044 	ev->op = RTE_EVENT_OP_NEW;
4045 	ev->sched_type = rxq->ev.sched_type;
4046 	ev->queue_id = rxq->ev.queue_id;
4047 	ev->priority = rxq->ev.priority;
4048 
4049 	crypto_op = sec_fd_to_mbuf(fd, qp);
4050 	dqrr_index = qbman_get_dqrr_idx(dq);
4051 	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
4052 	DPAA2_PER_LCORE_DQRR_SIZE++;
4053 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
4054 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
4055 	ev->event_ptr = crypto_op;
4056 }
4057 
4058 static void __rte_hot
4059 dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
4060 				const struct qbman_fd *fd,
4061 				const struct qbman_result *dq,
4062 				struct dpaa2_queue *rxq,
4063 				struct rte_event *ev)
4064 {
4065 	struct rte_crypto_op *crypto_op;
4066 	struct dpaa2_sec_qp *qp;
4067 
4068 	/* Prefetching mbuf */
4069 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4070 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4071 
4072 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
4073 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4074 
4075 	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4076 	ev->flow_id = rxq->ev.flow_id;
4077 	ev->sub_event_type = rxq->ev.sub_event_type;
4078 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4079 	ev->op = RTE_EVENT_OP_NEW;
4080 	ev->sched_type = rxq->ev.sched_type;
4081 	ev->queue_id = rxq->ev.queue_id;
4082 	ev->priority = rxq->ev.priority;
4083 	crypto_op = sec_fd_to_mbuf(fd, qp);
4084 
4085 	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
4086 	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
4087 		DPAA2_EQCR_OPRID_SHIFT;
4088 	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
4089 		DPAA2_EQCR_SEQNUM_SHIFT;
4090 
4091 	qbman_swp_dqrr_consume(swp, dq);
4092 	ev->event_ptr = crypto_op;
4093 }
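
/*
 * Note on the encoding above: dpaa2_seqn(m_src) carries
 * DPAA2_ENQUEUE_FLAG_ORP plus the order point record id (at
 * DPAA2_EQCR_OPRID_SHIFT) and the frame sequence number (at
 * DPAA2_EQCR_SEQNUM_SHIFT), so that the ordered enqueue path can
 * restore the original frame order on transmit.
 */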
4094 
4095 int
4096 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
4097 		int qp_id,
4098 		struct dpaa2_dpcon_dev *dpcon,
4099 		const struct rte_event *event)
4100 {
4101 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4102 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4103 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
4104 	struct dpseci_rx_queue_cfg cfg;
4105 	uint8_t priority;
4106 	int ret;
4107 
4108 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
4109 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
4110 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
4111 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
4112 	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
4113 		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
4114 	else
4115 		return -EINVAL;
4116 
4117 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
4118 		   (dpcon->num_priorities - 1);
4119 
4120 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4121 	cfg.options = DPSECI_QUEUE_OPT_DEST;
4122 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
4123 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
4124 	cfg.dest_cfg.priority = priority;
4125 
4126 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
4127 	cfg.user_ctx = (size_t)(qp);
4128 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
4129 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
4130 		cfg.order_preservation_en = 1;
4131 	}
4132 
4133 	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
4134 		struct opr_cfg ocfg;
4135 
4136 		/* Restoration window size = 256 frames */
4137 		ocfg.oprrws = 3;
4138 		/* Restoration window size = 512 frames for LX2 */
4139 		if (dpaa2_svr_family == SVR_LX2160A)
4140 			ocfg.oprrws = 4;
4141 		/* Auto advance NESN window enabled */
4142 		ocfg.oa = 1;
4143 		/* Late arrival window size disabled */
4144 		ocfg.olws = 0;
4145 		/* ORL resource exhaustion advance NESN disabled */
4146 		ocfg.oeane = 0;
4147 
4148 		if (priv->en_loose_ordered)
4149 			ocfg.oloe = 1;
4150 		else
4151 			ocfg.oloe = 0;
4152 
4153 		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
4154 				   qp_id, OPR_OPT_CREATE, &ocfg);
4155 		if (ret) {
4156 			DPAA2_SEC_ERR("Error setting opr: ret: %d", ret);
4157 			return ret;
4158 		}
4159 		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
4160 		priv->en_ordered = 1;
4161 	}
4162 
4163 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4164 				  qp_id, &cfg);
4165 	if (ret) {
4166 		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
4167 		return ret;
4168 	}
4169 
4170 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
4171 
4172 	return 0;
4173 }
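
/*
 * Sketch (names assumed): binding SEC queue pair 0 to an event queue
 * might look like
 *
 *	struct rte_event ev = {
 *		.queue_id = 0,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *	ret = dpaa2_sec_eventq_attach(cdev, 0, dpcon, &ev);
 */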
4174 
4175 int
4176 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
4177 			int qp_id)
4178 {
4179 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4180 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4181 	struct dpseci_rx_queue_cfg cfg;
4182 	int ret;
4183 
4184 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4185 	cfg.options = DPSECI_QUEUE_OPT_DEST;
4186 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
4187 
4188 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4189 				  qp_id, &cfg);
4190 	if (ret)
4191 		DPAA2_SEC_ERR("Error in dpseci_set_rx_queue: ret: %d", ret);
4192 
4193 	return ret;
4194 }
4195 
4196 static struct rte_cryptodev_ops crypto_ops = {
4197 	.dev_configure	      = dpaa2_sec_dev_configure,
4198 	.dev_start	      = dpaa2_sec_dev_start,
4199 	.dev_stop	      = dpaa2_sec_dev_stop,
4200 	.dev_close	      = dpaa2_sec_dev_close,
4201 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
4202 	.stats_get	      = dpaa2_sec_stats_get,
4203 	.stats_reset	      = dpaa2_sec_stats_reset,
4204 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
4205 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
4206 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
4207 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
4208 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
4209 	/* Raw data-path API related operations */
4210 	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
4211 	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
4212 };
4213 
4214 static const struct rte_security_capability *
4215 dpaa2_sec_capabilities_get(void *device __rte_unused)
4216 {
4217 	return dpaa2_sec_security_cap;
4218 }
4219 
4220 static const struct rte_security_ops dpaa2_sec_security_ops = {
4221 	.session_create = dpaa2_sec_security_session_create,
4222 	.session_update = NULL,
4223 	.session_get_size = dpaa2_sec_security_session_get_size,
4224 	.session_stats_get = NULL,
4225 	.session_destroy = dpaa2_sec_security_session_destroy,
4226 	.set_pkt_metadata = NULL,
4227 	.capabilities_get = dpaa2_sec_capabilities_get
4228 };
4229 
4230 static int
4231 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
4232 {
4233 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4234 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4235 	int ret;
4236 
4237 	PMD_INIT_FUNC_TRACE();
4238 
4239 	/* Function is reverse of dpaa2_sec_dev_init.
4240 	 * It does the following:
4241 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
4242 	 * 2. Close the DPSECI device
4243 	 * 3. Free the allocated resources.
4244 	 */
4245 
4246 	/* Close the device at the underlying layer */
4247 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
4248 	if (ret) {
4249 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
4250 		return -1;
4251 	}
4252 
4253 	/* Free the allocated memory for the device private data and dpseci */
4254 	priv->hw = NULL;
4255 	rte_free(dpseci);
4256 	rte_free(dev->security_ctx);
4257 
4258 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
4259 		       dev->data->name, rte_socket_id());
4260 
4261 	return 0;
4262 }
4263 
4264 static int
4265 check_devargs_handler(const char *key, const char *value,
4266 		      void *opaque)
4267 {
4268 	struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
4269 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4270 
4271 	if (!strcmp(key, DRIVER_STRICT_ORDER)) {
4272 		priv->en_loose_ordered = false;
4273 	} else if (!strcmp(key, DRIVER_DUMP_MODE)) {
4274 		dpaa2_sec_dp_dump = atoi(value);
4275 		if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
4276 			DPAA2_SEC_WARN("Unsupported drv_dump_mode level, "
4277 				      "defaulting to FULL dump");
4279 			dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
4280 		}
4281 	} else
4282 		return -1;
4283 
4284 	return 0;
4285 }
4286 
4287 static void
4288 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
4289 {
4290 	struct rte_kvargs *kvlist;
4291 	struct rte_devargs *devargs;
4292 
4293 	devargs = cryptodev->device->devargs;
4294 	if (!devargs)
4295 		return;
4296 
4297 	kvlist = rte_kvargs_parse(devargs->args, NULL);
4298 	if (!kvlist)
4299 		return;
4300 
4301 	if (!rte_kvargs_count(kvlist, key)) {
4302 		rte_kvargs_free(kvlist);
4303 		return;
4304 	}
4305 
4306 	rte_kvargs_process(kvlist, key,
4307 			check_devargs_handler, (void *)cryptodev);
4308 	rte_kvargs_free(kvlist);
4309 }
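
/*
 * Sketch: the keys parsed above are supplied as fslmc bus devargs on
 * the EAL command line; the exact device name here is an assumption:
 *
 *	-a fslmc:dpseci.0,drv_dump_mode=1,drv_strict_order=1
 */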
4310 
4311 static int
4312 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
4313 {
4314 	struct dpaa2_sec_dev_private *internals;
4315 	struct rte_device *dev = cryptodev->device;
4316 	struct rte_dpaa2_device *dpaa2_dev;
4317 	struct rte_security_ctx *security_instance;
4318 	struct fsl_mc_io *dpseci;
4319 	uint16_t token;
4320 	struct dpseci_attr attr;
4321 	int retcode, hw_id;
4322 
4323 	PMD_INIT_FUNC_TRACE();
4324 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
4325 	hw_id = dpaa2_dev->object_id;
4326 
4327 	cryptodev->driver_id = cryptodev_driver_id;
4328 	cryptodev->dev_ops = &crypto_ops;
4329 
4330 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
4331 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
4332 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
4333 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
4334 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
4335 			RTE_CRYPTODEV_FF_SECURITY |
4336 			RTE_CRYPTODEV_FF_SYM_RAW_DP |
4337 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
4338 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
4339 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
4340 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
4341 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
4342 
4343 	internals = cryptodev->data->dev_private;
4344 
4345 	/*
4346 	 * For secondary processes, we don't initialise any further as primary
4347 	 * has already done this work. Only check we don't need a different
4348 	 * RX function
4349 	 */
4350 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4351 		DPAA2_SEC_DEBUG("Device already init by primary process");
4352 		return 0;
4353 	}
4354 
4355 	/* Initialize security_ctx only for primary process*/
4356 	security_instance = rte_malloc("rte_security_instances_ops",
4357 				sizeof(struct rte_security_ctx), 0);
4358 	if (security_instance == NULL)
4359 		return -ENOMEM;
4360 	security_instance->device = (void *)cryptodev;
4361 	security_instance->ops = &dpaa2_sec_security_ops;
4362 	security_instance->sess_cnt = 0;
4363 	cryptodev->security_ctx = security_instance;
4364 
4365 	/* Open the rte device via MC and save the handle for further use */
4366 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
4367 				sizeof(struct fsl_mc_io), 0);
4368 	if (!dpseci) {
4369 		DPAA2_SEC_ERR(
4370 			"Error in allocating the memory for dpsec object");
4371 		return -ENOMEM;
4372 	}
4373 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
4374 
4375 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
4376 	if (retcode != 0) {
4377 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
4378 			      retcode);
4379 		goto init_error;
4380 	}
4381 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
4382 	if (retcode != 0) {
4383 		DPAA2_SEC_ERR(
4384 			     "Cannot get dpsec device attributes: Error = %x",
4385 			     retcode);
4386 		goto init_error;
4387 	}
4388 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
4389 			"dpsec-%u", hw_id);
4390 
4391 	internals->max_nb_queue_pairs = attr.num_tx_queues;
4392 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
4393 	internals->hw = dpseci;
4394 	internals->token = token;
4395 	internals->en_loose_ordered = true;
4396 
4397 	dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
4398 	dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
4399 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
4400 	return 0;
4401 
4402 init_error:
4403 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
4404 
4405 	/* dpaa2_sec_uninit(crypto_dev_name); */
4406 	return -EFAULT;
4407 }
4408 
4409 static int
4410 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
4411 			  struct rte_dpaa2_device *dpaa2_dev)
4412 {
4413 	struct rte_cryptodev *cryptodev;
4414 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4415 
4416 	int retval;
4417 
4418 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4419 			dpaa2_dev->object_id);
4420 
4421 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4422 	if (cryptodev == NULL)
4423 		return -ENOMEM;
4424 
4425 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4426 		cryptodev->data->dev_private = rte_zmalloc_socket(
4427 					"cryptodev private structure",
4428 					sizeof(struct dpaa2_sec_dev_private),
4429 					RTE_CACHE_LINE_SIZE,
4430 					rte_socket_id());
4431 
4432 		if (cryptodev->data->dev_private == NULL)
4433 			rte_panic("Cannot allocate memory for private "
4434 				  "device data");
4435 	}
4436 
4437 	dpaa2_dev->cryptodev = cryptodev;
4438 	cryptodev->device = &dpaa2_dev->device;
4439 
4440 	/* init user callbacks */
4441 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
4442 
4443 	if (dpaa2_svr_family == SVR_LX2160A)
4444 		rta_set_sec_era(RTA_SEC_ERA_10);
4445 	else
4446 		rta_set_sec_era(RTA_SEC_ERA_8);
4447 
4448 	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", USER_SEC_ERA(rta_get_sec_era()));
4449 
4450 	/* Invoke PMD device initialization function */
4451 	retval = dpaa2_sec_dev_init(cryptodev);
4452 	if (retval == 0) {
4453 		rte_cryptodev_pmd_probing_finish(cryptodev);
4454 		return 0;
4455 	}
4456 
4457 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4458 		rte_free(cryptodev->data->dev_private);
4459 
4460 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4461 
4462 	return -ENXIO;
4463 }
4464 
4465 static int
4466 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4467 {
4468 	struct rte_cryptodev *cryptodev;
4469 	int ret;
4470 
4471 	cryptodev = dpaa2_dev->cryptodev;
4472 	if (cryptodev == NULL)
4473 		return -ENODEV;
4474 
4475 	ret = dpaa2_sec_uninit(cryptodev);
4476 	if (ret)
4477 		return ret;
4478 
4479 	return rte_cryptodev_pmd_destroy(cryptodev);
4480 }
4481 
4482 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4483 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4484 	.drv_type = DPAA2_CRYPTO,
4485 	.driver = {
4486 		.name = "DPAA2 SEC PMD"
4487 	},
4488 	.probe = cryptodev_dpaa2_sec_probe,
4489 	.remove = cryptodev_dpaa2_sec_remove,
4490 };
4491 
4492 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4493 
4494 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4495 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4496 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4497 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
4498 		DRIVER_STRICT_ORDER "=<int>"
4499 		DRIVER_DUMP_MODE "=<int>");
4500 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4501