1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2023 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_udp.h>
14 #include <rte_mbuf.h>
15 #include <rte_cryptodev.h>
16 #include <rte_malloc.h>
17 #include <rte_memcpy.h>
18 #include <rte_string_fns.h>
19 #include <rte_cycles.h>
20 #include <rte_kvargs.h>
21 #include <dev_driver.h>
22 #include <cryptodev_pmd.h>
23 #include <rte_common.h>
24 #include <bus_fslmc_driver.h>
25 #include <fslmc_vfio.h>
26 #include <dpaa2_hw_pvt.h>
27 #include <dpaa2_hw_dpio.h>
28 #include <dpaa2_hw_mempool.h>
29 #include <fsl_dpopr.h>
30 #include <fsl_dpseci.h>
31 #include <fsl_mc_sys.h>
32 #include <rte_hexdump.h>
33 
34 #include "dpaa2_sec_priv.h"
35 #include "dpaa2_sec_event.h"
36 #include "dpaa2_sec_logs.h"
37 
38 /* RTA header files */
39 #include <desc/ipsec.h>
40 #include <desc/pdcp.h>
41 #include <desc/sdap.h>
42 #include <desc/algo.h>
43 
44 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
45  * a pointer to the shared descriptor.
46  */
47 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
48 #define FSL_VENDOR_ID           0x1957
49 #define FSL_DEVICE_ID           0x410
50 #define FSL_SUBSYSTEM_SEC       1
51 #define FSL_MC_DPSECI_DEVID     3
52 
53 #define DPAA2_DEFAULT_NAT_T_PORT 4500
54 #define NO_PREFETCH 0
55 
56 #define DRIVER_DUMP_MODE "drv_dump_mode"
57 #define DRIVER_STRICT_ORDER "drv_strict_order"
58 
59 /* DPAA2_SEC_DP_DUMP levels */
60 enum dpaa2_sec_dump_levels {
61 	DPAA2_SEC_DP_NO_DUMP,
62 	DPAA2_SEC_DP_ERR_DUMP,
63 	DPAA2_SEC_DP_FULL_DUMP
64 };
65 
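/* Data-path dump level for this PMD; defaults to dumping faulty frames
 * only. Presumably selected at probe time through the DRIVER_DUMP_MODE
 * ("drv_dump_mode") devarg declared above.
 */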
66 uint8_t cryptodev_driver_id;
67 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
68 
69 static inline void
70 dpaa2_sec_dp_fd_dump(const struct qbman_fd *fd, uint16_t bpid,
71 		     struct rte_mbuf *mbuf, bool tx)
72 {
73 #if (RTE_LOG_DEBUG <= RTE_LOG_DP_LEVEL)
74 	char debug_str[1024];
75 	int offset;
76 
77 	if (tx) {
78 		offset = sprintf(debug_str,
79 			"CIPHER SG: fdaddr =%" PRIx64 ", from %s pool ",
80 			DPAA2_GET_FD_ADDR(fd),
81 			bpid < MAX_BPID ? "SW" : "BMAN");
82 		if (bpid < MAX_BPID) {
83 			offset += sprintf(&debug_str[offset],
84 				"bpid = %d ", bpid);
85 		}
86 	} else {
87 		offset = sprintf(debug_str, "Mbuf %p from %s pool ",
88 				 mbuf, DPAA2_GET_FD_IVP(fd) ? "SW" : "BMAN");
89 		if (!DPAA2_GET_FD_IVP(fd)) {
90 			offset += sprintf(&debug_str[offset], "bpid = %d ",
91 					  DPAA2_GET_FD_BPID(fd));
92 		}
93 	}
94 	offset += sprintf(&debug_str[offset],
95 		"private size = %d ",
96 		mbuf->pool->private_data_size);
97 	offset += sprintf(&debug_str[offset],
98 		"addr %p, fdaddr =%" PRIx64 ", off =%d, len =%d",
99 		mbuf->buf_addr, DPAA2_GET_FD_ADDR(fd),
100 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_LEN(fd));
101 	DPAA2_SEC_DP_DEBUG("%s", debug_str);
102 #else
103 	RTE_SET_USED(bpid);
104 	RTE_SET_USED(tx);
105 	RTE_SET_USED(mbuf);
106 	RTE_SET_USED(fd);
107 #endif
108 }
109 
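/* Reclaim the FLE memory attached to an FD that failed to enqueue.
 * Single-format FDs carry no separate FLE allocation. For compound FDs,
 * the crypto op pointer lives in the bookkeeping FLE placed just before
 * the FLE the FD points to (see the build_*_fd helpers below).
 */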
110 static inline void
111 free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
112 {
113 	struct qbman_fle *fle;
114 	struct rte_crypto_op *op;
115 
116 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
117 		return;
118 
119 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
120 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
121 	/* free the fle memory */
122 	if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
123 		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
124 	else
125 		rte_free((void *)(fle-1));
126 }
127 
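/* FLE memory layout shared by the scatter-gather builders below:
 *   fle[0]  - bookkeeping: crypto op pointer and session ctxt (not seen by HW)
 *   fle[1]  - output frame list entry (op_fle)
 *   fle[2]  - input frame list entry (ip_fle)
 *   fle[3+] - scatter/gather entries referenced by the two FLEs
 */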
128 static inline int
129 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
130 			   struct rte_crypto_op *op,
131 			   struct qbman_fd *fd, uint16_t bpid)
132 {
133 	struct rte_crypto_sym_op *sym_op = op->sym;
134 	struct ctxt_priv *priv = sess->ctxt;
135 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
136 	struct sec_flow_context *flc;
137 	struct rte_mbuf *mbuf;
138 	uint32_t in_len = 0, out_len = 0;
139 
140 	if (sym_op->m_dst)
141 		mbuf = sym_op->m_dst;
142 	else
143 		mbuf = sym_op->m_src;
144 
145 	/* first FLE entry used to store mbuf and session ctxt */
146 	fle = (struct qbman_fle *)rte_malloc(NULL,
147 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
148 			RTE_CACHE_LINE_SIZE);
149 	if (unlikely(!fle)) {
150 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
151 		return -ENOMEM;
152 	}
153 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
154 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
155 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
156 
157 	/* Save the shared descriptor */
158 	flc = &priv->flc_desc[0].flc;
159 
160 	op_fle = fle + 1;
161 	ip_fle = fle + 2;
162 	sge = fle + 3;
163 
164 	if (likely(bpid < MAX_BPID)) {
165 		DPAA2_SET_FD_BPID(fd, bpid);
166 		DPAA2_SET_FLE_BPID(op_fle, bpid);
167 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
168 	} else {
169 		DPAA2_SET_FD_IVP(fd);
170 		DPAA2_SET_FLE_IVP(op_fle);
171 		DPAA2_SET_FLE_IVP(ip_fle);
172 	}
173 
174 	/* Configure FD as a FRAME LIST */
175 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
176 	DPAA2_SET_FD_COMPOUND_FMT(fd);
177 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
178 
179 	/* Configure Output FLE with Scatter/Gather Entry */
180 	DPAA2_SET_FLE_SG_EXT(op_fle);
181 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
182 
183 	/* Configure Output SGE for Encap/Decap */
184 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
185 	/* o/p segs */
186 	while (mbuf->next) {
187 		sge->length = mbuf->data_len;
188 		out_len += sge->length;
189 		sge++;
190 		mbuf = mbuf->next;
191 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
192 	}
193 	/* use buf_len for the last buf so that extra data can be appended */
194 	sge->length = mbuf->buf_len - mbuf->data_off;
195 	out_len += sge->length;
196 
197 	DPAA2_SET_FLE_FIN(sge);
198 	op_fle->length = out_len;
199 
200 	sge++;
201 	mbuf = sym_op->m_src;
202 
203 	/* Configure Input FLE with Scatter/Gather Entry */
204 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
205 	DPAA2_SET_FLE_SG_EXT(ip_fle);
206 	DPAA2_SET_FLE_FIN(ip_fle);
207 
208 	/* Configure input SGE for Encap/Decap */
209 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
210 	sge->length = mbuf->data_len;
211 	in_len += sge->length;
212 
213 	mbuf = mbuf->next;
214 	/* i/p segs */
215 	while (mbuf) {
216 		sge++;
217 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
218 		sge->length = mbuf->data_len;
219 		in_len += sge->length;
220 		mbuf = mbuf->next;
221 	}
222 	ip_fle->length = in_len;
223 	DPAA2_SET_FLE_FIN(sge);
224 
225 	/* In case of PDCP, the per-packet HFN is stored in the
226 	 * mbuf private area, after sym_op.
227 	 */
228 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
229 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
230 					sess->pdcp.hfn_ovd_offset);
231 		/* enable HFN override */
232 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
233 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
234 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
235 	}
236 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
237 
238 	return 0;
239 }
240 
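/* Contiguous-buffer variant of the protocol (IPsec/PDCP) compound FD:
 * one output FLE pointing at the destination mbuf and one input FLE
 * pointing at the source mbuf, drawn from the per-queue-pair FLE pool.
 */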
241 static inline int
242 build_proto_compound_fd(dpaa2_sec_session *sess,
243 	       struct rte_crypto_op *op,
244 	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
245 {
246 	struct rte_crypto_sym_op *sym_op = op->sym;
247 	struct ctxt_priv *priv = sess->ctxt;
248 	struct qbman_fle *fle, *ip_fle, *op_fle;
249 	struct sec_flow_context *flc;
250 	struct rte_mbuf *src_mbuf = sym_op->m_src;
251 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
252 	int retval;
253 
254 	if (!dst_mbuf)
255 		dst_mbuf = src_mbuf;
256 
257 	/* Save the shared descriptor */
258 	flc = &priv->flc_desc[0].flc;
259 
260 	/* we are using the first FLE entry to store Mbuf */
261 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
262 	if (retval) {
263 		DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
264 		return -ENOMEM;
265 	}
266 	memset(fle, 0, FLE_POOL_BUF_SIZE);
267 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
268 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
269 
270 	op_fle = fle + 1;
271 	ip_fle = fle + 2;
272 
273 	if (likely(bpid < MAX_BPID)) {
274 		DPAA2_SET_FD_BPID(fd, bpid);
275 		DPAA2_SET_FLE_BPID(op_fle, bpid);
276 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
277 	} else {
278 		DPAA2_SET_FD_IVP(fd);
279 		DPAA2_SET_FLE_IVP(op_fle);
280 		DPAA2_SET_FLE_IVP(ip_fle);
281 	}
282 
283 	/* Configure FD as a FRAME LIST */
284 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
285 	DPAA2_SET_FD_COMPOUND_FMT(fd);
286 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
287 
288 	/* Configure Output FLE with dst mbuf data  */
289 	DPAA2_SET_FLE_ADDR(op_fle, rte_pktmbuf_iova(dst_mbuf));
290 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
291 
292 	/* Configure Input FLE with src mbuf data */
293 	DPAA2_SET_FLE_ADDR(ip_fle, rte_pktmbuf_iova(src_mbuf));
294 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
295 
296 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
297 	DPAA2_SET_FLE_FIN(ip_fle);
298 
299 	/* In case of PDCP, the per-packet HFN is stored in the
300 	 * mbuf private area, after sym_op.
301 	 */
302 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
303 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
304 					sess->pdcp.hfn_ovd_offset);
305 		/* enable HFN override */
306 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
307 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
308 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
309 	}
310 
311 	return 0;
312 
313 }
314 
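/* Simple (single-buffer) FD for protocol offload when the operation is
 * done in place. The op pointer is stashed in mbuf->buf_iova (the real
 * IOVA is parked in aead.digest.phys_addr); both are restored on the
 * dequeue side by sec_simple_fd_to_mbuf().
 */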
315 static inline int
316 build_proto_fd(dpaa2_sec_session *sess,
317 	       struct rte_crypto_op *op,
318 	       struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
319 {
320 	struct rte_crypto_sym_op *sym_op = op->sym;
321 	if (sym_op->m_dst)
322 		return build_proto_compound_fd(sess, op, fd, bpid, qp);
323 
324 	struct ctxt_priv *priv = sess->ctxt;
325 	struct sec_flow_context *flc;
326 	struct rte_mbuf *mbuf = sym_op->m_src;
327 
328 	if (likely(bpid < MAX_BPID))
329 		DPAA2_SET_FD_BPID(fd, bpid);
330 	else
331 		DPAA2_SET_FD_IVP(fd);
332 
333 	/* Save the shared descriptor */
334 	flc = &priv->flc_desc[0].flc;
335 
336 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
337 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
338 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
339 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
340 
341 	/* save physical address of mbuf */
342 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
343 	mbuf->buf_iova = (size_t)op;
344 
345 	return 0;
346 }
347 
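/* AEAD (GCM) scatter-gather FD: the output frame covers the ciphertext
 * (plus the ICV when encrypting); the input frame covers IV + AAD +
 * payload (plus the old ICV when decrypting, so SEC can verify it).
 */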
348 static inline int
349 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
350 		 struct rte_crypto_op *op,
351 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
352 {
353 	struct rte_crypto_sym_op *sym_op = op->sym;
354 	struct ctxt_priv *priv = sess->ctxt;
355 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
356 	struct sec_flow_context *flc;
357 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
358 	int icv_len = sess->digest_length;
359 	uint8_t *old_icv;
360 	struct rte_mbuf *mbuf;
361 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
362 			sess->iv.offset);
363 
364 	if (sym_op->m_dst)
365 		mbuf = sym_op->m_dst;
366 	else
367 		mbuf = sym_op->m_src;
368 
369 	/* first FLE entry used to store mbuf and session ctxt */
370 	fle = (struct qbman_fle *)rte_malloc(NULL,
371 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
372 			RTE_CACHE_LINE_SIZE);
373 	if (unlikely(!fle)) {
374 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
375 		return -ENOMEM;
376 	}
377 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
378 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
379 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
380 
381 	op_fle = fle + 1;
382 	ip_fle = fle + 2;
383 	sge = fle + 3;
384 
385 	/* Save the shared descriptor */
386 	flc = &priv->flc_desc[0].flc;
387 
388 	/* Configure FD as a FRAME LIST */
389 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
390 	DPAA2_SET_FD_COMPOUND_FMT(fd);
391 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
392 
393 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d",
394 		   sym_op->aead.data.offset,
395 		   sym_op->aead.data.length,
396 		   sess->digest_length);
397 	DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x",
398 		   sess->iv.length,
399 		   sym_op->m_src->data_off);
400 
401 	/* Configure Output FLE with Scatter/Gather Entry */
402 	DPAA2_SET_FLE_SG_EXT(op_fle);
403 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
404 
405 	if (auth_only_len)
406 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
407 
408 	op_fle->length = (sess->dir == DIR_ENC) ?
409 			(sym_op->aead.data.length + icv_len) :
410 			sym_op->aead.data.length;
411 
412 	/* Configure Output SGE for Encap/Decap */
413 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset);
414 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
415 
416 	mbuf = mbuf->next;
417 	/* o/p segs */
418 	while (mbuf) {
419 		sge++;
420 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
421 		sge->length = mbuf->data_len;
422 		mbuf = mbuf->next;
423 	}
424 	sge->length -= icv_len;
425 
426 	if (sess->dir == DIR_ENC) {
427 		sge++;
428 		DPAA2_SET_FLE_ADDR(sge,
429 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
430 		sge->length = icv_len;
431 	}
432 	DPAA2_SET_FLE_FIN(sge);
433 
434 	sge++;
435 	mbuf = sym_op->m_src;
436 
437 	/* Configure Input FLE with Scatter/Gather Entry */
438 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
439 	DPAA2_SET_FLE_SG_EXT(ip_fle);
440 	DPAA2_SET_FLE_FIN(ip_fle);
441 	ip_fle->length = (sess->dir == DIR_ENC) ?
442 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
443 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
444 		 icv_len);
445 
446 	/* Configure Input SGE for Encap/Decap */
447 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
448 	sge->length = sess->iv.length;
449 
450 	sge++;
451 	if (auth_only_len) {
452 		DPAA2_SET_FLE_ADDR(sge,
453 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
454 		sge->length = auth_only_len;
455 		sge++;
456 	}
457 
458 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->aead.data.offset);
459 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
460 
461 	mbuf = mbuf->next;
462 	/* i/p segs */
463 	while (mbuf) {
464 		sge++;
465 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
466 		sge->length = mbuf->data_len;
467 		mbuf = mbuf->next;
468 	}
469 
470 	if (sess->dir == DIR_DEC) {
471 		sge++;
472 		old_icv = (uint8_t *)(sge + 1);
473 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
474 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
475 		sge->length = icv_len;
476 	}
477 
478 	DPAA2_SET_FLE_FIN(sge);
479 	if (auth_only_len) {
480 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
481 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
482 	}
483 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
484 
485 	return 0;
486 }
487 
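/* Contiguous-buffer AEAD (GCM) FD: bookkeeping FLE, output FLE, input
 * FLE and the SGEs they reference are all carved from one fixed-size
 * FLE pool entry (FLE_POOL_BUF_SIZE).
 */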
488 static inline int
489 build_authenc_gcm_fd(dpaa2_sec_session *sess,
490 		     struct rte_crypto_op *op,
491 		     struct qbman_fd *fd, uint16_t bpid,
492 		     struct dpaa2_sec_qp *qp)
493 {
494 	struct rte_crypto_sym_op *sym_op = op->sym;
495 	struct ctxt_priv *priv = sess->ctxt;
496 	struct qbman_fle *fle, *sge;
497 	struct sec_flow_context *flc;
498 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
499 	int icv_len = sess->digest_length, retval;
500 	uint8_t *old_icv;
501 	struct rte_mbuf *dst;
502 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
503 			sess->iv.offset);
504 
505 	if (sym_op->m_dst)
506 		dst = sym_op->m_dst;
507 	else
508 		dst = sym_op->m_src;
509 
510 	/* TODO: we are using the first FLE entry to store the mbuf and session
511 	 * ctxt. Currently we do not know which FLE has the mbuf stored,
512 	 * so while retrieving we go back one FLE from the FD address
513 	 * to get the mbuf address from the previous FLE.
514 	 * A better approach would be to use the inline mbuf.
515 	 */
516 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
517 	if (retval) {
518 		DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
519 		return -ENOMEM;
520 	}
521 	memset(fle, 0, FLE_POOL_BUF_SIZE);
522 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
523 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
524 	fle = fle + 1;
525 	sge = fle + 2;
526 	if (likely(bpid < MAX_BPID)) {
527 		DPAA2_SET_FD_BPID(fd, bpid);
528 		DPAA2_SET_FLE_BPID(fle, bpid);
529 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
530 		DPAA2_SET_FLE_BPID(sge, bpid);
531 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
532 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
533 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
534 	} else {
535 		DPAA2_SET_FD_IVP(fd);
536 		DPAA2_SET_FLE_IVP(fle);
537 		DPAA2_SET_FLE_IVP((fle + 1));
538 		DPAA2_SET_FLE_IVP(sge);
539 		DPAA2_SET_FLE_IVP((sge + 1));
540 		DPAA2_SET_FLE_IVP((sge + 2));
541 		DPAA2_SET_FLE_IVP((sge + 3));
542 	}
543 
544 	/* Save the shared descriptor */
545 	flc = &priv->flc_desc[0].flc;
546 	/* Configure FD as a FRAME LIST */
547 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
548 	DPAA2_SET_FD_COMPOUND_FMT(fd);
549 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
550 
551 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d",
552 		   sym_op->aead.data.offset,
553 		   sym_op->aead.data.length,
554 		   sess->digest_length);
555 	DPAA2_SEC_DP_DEBUG("iv-len=%d data_off: 0x%x",
556 		   sess->iv.length,
557 		   sym_op->m_src->data_off);
558 
559 	/* Configure Output FLE with Scatter/Gather Entry */
560 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
561 	if (auth_only_len)
562 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
563 	fle->length = (sess->dir == DIR_ENC) ?
564 			(sym_op->aead.data.length + icv_len) :
565 			sym_op->aead.data.length;
566 
567 	DPAA2_SET_FLE_SG_EXT(fle);
568 
569 	/* Configure Output SGE for Encap/Decap */
570 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->aead.data.offset);
571 	sge->length = sym_op->aead.data.length;
572 
573 	if (sess->dir == DIR_ENC) {
574 		sge++;
575 		DPAA2_SET_FLE_ADDR(sge,
576 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
577 		sge->length = sess->digest_length;
578 	}
579 	DPAA2_SET_FLE_FIN(sge);
580 
581 	sge++;
582 	fle++;
583 
584 	/* Configure Input FLE with Scatter/Gather Entry */
585 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
586 	DPAA2_SET_FLE_SG_EXT(fle);
587 	DPAA2_SET_FLE_FIN(fle);
588 	fle->length = (sess->dir == DIR_ENC) ?
589 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
590 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
591 		 sess->digest_length);
592 
593 	/* Configure Input SGE for Encap/Decap */
594 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
595 	sge->length = sess->iv.length;
596 	sge++;
597 	if (auth_only_len) {
598 		DPAA2_SET_FLE_ADDR(sge,
599 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
600 		sge->length = auth_only_len;
601 		DPAA2_SET_FLE_BPID(sge, bpid);
602 		sge++;
603 	}
604 
605 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->aead.data.offset);
606 	sge->length = sym_op->aead.data.length;
607 	if (sess->dir == DIR_DEC) {
608 		sge++;
609 		old_icv = (uint8_t *)(sge + 1);
610 		memcpy(old_icv,	sym_op->aead.digest.data,
611 		       sess->digest_length);
612 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
613 		sge->length = sess->digest_length;
614 	}
615 	DPAA2_SET_FLE_FIN(sge);
616 
617 	if (auth_only_len) {
618 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
619 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
620 	}
621 
622 	DPAA2_SET_FD_LEN(fd, fle->length);
623 	return 0;
624 }
625 
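/* Chained cipher+auth scatter-gather FD. auth_only_len packs the
 * authenticate-only regions around the cipher region: the tail length
 * in the upper 16 bits and the header length in the lower 16 bits.
 */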
626 static inline int
627 build_authenc_sg_fd(dpaa2_sec_session *sess,
628 		 struct rte_crypto_op *op,
629 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
630 {
631 	struct rte_crypto_sym_op *sym_op = op->sym;
632 	struct ctxt_priv *priv = sess->ctxt;
633 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
634 	struct sec_flow_context *flc;
635 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
636 				sym_op->auth.data.offset;
637 	uint16_t auth_tail_len = sym_op->auth.data.length -
638 				sym_op->cipher.data.length - auth_hdr_len;
639 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
640 	int icv_len = sess->digest_length;
641 	uint8_t *old_icv;
642 	struct rte_mbuf *mbuf;
643 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
644 			sess->iv.offset);
645 
646 	if (sym_op->m_dst)
647 		mbuf = sym_op->m_dst;
648 	else
649 		mbuf = sym_op->m_src;
650 
651 	/* first FLE entry used to store mbuf and session ctxt */
652 	fle = (struct qbman_fle *)rte_malloc(NULL,
653 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
654 			RTE_CACHE_LINE_SIZE);
655 	if (unlikely(!fle)) {
656 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
657 		return -ENOMEM;
658 	}
659 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
660 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
661 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
662 
663 	op_fle = fle + 1;
664 	ip_fle = fle + 2;
665 	sge = fle + 3;
666 
667 	/* Save the shared descriptor */
668 	flc = &priv->flc_desc[0].flc;
669 
670 	/* Configure FD as a FRAME LIST */
671 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
672 	DPAA2_SET_FD_COMPOUND_FMT(fd);
673 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
674 
675 	DPAA2_SEC_DP_DEBUG(
676 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d",
677 		sym_op->auth.data.offset,
678 		sym_op->auth.data.length,
679 		sess->digest_length);
680 	DPAA2_SEC_DP_DEBUG(
681 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
682 		sym_op->cipher.data.offset,
683 		sym_op->cipher.data.length,
684 		sess->iv.length,
685 		sym_op->m_src->data_off);
686 
687 	/* Configure Output FLE with Scatter/Gather Entry */
688 	DPAA2_SET_FLE_SG_EXT(op_fle);
689 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
690 
691 	if (auth_only_len)
692 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
693 
694 	op_fle->length = (sess->dir == DIR_ENC) ?
695 			(sym_op->cipher.data.length + icv_len) :
696 			sym_op->cipher.data.length;
697 
698 	/* Configure Output SGE for Encap/Decap */
699 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset);
700 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
701 
702 	mbuf = mbuf->next;
703 	/* o/p segs */
704 	while (mbuf) {
705 		sge++;
706 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
707 		sge->length = mbuf->data_len;
708 		mbuf = mbuf->next;
709 	}
710 	sge->length -= icv_len;
711 
712 	if (sess->dir == DIR_ENC) {
713 		sge++;
714 		DPAA2_SET_FLE_ADDR(sge,
715 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
716 		sge->length = icv_len;
717 	}
718 	DPAA2_SET_FLE_FIN(sge);
719 
720 	sge++;
721 	mbuf = sym_op->m_src;
722 
723 	/* Configure Input FLE with Scatter/Gather Entry */
724 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
725 	DPAA2_SET_FLE_SG_EXT(ip_fle);
726 	DPAA2_SET_FLE_FIN(ip_fle);
727 	ip_fle->length = (sess->dir == DIR_ENC) ?
728 			(sym_op->auth.data.length + sess->iv.length) :
729 			(sym_op->auth.data.length + sess->iv.length +
730 			 icv_len);
731 
732 	/* Configure Input SGE for Encap/Decap */
733 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
734 	sge->length = sess->iv.length;
735 
736 	sge++;
737 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + sym_op->auth.data.offset);
738 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
739 
740 	mbuf = mbuf->next;
741 	/* i/p segs */
742 	while (mbuf) {
743 		sge++;
744 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
745 		sge->length = mbuf->data_len;
746 		mbuf = mbuf->next;
747 	}
748 	sge->length -= icv_len;
749 
750 	if (sess->dir == DIR_DEC) {
751 		sge++;
752 		old_icv = (uint8_t *)(sge + 1);
753 		memcpy(old_icv,	sym_op->auth.digest.data,
754 		       icv_len);
755 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
756 		sge->length = icv_len;
757 	}
758 
759 	DPAA2_SET_FLE_FIN(sge);
760 	if (auth_only_len) {
761 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
762 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
763 	}
764 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
765 
766 	return 0;
767 }
768 
769 static inline int
770 build_authenc_fd(dpaa2_sec_session *sess,
771 		 struct rte_crypto_op *op,
772 		 struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
773 {
774 	struct rte_crypto_sym_op *sym_op = op->sym;
775 	struct ctxt_priv *priv = sess->ctxt;
776 	struct qbman_fle *fle, *sge;
777 	struct sec_flow_context *flc;
778 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
779 				sym_op->auth.data.offset;
780 	uint16_t auth_tail_len = sym_op->auth.data.length -
781 				sym_op->cipher.data.length - auth_hdr_len;
782 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
783 
784 	int icv_len = sess->digest_length, retval;
785 	uint8_t *old_icv;
786 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
787 			sess->iv.offset);
788 	struct rte_mbuf *dst;
789 
790 	if (sym_op->m_dst)
791 		dst = sym_op->m_dst;
792 	else
793 		dst = sym_op->m_src;
794 
795 	/* We are using the first FLE entry to store the mbuf.
796 	 * Currently we do not know which FLE has the mbuf stored,
797 	 * so while retrieving we go back one FLE from the FD address
798 	 * to get the mbuf address from the previous FLE.
799 	 * A better approach would be to use the inline mbuf.
800 	 */
801 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
802 	if (retval) {
803 		DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
804 		return -ENOMEM;
805 	}
806 	memset(fle, 0, FLE_POOL_BUF_SIZE);
807 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
808 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
809 	fle = fle + 1;
810 	sge = fle + 2;
811 	if (likely(bpid < MAX_BPID)) {
812 		DPAA2_SET_FD_BPID(fd, bpid);
813 		DPAA2_SET_FLE_BPID(fle, bpid);
814 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
815 		DPAA2_SET_FLE_BPID(sge, bpid);
816 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
817 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
818 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
819 	} else {
820 		DPAA2_SET_FD_IVP(fd);
821 		DPAA2_SET_FLE_IVP(fle);
822 		DPAA2_SET_FLE_IVP((fle + 1));
823 		DPAA2_SET_FLE_IVP(sge);
824 		DPAA2_SET_FLE_IVP((sge + 1));
825 		DPAA2_SET_FLE_IVP((sge + 2));
826 		DPAA2_SET_FLE_IVP((sge + 3));
827 	}
828 
829 	/* Save the shared descriptor */
830 	flc = &priv->flc_desc[0].flc;
831 	/* Configure FD as a FRAME LIST */
832 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
833 	DPAA2_SET_FD_COMPOUND_FMT(fd);
834 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
835 
836 	DPAA2_SEC_DP_DEBUG(
837 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d",
838 		sym_op->auth.data.offset,
839 		sym_op->auth.data.length,
840 		sess->digest_length);
841 	DPAA2_SEC_DP_DEBUG(
842 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x",
843 		sym_op->cipher.data.offset,
844 		sym_op->cipher.data.length,
845 		sess->iv.length,
846 		sym_op->m_src->data_off);
847 
848 	/* Configure Output FLE with Scatter/Gather Entry */
849 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
850 	if (auth_only_len)
851 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
852 	fle->length = (sess->dir == DIR_ENC) ?
853 			(sym_op->cipher.data.length + icv_len) :
854 			sym_op->cipher.data.length;
855 
856 	DPAA2_SET_FLE_SG_EXT(fle);
857 
858 	/* Configure Output SGE for Encap/Decap */
859 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(dst) + sym_op->cipher.data.offset);
860 	sge->length = sym_op->cipher.data.length;
861 
862 	if (sess->dir == DIR_ENC) {
863 		sge++;
864 		DPAA2_SET_FLE_ADDR(sge,
865 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
866 		sge->length = sess->digest_length;
867 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
868 					sess->iv.length));
869 	}
870 	DPAA2_SET_FLE_FIN(sge);
871 
872 	sge++;
873 	fle++;
874 
875 	/* Configure Input FLE with Scatter/Gather Entry */
876 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
877 	DPAA2_SET_FLE_SG_EXT(fle);
878 	DPAA2_SET_FLE_FIN(fle);
879 	fle->length = (sess->dir == DIR_ENC) ?
880 			(sym_op->auth.data.length + sess->iv.length) :
881 			(sym_op->auth.data.length + sess->iv.length +
882 			 sess->digest_length);
883 
884 	/* Configure Input SGE for Encap/Decap */
885 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
886 	sge->length = sess->iv.length;
887 	sge++;
888 
889 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + sym_op->auth.data.offset);
890 	sge->length = sym_op->auth.data.length;
891 	if (sess->dir == DIR_DEC) {
892 		sge++;
893 		old_icv = (uint8_t *)(sge + 1);
894 		memcpy(old_icv,	sym_op->auth.digest.data,
895 		       sess->digest_length);
896 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
897 		sge->length = sess->digest_length;
898 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
899 				 sess->digest_length +
900 				 sess->iv.length));
901 	}
902 	DPAA2_SET_FLE_FIN(sge);
903 	if (auth_only_len) {
904 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
905 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
906 	}
907 	return 0;
908 }
909 
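/* Auth-only scatter-gather FD. For SNOW 3G UIA2 and ZUC EIA3 the API
 * expresses offset/length in bits, so they are checked for byte
 * alignment and converted (>> 3) to bytes before building the FD.
 */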
910 static inline int build_auth_sg_fd(
911 		dpaa2_sec_session *sess,
912 		struct rte_crypto_op *op,
913 		struct qbman_fd *fd,
914 		__rte_unused uint16_t bpid)
915 {
916 	struct rte_crypto_sym_op *sym_op = op->sym;
917 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
918 	struct sec_flow_context *flc;
919 	struct ctxt_priv *priv = sess->ctxt;
920 	int data_len, data_offset;
921 	uint8_t *old_digest;
922 	struct rte_mbuf *mbuf;
923 
924 	data_len = sym_op->auth.data.length;
925 	data_offset = sym_op->auth.data.offset;
926 
927 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
928 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
929 		if ((data_len & 7) || (data_offset & 7)) {
930 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
931 			return -ENOTSUP;
932 		}
933 
934 		data_len = data_len >> 3;
935 		data_offset = data_offset >> 3;
936 	}
937 
938 	mbuf = sym_op->m_src;
939 	fle = (struct qbman_fle *)rte_malloc(NULL,
940 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
941 			RTE_CACHE_LINE_SIZE);
942 	if (unlikely(!fle)) {
943 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
944 		return -ENOMEM;
945 	}
946 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
947 	/* first FLE entry used to store mbuf and session ctxt */
948 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
949 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
950 	op_fle = fle + 1;
951 	ip_fle = fle + 2;
952 	sge = fle + 3;
953 
954 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
955 	/* sg FD */
956 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
957 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
958 	DPAA2_SET_FD_COMPOUND_FMT(fd);
959 
960 	/* o/p fle */
961 	DPAA2_SET_FLE_ADDR(op_fle,
962 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
963 	op_fle->length = sess->digest_length;
964 
965 	/* i/p fle */
966 	DPAA2_SET_FLE_SG_EXT(ip_fle);
967 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
968 	ip_fle->length = data_len;
969 
970 	if (sess->iv.length) {
971 		uint8_t *iv_ptr;
972 
973 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
974 						   sess->iv.offset);
975 
976 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
977 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
978 			sge->length = 12;
979 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
980 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
981 			sge->length = 8;
982 		} else {
983 			sge->length = sess->iv.length;
984 		}
985 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
986 		ip_fle->length += sge->length;
987 		sge++;
988 	}
989 	/* i/p 1st seg */
990 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
991 
992 	if (data_len <= (mbuf->data_len - data_offset)) {
993 		sge->length = data_len;
994 		data_len = 0;
995 	} else {
996 		sge->length = mbuf->data_len - data_offset;
997 
998 		/* remaining i/p segs */
999 		while ((data_len = data_len - sge->length) &&
1000 		       (mbuf = mbuf->next)) {
1001 			sge++;
1002 			DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
1003 			if (data_len > mbuf->data_len)
1004 				sge->length = mbuf->data_len;
1005 			else
1006 				sge->length = data_len;
1007 		}
1008 	}
1009 
1010 	if (sess->dir == DIR_DEC) {
1011 		/* Digest verification case */
1012 		sge++;
1013 		old_digest = (uint8_t *)(sge + 1);
1014 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1015 			   sess->digest_length);
1016 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1017 		sge->length = sess->digest_length;
1018 		ip_fle->length += sess->digest_length;
1019 	}
1020 	DPAA2_SET_FLE_FIN(sge);
1021 	DPAA2_SET_FLE_FIN(ip_fle);
1022 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1023 
1024 	return 0;
1025 }
1026 
1027 static inline int
1028 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1029 	      struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
1030 {
1031 	struct rte_crypto_sym_op *sym_op = op->sym;
1032 	struct qbman_fle *fle, *sge;
1033 	struct sec_flow_context *flc;
1034 	struct ctxt_priv *priv = sess->ctxt;
1035 	int data_len, data_offset;
1036 	uint8_t *old_digest;
1037 	int retval;
1038 
1039 	data_len = sym_op->auth.data.length;
1040 	data_offset = sym_op->auth.data.offset;
1041 
1042 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1043 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1044 		if ((data_len & 7) || (data_offset & 7)) {
1045 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1046 			return -ENOTSUP;
1047 		}
1048 
1049 		data_len = data_len >> 3;
1050 		data_offset = data_offset >> 3;
1051 	}
1052 
1053 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
1054 	if (retval) {
1055 		DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
1056 		return -ENOMEM;
1057 	}
1058 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1059 	/* TODO: we are using the first FLE entry to store the mbuf.
1060 	 * Currently we do not know which FLE has the mbuf stored,
1061 	 * so while retrieving we go back one FLE from the FD address
1062 	 * to get the mbuf address from the previous FLE.
1063 	 * A better approach would be to use the inline mbuf.
1064 	 */
1065 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1066 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1067 	fle = fle + 1;
1068 	sge = fle + 2;
1069 
1070 	if (likely(bpid < MAX_BPID)) {
1071 		DPAA2_SET_FD_BPID(fd, bpid);
1072 		DPAA2_SET_FLE_BPID(fle, bpid);
1073 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1074 		DPAA2_SET_FLE_BPID(sge, bpid);
1075 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1076 	} else {
1077 		DPAA2_SET_FD_IVP(fd);
1078 		DPAA2_SET_FLE_IVP(fle);
1079 		DPAA2_SET_FLE_IVP((fle + 1));
1080 		DPAA2_SET_FLE_IVP(sge);
1081 		DPAA2_SET_FLE_IVP((sge + 1));
1082 	}
1083 
1084 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1085 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1086 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1087 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1088 
1089 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1090 	fle->length = sess->digest_length;
1091 	fle++;
1092 
1093 	/* Setting input FLE */
1094 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1095 	DPAA2_SET_FLE_SG_EXT(fle);
1096 	fle->length = data_len;
1097 
1098 	if (sess->iv.length) {
1099 		uint8_t *iv_ptr;
1100 
1101 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1102 						   sess->iv.offset);
1103 
1104 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1105 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1106 			sge->length = 12;
1107 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1108 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1109 			sge->length = 8;
1110 		} else {
1111 			sge->length = sess->iv.length;
1112 		}
1113 
1114 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1115 		fle->length = fle->length + sge->length;
1116 		sge++;
1117 	}
1118 
1119 	/* Setting data to authenticate */
1120 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset);
1121 	sge->length = data_len;
1122 
1123 	if (sess->dir == DIR_DEC) {
1124 		sge++;
1125 		old_digest = (uint8_t *)(sge + 1);
1126 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1127 			   sess->digest_length);
1128 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1129 		sge->length = sess->digest_length;
1130 		fle->length = fle->length + sess->digest_length;
1131 	}
1132 
1133 	DPAA2_SET_FLE_FIN(sge);
1134 	DPAA2_SET_FLE_FIN(fle);
1135 	DPAA2_SET_FD_LEN(fd, fle->length);
1136 
1137 	return 0;
1138 }
1139 
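/* Cipher-only scatter-gather FD; SNOW 3G UEA2 and ZUC EEA3 bit-level
 * offset/length are converted to bytes exactly as in the auth path.
 */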
1140 static int
1141 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1142 		struct qbman_fd *fd, uint16_t bpid)
1143 {
1144 	struct rte_crypto_sym_op *sym_op = op->sym;
1145 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1146 	int data_len, data_offset;
1147 	struct sec_flow_context *flc;
1148 	struct ctxt_priv *priv = sess->ctxt;
1149 	struct rte_mbuf *mbuf;
1150 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1151 			sess->iv.offset);
1152 
1153 	data_len = sym_op->cipher.data.length;
1154 	data_offset = sym_op->cipher.data.offset;
1155 
1156 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1157 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1158 		if ((data_len & 7) || (data_offset & 7)) {
1159 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1160 			return -ENOTSUP;
1161 		}
1162 
1163 		data_len = data_len >> 3;
1164 		data_offset = data_offset >> 3;
1165 	}
1166 
1167 	if (sym_op->m_dst)
1168 		mbuf = sym_op->m_dst;
1169 	else
1170 		mbuf = sym_op->m_src;
1171 
1172 	/* first FLE entry used to store mbuf and session ctxt */
1173 	fle = (struct qbman_fle *)rte_malloc(NULL,
1174 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1175 			RTE_CACHE_LINE_SIZE);
1176 	if (!fle) {
1177 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1178 		return -ENOMEM;
1179 	}
1180 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1181 	/* first FLE entry used to store mbuf and session ctxt */
1182 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1183 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1184 
1185 	op_fle = fle + 1;
1186 	ip_fle = fle + 2;
1187 	sge = fle + 3;
1188 
1189 	flc = &priv->flc_desc[0].flc;
1190 
1191 	DPAA2_SEC_DP_DEBUG(
1192 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1193 		" data_off: 0x%x",
1194 		data_offset,
1195 		data_len,
1196 		sess->iv.length,
1197 		sym_op->m_src->data_off);
1198 
1199 	/* o/p fle */
1200 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1201 	op_fle->length = data_len;
1202 	DPAA2_SET_FLE_SG_EXT(op_fle);
1203 
1204 	/* o/p 1st seg */
1205 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
1206 	sge->length = mbuf->data_len - data_offset;
1207 
1208 	mbuf = mbuf->next;
1209 	/* o/p segs */
1210 	while (mbuf) {
1211 		sge++;
1212 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
1213 		sge->length = mbuf->data_len;
1214 		mbuf = mbuf->next;
1215 	}
1216 	DPAA2_SET_FLE_FIN(sge);
1217 
1218 	DPAA2_SEC_DP_DEBUG(
1219 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d",
1220 		flc, fle, fle->addr_hi, fle->addr_lo,
1221 		fle->length);
1222 
1223 	/* i/p fle */
1224 	mbuf = sym_op->m_src;
1225 	sge++;
1226 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1227 	ip_fle->length = sess->iv.length + data_len;
1228 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1229 
1230 	/* i/p IV */
1231 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1232 	sge->length = sess->iv.length;
1233 
1234 	sge++;
1235 
1236 	/* i/p 1st seg */
1237 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf) + data_offset);
1238 	sge->length = mbuf->data_len - data_offset;
1239 
1240 	mbuf = mbuf->next;
1241 	/* i/p segs */
1242 	while (mbuf) {
1243 		sge++;
1244 		DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(mbuf));
1245 		sge->length = mbuf->data_len;
1246 		mbuf = mbuf->next;
1247 	}
1248 	DPAA2_SET_FLE_FIN(sge);
1249 	DPAA2_SET_FLE_FIN(ip_fle);
1250 
1251 	/* sg fd */
1252 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1253 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1254 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1255 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1256 	dpaa2_sec_dp_fd_dump(fd, bpid, mbuf, true);
1257 
1258 	return 0;
1259 }
1260 
1261 static int
1262 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1263 		struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
1264 {
1265 	struct rte_crypto_sym_op *sym_op = op->sym;
1266 	struct qbman_fle *fle, *sge;
1267 	int retval, data_len, data_offset;
1268 	struct sec_flow_context *flc;
1269 	struct ctxt_priv *priv = sess->ctxt;
1270 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1271 			sess->iv.offset);
1272 	struct rte_mbuf *dst;
1273 
1274 	data_len = sym_op->cipher.data.length;
1275 	data_offset = sym_op->cipher.data.offset;
1276 
1277 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1278 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1279 		if ((data_len & 7) || (data_offset & 7)) {
1280 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1281 			return -ENOTSUP;
1282 		}
1283 
1284 		data_len = data_len >> 3;
1285 		data_offset = data_offset >> 3;
1286 	}
1287 
1288 	if (sym_op->m_dst)
1289 		dst = sym_op->m_dst;
1290 	else
1291 		dst = sym_op->m_src;
1292 
1293 	retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
1294 	if (retval) {
1295 		DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
1296 		return -ENOMEM;
1297 	}
1298 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1299 	/* TODO: we are using the first FLE entry to store the mbuf.
1300 	 * Currently we do not know which FLE has the mbuf stored,
1301 	 * so while retrieving we go back one FLE from the FD address
1302 	 * to get the mbuf address from the previous FLE.
1303 	 * A better approach would be to use the inline mbuf.
1304 	 */
1305 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1306 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1307 	fle = fle + 1;
1308 	sge = fle + 2;
1309 
1310 	if (likely(bpid < MAX_BPID)) {
1311 		DPAA2_SET_FD_BPID(fd, bpid);
1312 		DPAA2_SET_FLE_BPID(fle, bpid);
1313 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1314 		DPAA2_SET_FLE_BPID(sge, bpid);
1315 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1316 	} else {
1317 		DPAA2_SET_FD_IVP(fd);
1318 		DPAA2_SET_FLE_IVP(fle);
1319 		DPAA2_SET_FLE_IVP((fle + 1));
1320 		DPAA2_SET_FLE_IVP(sge);
1321 		DPAA2_SET_FLE_IVP((sge + 1));
1322 	}
1323 
1324 	flc = &priv->flc_desc[0].flc;
1325 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1326 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1327 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1328 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1329 
1330 	DPAA2_SEC_DP_DEBUG(
1331 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1332 		" data_off: 0x%x",
1333 		data_offset,
1334 		data_len,
1335 		sess->iv.length,
1336 		sym_op->m_src->data_off);
1337 
1338 	DPAA2_SET_FLE_ADDR(fle, rte_pktmbuf_iova(dst) + data_offset);
1339 
1340 	fle->length = data_len + sess->iv.length;
1341 
1342 	DPAA2_SEC_DP_DEBUG(
1343 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
1344 		flc, fle, fle->addr_hi, fle->addr_lo,
1345 		fle->length);
1346 
1347 	fle++;
1348 
1349 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1350 	fle->length = data_len + sess->iv.length;
1351 
1352 	DPAA2_SET_FLE_SG_EXT(fle);
1353 
1354 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1355 	sge->length = sess->iv.length;
1356 
1357 	sge++;
1358 	DPAA2_SET_FLE_ADDR(sge, rte_pktmbuf_iova(sym_op->m_src) + data_offset);
1359 
1360 	sge->length = data_len;
1361 	DPAA2_SET_FLE_FIN(sge);
1362 	DPAA2_SET_FLE_FIN(fle);
1363 	dpaa2_sec_dp_fd_dump(fd, bpid, dst, true);
1364 
1365 	return 0;
1366 }
1367 
1368 static inline int
1369 build_sec_fd(struct rte_crypto_op *op,
1370 	     struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
1371 {
1372 	int ret = -1;
1373 	dpaa2_sec_session *sess;
1374 
1375 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
1376 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1377 	} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1378 		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
1379 	} else {
1380 		DPAA2_SEC_DP_ERR("Session type invalid");
1381 		return -ENOTSUP;
1382 	}
1383 
1384 	if (!sess) {
1385 		DPAA2_SEC_DP_ERR("Session not available");
1386 		return -EINVAL;
1387 	}
1388 
1389 	/* Either of the buffers is segmented */
1390 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1391 		  ((op->sym->m_dst != NULL) &&
1392 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1393 		switch (sess->ctxt_type) {
1394 		case DPAA2_SEC_CIPHER:
1395 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1396 			break;
1397 		case DPAA2_SEC_AUTH:
1398 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1399 			break;
1400 		case DPAA2_SEC_AEAD:
1401 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1402 			break;
1403 		case DPAA2_SEC_CIPHER_HASH:
1404 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1405 			break;
1406 		case DPAA2_SEC_IPSEC:
1407 		case DPAA2_SEC_PDCP:
1408 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1409 			break;
1410 		default:
1411 			DPAA2_SEC_ERR("error: Unsupported session %d",
1412 				sess->ctxt_type);
1413 			ret = -ENOTSUP;
1414 		}
1415 	} else {
1416 		switch (sess->ctxt_type) {
1417 		case DPAA2_SEC_CIPHER:
1418 			ret = build_cipher_fd(sess, op, fd, bpid, qp);
1419 			break;
1420 		case DPAA2_SEC_AUTH:
1421 			ret = build_auth_fd(sess, op, fd, bpid, qp);
1422 			break;
1423 		case DPAA2_SEC_AEAD:
1424 			ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
1425 			break;
1426 		case DPAA2_SEC_CIPHER_HASH:
1427 			ret = build_authenc_fd(sess, op, fd, bpid, qp);
1428 			break;
1429 		case DPAA2_SEC_IPSEC:
1430 			ret = build_proto_fd(sess, op, fd, bpid, qp);
1431 			break;
1432 		case DPAA2_SEC_PDCP:
1433 			ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
1434 			break;
1435 		default:
1436 			DPAA2_SEC_ERR("error: Unsupported session %d",
1437 				sess->ctxt_type);
1438 			ret = -ENOTSUP;
1439 		}
1440 	}
1441 	return ret;
1442 }
1443 
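/* Burst enqueue: affine a QBMAN portal to the calling lcore if needed,
 * build one FD per op (the bpid comes from the source mbuf's pool), and
 * push FDs to the EQCR in chunks; after DPAA2_MAX_TX_RETRY_COUNT failed
 * attempts the remaining FLE buffers are freed and the ops counted as
 * error packets.
 */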
1444 static uint16_t
1445 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1446 			uint16_t nb_ops)
1447 {
1448 	/* Transmit the frames to the given device and VQ. */
1449 	uint32_t loop;
1450 	int32_t ret;
1451 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1452 	uint32_t frames_to_send, retry_count;
1453 	struct qbman_eq_desc eqdesc;
1454 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1455 	struct qbman_swp *swp;
1456 	uint16_t num_tx = 0;
1457 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1458 	/* TODO: need to support multiple buffer pools */
1459 	uint16_t bpid;
1460 	struct rte_mempool *mb_pool;
1461 
1462 	if (unlikely(nb_ops == 0))
1463 		return 0;
1464 
1465 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1466 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1467 		return 0;
1468 	}
1469 	/* Prepare enqueue descriptor */
1470 	qbman_eq_desc_clear(&eqdesc);
1471 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1472 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1473 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1474 
1475 	if (!DPAA2_PER_LCORE_DPIO) {
1476 		ret = dpaa2_affine_qbman_swp();
1477 		if (ret) {
1478 			DPAA2_SEC_ERR(
1479 				"Failed to allocate IO portal, tid: %d",
1480 				rte_gettid());
1481 			return 0;
1482 		}
1483 	}
1484 	swp = DPAA2_PER_LCORE_PORTAL;
1485 
1486 	while (nb_ops) {
1487 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1488 			dpaa2_eqcr_size : nb_ops;
1489 
1490 		for (loop = 0; loop < frames_to_send; loop++) {
1491 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1492 				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
1493 					DPAA2_PER_LCORE_DQRR_SIZE--;
1494 					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
1495 					(*dpaa2_seqn((*ops)->sym->m_src) &
1496 					QBMAN_EQCR_DCA_IDXMASK));
1497 				}
1498 				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
1499 				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
1500 			}
1501 
1502 			/* Clear the unused FD fields before sending */
1503 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1504 			mb_pool = (*ops)->sym->m_src->pool;
1505 			bpid = mempool_to_bpid(mb_pool);
1506 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
1507 			if (ret) {
1508 				DPAA2_SEC_DP_DEBUG("FD build failed");
1509 				goto skip_tx;
1510 			}
1511 			ops++;
1512 		}
1513 
1514 		loop = 0;
1515 		retry_count = 0;
1516 		while (loop < frames_to_send) {
1517 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1518 							 &fd_arr[loop],
1519 							 &flags[loop],
1520 							 frames_to_send - loop);
1521 			if (unlikely(ret < 0)) {
1522 				retry_count++;
1523 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1524 					num_tx += loop;
1525 					nb_ops -= loop;
1526 					DPAA2_SEC_DP_DEBUG("Enqueue fail");
1527 					/* freeing the fle buffers */
1528 					while (loop < frames_to_send) {
1529 						free_fle(&fd_arr[loop],
1530 								dpaa2_qp);
1531 						loop++;
1532 					}
1533 					goto skip_tx;
1534 				}
1535 			} else {
1536 				loop += ret;
1537 				retry_count = 0;
1538 			}
1539 		}
1540 
1541 		num_tx += loop;
1542 		nb_ops -= loop;
1543 	}
1544 skip_tx:
1545 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1546 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1547 	return num_tx;
1548 }
1549 
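/* Inverse of build_proto_fd() for single-format FDs: recover the op
 * pointer parked in mbuf->buf_iova, restore the real buffer IOVA from
 * aead.digest.phys_addr, and advance data_off by the SEC frame header
 * room (SEC_FLC_DHR_*) for the session direction.
 */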
1550 static inline struct rte_crypto_op *
1551 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1552 {
1553 	struct rte_crypto_op *op;
1554 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1555 	int16_t diff = 0;
1556 	dpaa2_sec_session *sess_priv __rte_unused;
1557 
1558 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1559 		DPAA2_SEC_ERR("error: non inline buffer");
1560 		return NULL;
1561 	}
1562 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1563 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1564 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1565 
1566 	diff = len - mbuf->pkt_len;
1567 	mbuf->pkt_len += diff;
1568 	mbuf->data_len += diff;
1569 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1570 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1571 	op->sym->aead.digest.phys_addr = 0L;
1572 
1573 	sess_priv = SECURITY_GET_SESS_PRIV(op->sym->session);
1574 	if (sess_priv->dir == DIR_ENC)
1575 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1576 	else
1577 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1578 
1579 	if (unlikely(fd->simple.frc)) {
1580 		DPAA2_SEC_ERR("SEC returned Error - %x",
1581 				fd->simple.frc);
1582 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
1583 	} else {
1584 		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1585 	}
1586 
1587 	return op;
1588 }
1589 
1590 static inline struct rte_crypto_op *
1591 sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
1592 {
1593 	struct qbman_fle *fle;
1594 	struct rte_crypto_op *op;
1595 	struct rte_mbuf *dst, *src;
1596 
1597 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1598 		return sec_simple_fd_to_mbuf(fd);
1599 
1600 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1601 
1602 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x",
1603 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1604 
1605 	/* We are using the first FLE entry to store the mbuf.
1606 	 * Currently we do not know which FLE has the mbuf stored,
1607 	 * so while retrieving we go back one FLE from the FD address
1608 	 * to get the mbuf address from the previous FLE.
1609 	 * A better approach would be to use the inline mbuf.
1610 	 */
1611 
1612 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1613 
1614 	/* Prefetch op */
1615 	src = op->sym->m_src;
1616 	rte_prefetch0(src);
1617 
1618 	if (op->sym->m_dst) {
1619 		dst = op->sym->m_dst;
1620 		rte_prefetch0(dst);
1621 	} else
1622 		dst = src;
1623 
1624 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1625 		uint16_t len = DPAA2_GET_FD_LEN(fd);
1626 		dst->pkt_len = len;
1627 		while (dst->next != NULL) {
1628 			len -= dst->data_len;
1629 			dst = dst->next;
1630 		}
1631 		dst->data_len = len;
1632 	}
1633 	dpaa2_sec_dp_fd_dump(fd, 0, dst, false);
1634 
1635 	/* free the fle memory */
1636 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1637 		rte_mempool_put(qp->fle_pool, (void *)(fle-1));
1638 	} else
1639 		rte_free((void *)(fle-1));
1640 
1641 	return op;
1642 }
1643 
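/* Diagnostic dump of the session parameters, the shared descriptor words
 * and the source/destination mbufs; presumably driven from the dequeue
 * error path according to the dpaa2_sec_dp_dump level.
 */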
1644 static void
1645 dpaa2_sec_dump(struct rte_crypto_op *op, FILE *f)
1646 {
1647 	int i;
1648 	dpaa2_sec_session *sess = NULL;
1649 	struct ctxt_priv *priv;
1650 	uint8_t bufsize;
1651 	struct rte_crypto_sym_op *sym_op;
1652 
1653 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1654 		sess = CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
1655 #ifdef RTE_LIB_SECURITY
1656 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1657 		sess = SECURITY_GET_SESS_PRIV(op->sym->session);
1658 #endif
1659 
1660 	if (sess == NULL)
1661 		goto mbuf_dump;
1662 
1663 	priv = (struct ctxt_priv *)sess->ctxt;
1664 	fprintf(f, "\n****************************************\n"
1665 		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
1666 		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
1667 		"\tCipher key len:\t%zd\n", sess->ctxt_type,
1668 		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
1669 		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
1670 		sess->cipher_key.length);
1671 	rte_hexdump(f, "cipher key", sess->cipher_key.data,
1672 		    sess->cipher_key.length);
1673 	rte_hexdump(f, "auth key", sess->auth_key.data,
1674 		    sess->auth_key.length);
1675 	fprintf(f, "\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
1676 		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
1677 		" len:\t%d\n\taead cipher text:\t%d\n",
1678 		sess->auth_key.length, sess->iv.length, sess->iv.offset,
1679 		sess->digest_length, sess->status,
1680 		sess->ext_params.aead_ctxt.auth_only_len,
1681 		sess->ext_params.aead_ctxt.auth_cipher_text);
1682 #ifdef RTE_LIB_SECURITY
1683 	fprintf(f, "PDCP session params:\n"
1684 		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
1685 		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
1686 		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
1687 		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
1688 		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
1689 		sess->pdcp.hfn_threshold);
1690 
1691 #endif
1692 	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
1693 	fprintf(f, "Descriptor Dump:\n");
1694 	for (i = 0; i < bufsize; i++)
1695 		fprintf(f, "\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
1696 
1697 	fprintf(f, "\n");
1698 mbuf_dump:
1699 	sym_op = op->sym;
1700 	if (sym_op->m_src) {
1701 		fprintf(f, "Source mbuf:\n");
1702 		rte_pktmbuf_dump(f, sym_op->m_src, sym_op->m_src->data_len);
1703 	}
1704 	if (sym_op->m_dst) {
1705 		fprintf(f, "Destination mbuf:\n");
1706 		rte_pktmbuf_dump(f, sym_op->m_dst, sym_op->m_dst->data_len);
1707 	}
1708 
1709 	fprintf(f, "Session address = %p\ncipher offset: %d, length: %d\n"
1710 		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
1711 		, sym_op->session,
1712 		sym_op->cipher.data.offset, sym_op->cipher.data.length,
1713 		sym_op->auth.data.offset, sym_op->auth.data.length,
1714 		sym_op->aead.data.offset, sym_op->aead.data.length);
1715 	fprintf(f, "\n");
1716 
1717 }
1718 
1719 static void
1720 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
1721 			  struct dpaa2_queue *dpaa2_q)
1722 {
1723 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1724 	struct rte_crypto_op *op;
1725 	struct qbman_fd *fd;
1726 	struct dpaa2_sec_qp *dpaa2_qp;
1727 
1728 	dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
1729 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1730 	op = sec_fd_to_mbuf(fd, dpaa2_qp);
1731 	/* Instead of freeing, this could be enqueued to the SEC TX queue
1732 	 * (sec->core) after setting an error in the FD, but that would have a performance impact.
1733 	 */
1734 	rte_pktmbuf_free(op->sym->m_src);
1735 }
1736 
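/* Fill the enqueue descriptor for ordered traffic: restore the ORP
 * (order restoration point) sequence number carried in the mbuf, or
 * consume the held DQRR entry when order is preserved via DCA.
 */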
1737 static void
1738 dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1739 			     struct rte_mbuf *m,
1740 			     struct qbman_eq_desc *eqdesc)
1741 {
1742 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1743 	struct eqresp_metadata *eqresp_meta;
1744 	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
1745 	uint16_t orpid, seqnum;
1746 	uint8_t dq_idx;
1747 
1748 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1749 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1750 			DPAA2_EQCR_OPRID_SHIFT;
1751 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1752 			DPAA2_EQCR_SEQNUM_SHIFT;
1753 
1754 
1755 		if (!priv->en_loose_ordered) {
1756 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1757 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1758 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1759 				dpio_dev->eqresp_pi]), 1);
1760 			qbman_eq_desc_set_token(eqdesc, 1);
1761 
1762 			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
1763 			eqresp_meta->dpaa2_q = dpaa2_q;
1764 			eqresp_meta->mp = m->pool;
1765 
1766 			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1767 				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
1768 		} else {
1769 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1770 		}
1771 	} else {
1772 		dq_idx = *dpaa2_seqn(m) - 1;
1773 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1774 		DPAA2_PER_LCORE_DQRR_SIZE--;
1775 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1776 	}
1777 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1778 }
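
/*
 * Illustrative sketch (not part of the driver): the ORP fields decoded in
 * dpaa2_sec_set_enqueue_descriptor() above are expected to have been packed
 * into the mbuf sequence number by the ordered-Rx side roughly as below;
 * `m`, `orpid` and `seqnum` are placeholder names.
 */
#if 0
	*dpaa2_seqn(m) = DPAA2_ENQUEUE_FLAG_ORP |
		((uint32_t)orpid << DPAA2_EQCR_OPRID_SHIFT) |
		((uint32_t)seqnum << DPAA2_EQCR_SEQNUM_SHIFT);
#endif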
1779 
1781 static uint16_t
1782 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
1783 			uint16_t nb_ops)
1784 {
1785 	/* Transmit frames to the given device and VQ */
1786 	uint32_t loop;
1787 	int32_t ret;
1788 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1789 	uint32_t frames_to_send, num_free_eq_desc, retry_count;
1790 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1791 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1792 	struct qbman_swp *swp;
1793 	uint16_t num_tx = 0;
1794 	uint16_t bpid;
1795 	struct rte_mempool *mb_pool;
1796 	struct dpaa2_sec_dev_private *priv =
1797 				dpaa2_qp->tx_vq.crypto_data->dev_private;
1798 
1799 	if (unlikely(nb_ops == 0))
1800 		return 0;
1801 
1802 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1803 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1804 		return 0;
1805 	}
1806 
1807 	if (!DPAA2_PER_LCORE_DPIO) {
1808 		ret = dpaa2_affine_qbman_swp();
1809 		if (ret) {
1810 			DPAA2_SEC_ERR("Failure in affining portal");
1811 			return 0;
1812 		}
1813 	}
1814 	swp = DPAA2_PER_LCORE_PORTAL;
1815 
1816 	while (nb_ops) {
1817 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1818 			dpaa2_eqcr_size : nb_ops;
1819 
1820 		if (!priv->en_loose_ordered) {
1821 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1822 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1823 				if (num_free_eq_desc < frames_to_send)
1824 					frames_to_send = num_free_eq_desc;
1825 			}
1826 		}
1827 
1828 		for (loop = 0; loop < frames_to_send; loop++) {
1829 			/* Prepare enqueue descriptor */
1830 			qbman_eq_desc_clear(&eqdesc[loop]);
1831 			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
1832 
1833 			if (*dpaa2_seqn((*ops)->sym->m_src))
1834 				dpaa2_sec_set_enqueue_descriptor(
1835 						&dpaa2_qp->tx_vq,
1836 						(*ops)->sym->m_src,
1837 						&eqdesc[loop]);
1838 			else
1839 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1840 							 DPAA2_EQ_RESP_ERR_FQ);
1841 
1842 			/* Clear the unused FD fields before sending */
1843 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1844 			mb_pool = (*ops)->sym->m_src->pool;
1845 			bpid = mempool_to_bpid(mb_pool);
1846 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
1847 			if (ret) {
1848 				DPAA2_SEC_DP_DEBUG("FD build failed");
1849 				goto skip_tx;
1850 			}
1851 			ops++;
1852 		}
1853 
1854 		loop = 0;
1855 		retry_count = 0;
1856 		while (loop < frames_to_send) {
1857 			ret = qbman_swp_enqueue_multiple_desc(swp,
1858 					&eqdesc[loop], &fd_arr[loop],
1859 					frames_to_send - loop);
1860 			if (unlikely(ret < 0)) {
1861 				retry_count++;
1862 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1863 					num_tx += loop;
1864 					nb_ops -= loop;
1865 					DPAA2_SEC_DP_DEBUG("Enqueue fail");
1866 					/* freeing the fle buffers */
1867 					while (loop < frames_to_send) {
1868 						free_fle(&fd_arr[loop],
1869 								dpaa2_qp);
1870 						loop++;
1871 					}
1872 					goto skip_tx;
1873 				}
1874 			} else {
1875 				loop += ret;
1876 				retry_count = 0;
1877 			}
1878 		}
1879 
1880 		num_tx += loop;
1881 		nb_ops -= loop;
1882 	}
1883 
1884 skip_tx:
1885 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1886 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1887 	return num_tx;
1888 }
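
/*
 * Illustrative sketch (not part of the driver): applications reach the
 * ordered enqueue path above through the generic cryptodev burst API;
 * `dev_id`, `qp_id`, `ops` and `nb_ops` are placeholder names.
 */
#if 0
	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	/* ops[0..sent-1] are now owned by SEC until dequeued */
#endif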
1889 
1890 static uint16_t
1891 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1892 			uint16_t nb_ops)
1893 {
1894 	/* Receive frames for a given device and VQ */
1895 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1896 	struct qbman_result *dq_storage;
1897 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1898 	int ret, num_rx = 0;
1899 	uint8_t is_last = 0, status;
1900 	struct qbman_swp *swp;
1901 	const struct qbman_fd *fd;
1902 	struct qbman_pull_desc pulldesc;
1903 
1904 	if (!DPAA2_PER_LCORE_DPIO) {
1905 		ret = dpaa2_affine_qbman_swp();
1906 		if (ret) {
1907 			DPAA2_SEC_ERR(
1908 				"Failed to allocate IO portal, tid: %d",
1909 				rte_gettid());
1910 			return 0;
1911 		}
1912 	}
1913 	swp = DPAA2_PER_LCORE_PORTAL;
1914 	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
1915 
1916 	qbman_pull_desc_clear(&pulldesc);
1917 	qbman_pull_desc_set_numframes(&pulldesc,
1918 				      (nb_ops > dpaa2_dqrr_size) ?
1919 				      dpaa2_dqrr_size : nb_ops);
1920 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1921 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1922 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1923 				    1);
1924 
1925 	/* Issue a volatile dequeue command. */
1926 	while (1) {
1927 		if (qbman_swp_pull(swp, &pulldesc)) {
1928 			DPAA2_SEC_WARN(
1929 				"SEC VDQ command not issued: QBMAN busy");
1930 			/* Portal was busy, try again */
1931 			continue;
1932 		}
1933 		break;
1934 	}
1935 
1936 	/* Receive packets until the Last Dequeue entry is found for
1937 	 * the PULL command issued above.
1938 	 */
1939 	while (!is_last) {
1940 		/* Check whether the previously issued command has completed.
1941 		 * The SWP also appears to be shared between the Ethernet
1942 		 * driver and the SEC driver.
1943 		 */
1944 		while (!qbman_check_command_complete(dq_storage))
1945 			;
1946 
1947 		/* Loop until the dq_storage is updated with
1948 		 * new token by QBMAN
1949 		 */
1950 		while (!qbman_check_new_result(dq_storage))
1951 			;
1952 		/* Check whether the last pull command has expired and,
1953 		 * if so, set the loop-termination condition.
1954 		 */
1955 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1956 			is_last = 1;
1957 			/* Check for valid frame. */
1958 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1959 			if (unlikely(
1960 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1961 				DPAA2_SEC_DP_DEBUG("No frame is delivered");
1962 				continue;
1963 			}
1964 		}
1965 
1966 		fd = qbman_result_DQ_fd(dq_storage);
1967 		ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);
1968 
1969 		if (unlikely(fd->simple.frc)) {
1970 			/* TODO Parse SEC errors */
1971 			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
1972 				DPAA2_SEC_DP_ERR("SEC returned Error - %x",
1973 						 fd->simple.frc);
1974 				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
1975 					dpaa2_sec_dump(ops[num_rx], stdout);
1976 			}
1977 
1978 			dpaa2_qp->rx_vq.err_pkts += 1;
1979 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1980 		} else {
1981 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1982 		}
1983 
1984 		num_rx++;
1985 		dq_storage++;
1986 	} /* End of Packet Rx loop */
1987 
1988 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1989 
1990 	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx,
1991 				dpaa2_qp->rx_vq.err_pkts);
1992 	/* Return the total number of packets received to the DPAA2 app */
1993 	return num_rx;
1994 }
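
/*
 * Illustrative sketch (not part of the driver): a typical application-side
 * dequeue loop over the burst API; op->status reflects the FRC check made
 * above. `dev_id` and `qp_id` are placeholder names and handle_sec_error()
 * is a hypothetical helper.
 */
#if 0
	struct rte_crypto_op *deq_ops[32];
	uint16_t i, nb = rte_cryptodev_dequeue_burst(dev_id, qp_id,
						     deq_ops, 32);
	for (i = 0; i < nb; i++)
		if (deq_ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
			handle_sec_error(deq_ops[i]);
#endif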
1995 
1996 /** Release queue pair */
1997 static int
1998 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1999 {
2000 	struct dpaa2_sec_qp *qp =
2001 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
2002 
2003 	PMD_INIT_FUNC_TRACE();
2004 
2005 	dpaa2_queue_storage_free(&qp->rx_vq, 1);
2006 	rte_mempool_free(qp->fle_pool);
2007 	rte_free(qp);
2008 
2009 	dev->data->queue_pairs[queue_pair_id] = NULL;
2010 
2011 	return 0;
2012 }
2013 
2014 /** Setup a queue pair */
2015 static int
2016 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2017 		const struct rte_cryptodev_qp_conf *qp_conf,
2018 		__rte_unused int socket_id)
2019 {
2020 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2021 	struct dpaa2_sec_qp *qp;
2022 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2023 	struct dpseci_rx_queue_cfg cfg;
2024 	int32_t retcode;
2025 	char str[RTE_MEMZONE_NAMESIZE];
2026 
2027 	PMD_INIT_FUNC_TRACE();
2028 
2029 	/* Nothing more to do if the qp is already set up. */
2030 	if (dev->data->queue_pairs[qp_id] != NULL) {
2031 		DPAA2_SEC_INFO("QP already setup");
2032 		return 0;
2033 	}
2034 
2035 	if (qp_conf->nb_descriptors < (2 * FLE_POOL_CACHE_SIZE)) {
2036 		DPAA2_SEC_ERR("Minimum supported nb_descriptors %d,"
2037 			      " but given %d", (2 * FLE_POOL_CACHE_SIZE),
2038 			      qp_conf->nb_descriptors);
2039 		return -EINVAL;
2040 	}
2041 
2042 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
2043 		    dev, qp_id, qp_conf);
2044 
2045 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
2046 
2047 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
2048 			RTE_CACHE_LINE_SIZE);
2049 	if (!qp) {
2050 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
2051 		return -ENOMEM;
2052 	}
2053 
2054 	qp->rx_vq.crypto_data = dev->data;
2055 	qp->tx_vq.crypto_data = dev->data;
2056 	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
2057 	if (retcode) {
2058 		dpaa2_queue_storage_free((&qp->rx_vq), 1);
2059 		rte_free(qp);	/* qp is not registered yet; avoid a leak */
2060 		return retcode;
2061 	}
2061 
2062 	dev->data->queue_pairs[qp_id] = qp;
2063 
2064 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
2065 			getpid(), dev->data->dev_id, qp_id);
2066 	qp->fle_pool = rte_mempool_create((const char *)str,
2067 			qp_conf->nb_descriptors,
2068 			FLE_POOL_BUF_SIZE,
2069 			FLE_POOL_CACHE_SIZE, 0,
2070 			NULL, NULL, NULL, NULL,
2071 			SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
2072 	if (!qp->fle_pool) {
2073 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2074 		return -ENOMEM;
2075 	}
2076 
2077 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
2078 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
2079 				      qp_id, &cfg);
2080 	return retcode;
2081 }
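
/*
 * Illustrative sketch (not part of the driver): configuring a queue pair
 * from the application, honouring the nb_descriptors minimum enforced
 * above. The qp_conf layout assumes a DPDK 22.11+ cryptodev; `dev_id` and
 * `sess_mp` are placeholder names.
 */
#if 0
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,	/* must be >= 2 * FLE_POOL_CACHE_SIZE */
		.mp_session = sess_mp,
	};
	ret = rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
					     rte_socket_id());
#endif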
2082 
2083 /** Returns the size of the dpaa2_sec session structure */
2084 static unsigned int
2085 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2086 {
2087 	PMD_INIT_FUNC_TRACE();
2088 
2089 	return sizeof(dpaa2_sec_session);
2090 }
2091 
2092 static int
2093 dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
2094 		      dpaa2_sec_session *session)
2095 {
2096 	struct alginfo cipherdata;
2097 	int bufsize, ret = 0;
2098 	struct ctxt_priv *priv;
2099 	struct sec_flow_context *flc;
2100 
2101 	PMD_INIT_FUNC_TRACE();
2102 
2103 	/* For SEC CIPHER only one descriptor is required. */
2104 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2105 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2106 			RTE_CACHE_LINE_SIZE);
2107 	if (priv == NULL) {
2108 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2109 		return -ENOMEM;
2110 	}
2111 
2112 	flc = &priv->flc_desc[0].flc;
2113 
2114 	session->ctxt_type = DPAA2_SEC_CIPHER;
2115 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2116 			RTE_CACHE_LINE_SIZE);
2117 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2118 		DPAA2_SEC_ERR("No Memory for cipher key");
2119 		rte_free(priv);
2120 		return -ENOMEM;
2121 	}
2122 	session->cipher_key.length = xform->cipher.key.length;
2123 
2124 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2125 	       xform->cipher.key.length);
2126 	cipherdata.key = (size_t)session->cipher_key.data;
2127 	cipherdata.keylen = session->cipher_key.length;
2128 	cipherdata.key_enc_flags = 0;
2129 	cipherdata.key_type = RTA_DATA_IMM;
2130 
2131 	/* Set IV parameters */
2132 	session->iv.offset = xform->cipher.iv.offset;
2133 	session->iv.length = xform->cipher.iv.length;
2134 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2135 				DIR_ENC : DIR_DEC;
2136 
2137 	switch (xform->cipher.algo) {
2138 	case RTE_CRYPTO_CIPHER_AES_CBC:
2139 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2140 		cipherdata.algmode = OP_ALG_AAI_CBC;
2141 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2142 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2143 						SHR_NEVER, &cipherdata,
2144 						session->iv.length,
2145 						session->dir);
2146 		break;
2147 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2148 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2149 		cipherdata.algmode = OP_ALG_AAI_CBC;
2150 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2151 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2152 						SHR_NEVER, &cipherdata,
2153 						session->iv.length,
2154 						session->dir);
2155 		break;
2156 	case RTE_CRYPTO_CIPHER_DES_CBC:
2157 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2158 		cipherdata.algmode = OP_ALG_AAI_CBC;
2159 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2160 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2161 						SHR_NEVER, &cipherdata,
2162 						session->iv.length,
2163 						session->dir);
2164 		break;
2165 	case RTE_CRYPTO_CIPHER_AES_CTR:
2166 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2167 		cipherdata.algmode = OP_ALG_AAI_CTR;
2168 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2169 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2170 						SHR_NEVER, &cipherdata,
2171 						session->iv.length,
2172 						session->dir);
2173 		break;
2174 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2175 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
2176 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
2177 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
2178 					      &cipherdata,
2179 					      session->dir);
2180 		break;
2181 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2182 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
2183 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
2184 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
2185 					      &cipherdata,
2186 					      session->dir);
2187 		break;
2188 	default:
2189 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)",
2190 			rte_cryptodev_get_cipher_algo_string(xform->cipher.algo),
2191 			xform->cipher.algo);
2192 		ret = -ENOTSUP;
2193 		goto error_out;
2194 	}
2195 
2196 	if (bufsize < 0) {
2197 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2198 		ret = -EINVAL;
2199 		goto error_out;
2200 	}
2201 
2202 	flc->word1_sdl = (uint8_t)bufsize;
2203 	session->ctxt = priv;
2204 
2205 #ifdef CAAM_DESC_DEBUG
2206 	int i;
2207 	for (i = 0; i < bufsize; i++)
2208 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
2209 #endif
2210 	return ret;
2211 
2212 error_out:
2213 	rte_free(session->cipher_key.data);
2214 	rte_free(priv);
2215 	return ret;
2216 }
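
/*
 * Illustrative sketch (not part of the driver): a cipher-only transform,
 * e.g. AES-128-CBC, that exercises the init path above. `aes_key` and
 * `IV_OFFSET` are placeholder names.
 */
#if 0
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};
#endif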
2217 
2218 static int
2219 dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
2220 		    dpaa2_sec_session *session)
2221 {
2222 	struct alginfo authdata;
2223 	int bufsize, ret = 0;
2224 	struct ctxt_priv *priv;
2225 	struct sec_flow_context *flc;
2226 
2227 	PMD_INIT_FUNC_TRACE();
2228 
2229 	memset(&authdata, 0, sizeof(authdata));
2230 
2231 	/* For SEC AUTH three descriptors are required for various stages */
2232 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2233 			sizeof(struct ctxt_priv) + 3 *
2234 			sizeof(struct sec_flc_desc),
2235 			RTE_CACHE_LINE_SIZE);
2236 	if (priv == NULL) {
2237 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2238 		return -ENOMEM;
2239 	}
2240 
2241 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
2242 
2243 	session->ctxt_type = DPAA2_SEC_AUTH;
2244 	session->auth_key.length = xform->auth.key.length;
2245 	if (xform->auth.key.length) {
2246 		session->auth_key.data = rte_zmalloc(NULL,
2247 			xform->auth.key.length,
2248 			RTE_CACHE_LINE_SIZE);
2249 		if (session->auth_key.data == NULL) {
2250 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
2251 			rte_free(priv);
2252 			return -ENOMEM;
2253 		}
2254 		memcpy(session->auth_key.data, xform->auth.key.data,
2255 		       xform->auth.key.length);
2256 		authdata.key = (size_t)session->auth_key.data;
2257 		authdata.key_enc_flags = 0;
2258 		authdata.key_type = RTA_DATA_IMM;
2259 	}
2260 	authdata.keylen = session->auth_key.length;
2261 
2262 	session->digest_length = xform->auth.digest_length;
2263 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2264 				DIR_ENC : DIR_DEC;
2265 
2266 	switch (xform->auth.algo) {
2267 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2268 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2269 		authdata.algmode = OP_ALG_AAI_HMAC;
2270 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2271 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2272 					   1, 0, SHR_NEVER, &authdata,
2273 					   !session->dir,
2274 					   session->digest_length);
2275 		break;
2276 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2277 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2278 		authdata.algmode = OP_ALG_AAI_HMAC;
2279 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2280 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2281 					   1, 0, SHR_NEVER, &authdata,
2282 					   !session->dir,
2283 					   session->digest_length);
2284 		break;
2285 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2286 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2287 		authdata.algmode = OP_ALG_AAI_HMAC;
2288 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2289 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2290 					   1, 0, SHR_NEVER, &authdata,
2291 					   !session->dir,
2292 					   session->digest_length);
2293 		break;
2294 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2295 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2296 		authdata.algmode = OP_ALG_AAI_HMAC;
2297 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2298 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2299 					   1, 0, SHR_NEVER, &authdata,
2300 					   !session->dir,
2301 					   session->digest_length);
2302 		break;
2303 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2304 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2305 		authdata.algmode = OP_ALG_AAI_HMAC;
2306 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2307 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2308 					   1, 0, SHR_NEVER, &authdata,
2309 					   !session->dir,
2310 					   session->digest_length);
2311 		break;
2312 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2313 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2314 		authdata.algmode = OP_ALG_AAI_HMAC;
2315 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2316 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2317 					   1, 0, SHR_NEVER, &authdata,
2318 					   !session->dir,
2319 					   session->digest_length);
2320 		break;
2321 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2322 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2323 		authdata.algmode = OP_ALG_AAI_F9;
2324 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2325 		session->iv.offset = xform->auth.iv.offset;
2326 		session->iv.length = xform->auth.iv.length;
2327 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2328 					      1, 0, &authdata,
2329 					      !session->dir,
2330 					      session->digest_length);
2331 		break;
2332 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2333 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2334 		authdata.algmode = OP_ALG_AAI_F9;
2335 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2336 		session->iv.offset = xform->auth.iv.offset;
2337 		session->iv.length = xform->auth.iv.length;
2338 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2339 					   1, 0, &authdata,
2340 					   !session->dir,
2341 					   session->digest_length);
2342 		break;
2343 	case RTE_CRYPTO_AUTH_SHA1:
2344 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2345 		authdata.algmode = OP_ALG_AAI_HASH;
2346 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2347 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2348 					   1, 0, SHR_NEVER, &authdata,
2349 					   !session->dir,
2350 					   session->digest_length);
2351 		break;
2352 	case RTE_CRYPTO_AUTH_MD5:
2353 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2354 		authdata.algmode = OP_ALG_AAI_HASH;
2355 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2356 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2357 					   1, 0, SHR_NEVER, &authdata,
2358 					   !session->dir,
2359 					   session->digest_length);
2360 		break;
2361 	case RTE_CRYPTO_AUTH_SHA256:
2362 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2363 		authdata.algmode = OP_ALG_AAI_HASH;
2364 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2365 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2366 					   1, 0, SHR_NEVER, &authdata,
2367 					   !session->dir,
2368 					   session->digest_length);
2369 		break;
2370 	case RTE_CRYPTO_AUTH_SHA384:
2371 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2372 		authdata.algmode = OP_ALG_AAI_HASH;
2373 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2374 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2375 					   1, 0, SHR_NEVER, &authdata,
2376 					   !session->dir,
2377 					   session->digest_length);
2378 		break;
2379 	case RTE_CRYPTO_AUTH_SHA512:
2380 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2381 		authdata.algmode = OP_ALG_AAI_HASH;
2382 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2383 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2384 					   1, 0, SHR_NEVER, &authdata,
2385 					   !session->dir,
2386 					   session->digest_length);
2387 		break;
2388 	case RTE_CRYPTO_AUTH_SHA224:
2389 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2390 		authdata.algmode = OP_ALG_AAI_HASH;
2391 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2392 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2393 					   1, 0, SHR_NEVER, &authdata,
2394 					   !session->dir,
2395 					   session->digest_length);
2396 		break;
2397 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2398 		authdata.algtype = OP_ALG_ALGSEL_AES;
2399 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2400 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2401 		bufsize = cnstr_shdsc_aes_mac(
2402 					priv->flc_desc[DESC_INITFINAL].desc,
2403 					1, 0, SHR_NEVER, &authdata,
2404 					!session->dir,
2405 					session->digest_length);
2406 		break;
2407 	case RTE_CRYPTO_AUTH_AES_CMAC:
2408 		authdata.algtype = OP_ALG_ALGSEL_AES;
2409 		authdata.algmode = OP_ALG_AAI_CMAC;
2410 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2411 		bufsize = cnstr_shdsc_aes_mac(
2412 					   priv->flc_desc[DESC_INITFINAL].desc,
2413 					   1, 0, SHR_NEVER, &authdata,
2414 					   !session->dir,
2415 					   session->digest_length);
2416 		break;
2417 	default:
2418 		DPAA2_SEC_ERR("Crypto: Unsupported Auth alg %s (%u)",
2419 			rte_cryptodev_get_auth_algo_string(xform->auth.algo),
2420 			xform->auth.algo);
2421 		ret = -ENOTSUP;
2422 		goto error_out;
2423 	}
2424 
2425 	if (bufsize < 0) {
2426 		DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
2427 		ret = -EINVAL;
2428 		goto error_out;
2429 	}
2430 
2431 	flc->word1_sdl = (uint8_t)bufsize;
2432 	session->ctxt = priv;
2433 #ifdef CAAM_DESC_DEBUG
2434 	int i;
2435 	for (i = 0; i < bufsize; i++)
2436 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2437 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2438 #endif
2439 
2440 	return ret;
2441 
2442 error_out:
2443 	rte_free(session->auth_key.data);
2444 	rte_free(priv);
2445 	return ret;
2446 }
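
/*
 * Illustrative sketch (not part of the driver): an auth-only transform,
 * e.g. HMAC-SHA256 with a full 32-byte digest, that exercises the init
 * path above. `hmac_key` is a placeholder name.
 */
#if 0
	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
			.key = { .data = hmac_key, .length = 32 },
			.digest_length = 32,
		},
	};
#endif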
2447 
2448 static int
2449 dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
2450 		    dpaa2_sec_session *session)
2451 {
2452 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2453 	struct alginfo aeaddata;
2454 	int bufsize;
2455 	struct ctxt_priv *priv;
2456 	struct sec_flow_context *flc;
2457 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2458 	int err, ret = 0;
2459 
2460 	PMD_INIT_FUNC_TRACE();
2461 
2462 	/* Set IV parameters */
2463 	session->iv.offset = aead_xform->iv.offset;
2464 	session->iv.length = aead_xform->iv.length;
2465 	session->ctxt_type = DPAA2_SEC_AEAD;
2466 
2467 	/* For SEC AEAD only one descriptor is required */
2468 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2469 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2470 			RTE_CACHE_LINE_SIZE);
2471 	if (priv == NULL) {
2472 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2473 		return -ENOMEM;
2474 	}
2475 
2476 	flc = &priv->flc_desc[0].flc;
2477 
2478 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2479 					       RTE_CACHE_LINE_SIZE);
2480 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2481 		DPAA2_SEC_ERR("No Memory for aead key");
2482 		rte_free(priv);
2483 		return -ENOMEM;
2484 	}
2485 	memcpy(session->aead_key.data, aead_xform->key.data,
2486 	       aead_xform->key.length);
2487 
2488 	session->digest_length = aead_xform->digest_length;
2489 	session->aead_key.length = aead_xform->key.length;
2490 	ctxt->auth_only_len = aead_xform->aad_length;
2491 
2492 	aeaddata.key = (size_t)session->aead_key.data;
2493 	aeaddata.keylen = session->aead_key.length;
2494 	aeaddata.key_enc_flags = 0;
2495 	aeaddata.key_type = RTA_DATA_IMM;
2496 
2497 	switch (aead_xform->algo) {
2498 	case RTE_CRYPTO_AEAD_AES_GCM:
2499 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2500 		aeaddata.algmode = OP_ALG_AAI_GCM;
2501 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2502 		break;
2503 	default:
2505 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %s (%u)",
2506 			rte_cryptodev_get_aead_algo_string(aead_xform->algo),
2507 			aead_xform->algo);
2508 		ret = -ENOTSUP;
2509 		goto error_out;
2510 	}
2511 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2512 				DIR_ENC : DIR_DEC;
2513 
2514 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2515 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2516 			       DESC_JOB_IO_LEN,
2517 			       (unsigned int *)priv->flc_desc[0].desc,
2518 			       &priv->flc_desc[0].desc[1], 1);
2519 
2520 	if (err < 0) {
2521 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2522 		ret = -EINVAL;
2523 		goto error_out;
2524 	}
2525 	if (priv->flc_desc[0].desc[1] & 1) {
2526 		aeaddata.key_type = RTA_DATA_IMM;
2527 	} else {
2528 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2529 		aeaddata.key_type = RTA_DATA_PTR;
2530 	}
2531 	priv->flc_desc[0].desc[0] = 0;
2532 	priv->flc_desc[0].desc[1] = 0;
2533 
2534 	if (session->dir == DIR_ENC)
2535 		bufsize = cnstr_shdsc_gcm_encap(
2536 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2537 				&aeaddata, session->iv.length,
2538 				session->digest_length);
2539 	else
2540 		bufsize = cnstr_shdsc_gcm_decap(
2541 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2542 				&aeaddata, session->iv.length,
2543 				session->digest_length);
2544 	if (bufsize < 0) {
2545 		DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
2546 		ret = -EINVAL;
2547 		goto error_out;
2548 	}
2549 
2550 	flc->word1_sdl = (uint8_t)bufsize;
2551 	session->ctxt = priv;
2552 #ifdef CAAM_DESC_DEBUG
2553 	int i;
2554 	for (i = 0; i < bufsize; i++)
2555 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2556 			    i, priv->flc_desc[0].desc[i]);
2557 #endif
2558 	return ret;
2559 
2560 error_out:
2561 	rte_free(session->aead_key.data);
2562 	rte_free(priv);
2563 	return ret;
2564 }
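
/*
 * Illustrative sketch (not part of the driver): an AES-GCM AEAD transform
 * matching the only algorithm accepted above. `gcm_key` and `IV_OFFSET`
 * are placeholder names.
 */
#if 0
	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = gcm_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 12 },
			.digest_length = 16,
			.aad_length = 8,
		},
	};
#endif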
2565 
2567 static int
2568 dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
2569 		    dpaa2_sec_session *session)
2570 {
2571 	struct alginfo authdata, cipherdata;
2572 	int bufsize;
2573 	struct ctxt_priv *priv;
2574 	struct sec_flow_context *flc;
2575 	struct rte_crypto_cipher_xform *cipher_xform;
2576 	struct rte_crypto_auth_xform *auth_xform;
2577 	int err, ret = 0;
2578 
2579 	PMD_INIT_FUNC_TRACE();
2580 
2581 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2582 		cipher_xform = &xform->cipher;
2583 		auth_xform = &xform->next->auth;
2584 		session->ctxt_type =
2585 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2586 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2587 	} else {
2588 		cipher_xform = &xform->next->cipher;
2589 		auth_xform = &xform->auth;
2590 		session->ctxt_type =
2591 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2592 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2593 	}
2594 
2595 	/* Set IV parameters */
2596 	session->iv.offset = cipher_xform->iv.offset;
2597 	session->iv.length = cipher_xform->iv.length;
2598 
2599 	/* For SEC cipher-auth chaining only one descriptor is required */
2600 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2601 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2602 			RTE_CACHE_LINE_SIZE);
2603 	if (priv == NULL) {
2604 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2605 		return -ENOMEM;
2606 	}
2607 
2608 	flc = &priv->flc_desc[0].flc;
2609 
2610 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2611 					       RTE_CACHE_LINE_SIZE);
2612 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2613 		DPAA2_SEC_ERR("No Memory for cipher key");
2614 		rte_free(priv);
2615 		return -ENOMEM;
2616 	}
2617 	session->cipher_key.length = cipher_xform->key.length;
2618 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2619 					     RTE_CACHE_LINE_SIZE);
2620 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2621 		DPAA2_SEC_ERR("No Memory for auth key");
2622 		rte_free(session->cipher_key.data);
2623 		rte_free(priv);
2624 		return -ENOMEM;
2625 	}
2626 	session->auth_key.length = auth_xform->key.length;
2627 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2628 	       cipher_xform->key.length);
2629 	memcpy(session->auth_key.data, auth_xform->key.data,
2630 	       auth_xform->key.length);
2631 
2632 	authdata.key = (size_t)session->auth_key.data;
2633 	authdata.keylen = session->auth_key.length;
2634 	authdata.key_enc_flags = 0;
2635 	authdata.key_type = RTA_DATA_IMM;
2636 
2637 	session->digest_length = auth_xform->digest_length;
2638 
2639 	switch (auth_xform->algo) {
2640 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2641 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2642 		authdata.algmode = OP_ALG_AAI_HMAC;
2643 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2644 		break;
2645 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2646 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2647 		authdata.algmode = OP_ALG_AAI_HMAC;
2648 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2649 		break;
2650 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2651 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2652 		authdata.algmode = OP_ALG_AAI_HMAC;
2653 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2654 		break;
2655 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2656 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2657 		authdata.algmode = OP_ALG_AAI_HMAC;
2658 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2659 		break;
2660 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2661 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2662 		authdata.algmode = OP_ALG_AAI_HMAC;
2663 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2664 		break;
2665 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2666 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2667 		authdata.algmode = OP_ALG_AAI_HMAC;
2668 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2669 		break;
2670 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2671 		authdata.algtype = OP_ALG_ALGSEL_AES;
2672 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2673 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2674 		break;
2675 	case RTE_CRYPTO_AUTH_AES_CMAC:
2676 		authdata.algtype = OP_ALG_ALGSEL_AES;
2677 		authdata.algmode = OP_ALG_AAI_CMAC;
2678 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2679 		break;
2680 	default:
2681 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %s (%u)",
2682 				   rte_cryptodev_get_auth_algo_string(auth_xform->algo),
2683 			      auth_xform->algo);
2684 		ret = -ENOTSUP;
2685 		goto error_out;
2686 	}
2687 	cipherdata.key = (size_t)session->cipher_key.data;
2688 	cipherdata.keylen = session->cipher_key.length;
2689 	cipherdata.key_enc_flags = 0;
2690 	cipherdata.key_type = RTA_DATA_IMM;
2691 
2692 	switch (cipher_xform->algo) {
2693 	case RTE_CRYPTO_CIPHER_AES_CBC:
2694 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2695 		cipherdata.algmode = OP_ALG_AAI_CBC;
2696 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2697 		break;
2698 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2699 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2700 		cipherdata.algmode = OP_ALG_AAI_CBC;
2701 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2702 		break;
2703 	case RTE_CRYPTO_CIPHER_DES_CBC:
2704 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2705 		cipherdata.algmode = OP_ALG_AAI_CBC;
2706 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2707 		break;
2708 	case RTE_CRYPTO_CIPHER_AES_CTR:
2709 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2710 		cipherdata.algmode = OP_ALG_AAI_CTR;
2711 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2712 		break;
2713 	default:
2714 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %s (%u)",
2715 			      rte_cryptodev_get_cipher_algo_string(cipher_xform->algo),
2716 				  cipher_xform->algo);
2717 		ret = -ENOTSUP;
2718 		goto error_out;
2719 	}
2720 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2721 				DIR_ENC : DIR_DEC;
2722 
2723 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2724 	priv->flc_desc[0].desc[1] = authdata.keylen;
2725 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2726 			       DESC_JOB_IO_LEN,
2727 			       (unsigned int *)priv->flc_desc[0].desc,
2728 			       &priv->flc_desc[0].desc[2], 2);
2729 
2730 	if (err < 0) {
2731 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2732 		ret = -EINVAL;
2733 		goto error_out;
2734 	}
2735 	if (priv->flc_desc[0].desc[2] & 1) {
2736 		cipherdata.key_type = RTA_DATA_IMM;
2737 	} else {
2738 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2739 		cipherdata.key_type = RTA_DATA_PTR;
2740 	}
2741 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2742 		authdata.key_type = RTA_DATA_IMM;
2743 	} else {
2744 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2745 		authdata.key_type = RTA_DATA_PTR;
2746 	}
2747 	priv->flc_desc[0].desc[0] = 0;
2748 	priv->flc_desc[0].desc[1] = 0;
2749 	priv->flc_desc[0].desc[2] = 0;
2750 
2751 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2752 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2753 					      0, SHR_SERIAL,
2754 					      &cipherdata, &authdata,
2755 					      session->iv.length,
2756 					      session->digest_length,
2757 					      session->dir);
2758 		if (bufsize < 0) {
2759 			DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
2760 			ret = -EINVAL;
2761 			goto error_out;
2762 		}
2763 	} else {
2764 		DPAA2_SEC_ERR("Hash before cipher not supported");
2765 		ret = -ENOTSUP;
2766 		goto error_out;
2767 	}
2768 
2769 	flc->word1_sdl = (uint8_t)bufsize;
2770 	session->ctxt = priv;
2771 #ifdef CAAM_DESC_DEBUG
2772 	int i;
2773 	for (i = 0; i < bufsize; i++)
2774 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2775 			    i, priv->flc_desc[0].desc[i]);
2776 #endif
2777 
2778 	return ret;
2779 
2780 error_out:
2781 	rte_free(session->cipher_key.data);
2782 	rte_free(session->auth_key.data);
2783 	rte_free(priv);
2784 	return ret;
2785 }
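
/*
 * Illustrative sketch (not part of the driver): chaining cipher and auth
 * transforms (cipher-then-auth on encrypt) as consumed above; the
 * session-create signature assumes a DPDK 22.11+ cryptodev. `cipher_xform`,
 * `auth_xform`, `dev_id` and `sess_mp` are placeholder names.
 */
#if 0
	cipher_xform.next = &auth_xform;
	auth_xform.next = NULL;
	void *sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform,
						      sess_mp);
#endif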
2786 
2787 static int
2788 dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
2789 {
2790 	dpaa2_sec_session *session = sess;
2791 	int ret;
2792 
2793 	PMD_INIT_FUNC_TRACE();
2794 
2795 	if (unlikely(sess == NULL)) {
2796 		DPAA2_SEC_ERR("Invalid session struct");
2797 		return -EINVAL;
2798 	}
2799 
2800 	memset(session, 0, sizeof(dpaa2_sec_session));
2801 	/* Default IV length = 0 */
2802 	session->iv.length = 0;
2803 
2804 	/* Cipher Only */
2805 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2806 		ret = dpaa2_sec_cipher_init(xform, session);
2807 
2808 	/* Authentication Only */
2809 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2810 		   xform->next == NULL) {
2811 		ret = dpaa2_sec_auth_init(xform, session);
2812 
2813 	/* Cipher then Authenticate */
2814 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2815 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2816 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2817 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2818 			ret = dpaa2_sec_auth_init(xform, session);
2819 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2820 			ret = dpaa2_sec_cipher_init(xform, session);
2821 		else
2822 			ret = dpaa2_sec_aead_chain_init(xform, session);
2823 	/* Authenticate then Cipher */
2824 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2825 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2826 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2827 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2828 			ret = dpaa2_sec_cipher_init(xform, session);
2829 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2830 			ret = dpaa2_sec_auth_init(xform, session);
2831 		else
2832 			ret = dpaa2_sec_aead_chain_init(xform, session);
2833 	/* AEAD operation for AES-GCM kind of Algorithms */
2834 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2835 		   xform->next == NULL) {
2836 		ret = dpaa2_sec_aead_init(xform, session);
2837 
2838 	} else {
2839 		DPAA2_SEC_ERR("Invalid crypto type");
2840 		return -EINVAL;
2841 	}
2842 
2843 	return ret;
2844 }
2845 
2846 static int
2847 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2848 			dpaa2_sec_session *session,
2849 			struct alginfo *aeaddata)
2850 {
2851 	PMD_INIT_FUNC_TRACE();
2852 
2853 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2854 					       RTE_CACHE_LINE_SIZE);
2855 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2856 		DPAA2_SEC_ERR("No Memory for aead key");
2857 		return -ENOMEM;
2858 	}
2859 	memcpy(session->aead_key.data, aead_xform->key.data,
2860 	       aead_xform->key.length);
2861 
2862 	session->digest_length = aead_xform->digest_length;
2863 	session->aead_key.length = aead_xform->key.length;
2864 
2865 	aeaddata->key = (size_t)session->aead_key.data;
2866 	aeaddata->keylen = session->aead_key.length;
2867 	aeaddata->key_enc_flags = 0;
2868 	aeaddata->key_type = RTA_DATA_IMM;
2869 
2870 	switch (aead_xform->algo) {
2871 	case RTE_CRYPTO_AEAD_AES_GCM:
2872 		switch (session->digest_length) {
2873 		case 8:
2874 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2875 			break;
2876 		case 12:
2877 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2878 			break;
2879 		case 16:
2880 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2881 			break;
2882 		default:
2883 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2884 				      session->digest_length);
2885 			return -EINVAL;
2886 		}
2887 		aeaddata->algmode = OP_ALG_AAI_GCM;
2888 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2889 		break;
2890 	case RTE_CRYPTO_AEAD_AES_CCM:
2891 		switch (session->digest_length) {
2892 		case 8:
2893 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2894 			break;
2895 		case 12:
2896 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2897 			break;
2898 		case 16:
2899 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2900 			break;
2901 		default:
2902 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2903 				      session->digest_length);
2904 			return -EINVAL;
2905 		}
2906 		aeaddata->algmode = OP_ALG_AAI_CCM;
2907 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2908 		break;
2909 	default:
2910 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2911 			      aead_xform->algo);
2912 		return -ENOTSUP;
2913 	}
2914 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2915 				DIR_ENC : DIR_DEC;
2916 
2917 	return 0;
2918 }
2919 
2920 static int
2921 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2922 	struct rte_crypto_auth_xform *auth_xform,
2923 	dpaa2_sec_session *session,
2924 	struct alginfo *cipherdata,
2925 	struct alginfo *authdata)
2926 {
2927 	if (cipher_xform) {
2928 		session->cipher_key.data = rte_zmalloc(NULL,
2929 						       cipher_xform->key.length,
2930 						       RTE_CACHE_LINE_SIZE);
2931 		if (session->cipher_key.data == NULL &&
2932 				cipher_xform->key.length > 0) {
2933 			DPAA2_SEC_ERR("No Memory for cipher key");
2934 			return -ENOMEM;
2935 		}
2936 
2937 		session->cipher_key.length = cipher_xform->key.length;
2938 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2939 				cipher_xform->key.length);
2940 		session->cipher_alg = cipher_xform->algo;
2941 	} else {
2942 		session->cipher_key.data = NULL;
2943 		session->cipher_key.length = 0;
2944 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2945 	}
2946 
2947 	if (auth_xform) {
2948 		session->auth_key.data = rte_zmalloc(NULL,
2949 						auth_xform->key.length,
2950 						RTE_CACHE_LINE_SIZE);
2951 		if (session->auth_key.data == NULL &&
2952 				auth_xform->key.length > 0) {
2953 			DPAA2_SEC_ERR("No Memory for auth key");
2954 			return -ENOMEM;
2955 		}
2956 		session->auth_key.length = auth_xform->key.length;
2957 		memcpy(session->auth_key.data, auth_xform->key.data,
2958 				auth_xform->key.length);
2959 		session->auth_alg = auth_xform->algo;
2960 		session->digest_length = auth_xform->digest_length;
2961 	} else {
2962 		session->auth_key.data = NULL;
2963 		session->auth_key.length = 0;
2964 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2965 	}
2966 
2967 	authdata->key = (size_t)session->auth_key.data;
2968 	authdata->keylen = session->auth_key.length;
2969 	authdata->key_enc_flags = 0;
2970 	authdata->key_type = RTA_DATA_IMM;
2971 	switch (session->auth_alg) {
2972 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2973 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2974 		authdata->algmode = OP_ALG_AAI_HMAC;
2975 		break;
2976 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2977 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2978 		authdata->algmode = OP_ALG_AAI_HMAC;
2979 		break;
2980 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2981 		authdata->algmode = OP_ALG_AAI_HMAC;
2982 		if (session->digest_length == 6)
2983 			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_96;
2984 		else if (session->digest_length == 14)
2985 			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_224;
2986 		else
2987 			authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_224_112;
2988 		break;
2989 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2990 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2991 		authdata->algmode = OP_ALG_AAI_HMAC;
2992 		if (session->digest_length != 16)
2993 			DPAA2_SEC_WARN(
2994 			"Using a non-standard truncated len with sha256-hmac;"
2995 			" it will not work with lookaside proto");
2996 		break;
2997 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2998 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2999 		authdata->algmode = OP_ALG_AAI_HMAC;
3000 		break;
3001 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
3002 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
3003 		authdata->algmode = OP_ALG_AAI_HMAC;
3004 		break;
3005 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
3006 		authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
3007 		authdata->algmode = OP_ALG_AAI_XCBC_MAC;
3008 		break;
3009 	case RTE_CRYPTO_AUTH_AES_CMAC:
3010 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
3011 		authdata->algmode = OP_ALG_AAI_CMAC;
3012 		break;
3013 	case RTE_CRYPTO_AUTH_NULL:
3014 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
3015 		break;
3016 	default:
3017 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %s (%u)",
3018 			rte_cryptodev_get_auth_algo_string(session->auth_alg),
3019 			      session->auth_alg);
3020 		return -ENOTSUP;
3021 	}
3022 	cipherdata->key = (size_t)session->cipher_key.data;
3023 	cipherdata->keylen = session->cipher_key.length;
3024 	cipherdata->key_enc_flags = 0;
3025 	cipherdata->key_type = RTA_DATA_IMM;
3026 
3027 	switch (session->cipher_alg) {
3028 	case RTE_CRYPTO_CIPHER_AES_CBC:
3029 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
3030 		cipherdata->algmode = OP_ALG_AAI_CBC;
3031 		break;
3032 	case RTE_CRYPTO_CIPHER_3DES_CBC:
3033 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
3034 		cipherdata->algmode = OP_ALG_AAI_CBC;
3035 		break;
3036 	case RTE_CRYPTO_CIPHER_DES_CBC:
3037 		cipherdata->algtype = OP_PCL_IPSEC_DES;
3038 		cipherdata->algmode = OP_ALG_AAI_CBC;
3039 		break;
3040 	case RTE_CRYPTO_CIPHER_AES_CTR:
3041 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
3042 		cipherdata->algmode = OP_ALG_AAI_CTR;
3043 		break;
3044 	case RTE_CRYPTO_CIPHER_NULL:
3045 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
3046 		break;
3047 	default:
3048 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %s (%u)",
3049 			rte_cryptodev_get_cipher_algo_string(session->cipher_alg),
3050 			session->cipher_alg);
3051 		return -ENOTSUP;
3052 	}
3053 
3054 	return 0;
3055 }
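
/*
 * Illustrative sketch (not part of the driver): a cipher+auth chain as it
 * would arrive for lookaside IPsec and be split by the helper above, e.g.
 * AES-128-CBC with HMAC-SHA1-96. `ckey` and `akey` are placeholder names.
 */
#if 0
	struct rte_crypto_sym_xform auth = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			  .key = { .data = akey, .length = 20 },
			  .digest_length = 12 },	/* HMAC_SHA1_96 */
	};
	struct rte_crypto_sym_xform cipher = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth,
		.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
			    .key = { .data = ckey, .length = 16 } },
	};
#endif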
3056 
3057 static int
3058 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3059 			    struct rte_security_session_conf *conf,
3060 			    void *sess)
3061 {
3062 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
3063 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3064 	struct rte_crypto_auth_xform *auth_xform = NULL;
3065 	struct rte_crypto_aead_xform *aead_xform = NULL;
3066 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3067 	struct ctxt_priv *priv;
3068 	struct alginfo authdata, cipherdata;
3069 	int bufsize;
3070 	struct sec_flow_context *flc;
3071 	uint64_t flc_iova;
3072 	int ret = -1;
3073 
3074 	PMD_INIT_FUNC_TRACE();
3075 
3076 	RTE_SET_USED(dev);
3077 
3078 	/* Align the FLC address for stashing; the low 6 bits are used
3079 	 * to control stashing.
3080 	 */
3081 	priv = rte_zmalloc(NULL, sizeof(struct ctxt_priv) +
3082 		sizeof(struct sec_flc_desc),
3083 		DPAA2_STASHING_ALIGN_SIZE);
3084 
3085 	if (priv == NULL) {
3086 		DPAA2_SEC_ERR("No memory for priv CTXT");
3087 		return -ENOMEM;
3088 	}
3089 
3090 	flc = &priv->flc_desc[0].flc;
3091 
3092 	if (ipsec_xform->life.bytes_hard_limit != 0 ||
3093 		ipsec_xform->life.bytes_soft_limit != 0 ||
3094 		ipsec_xform->life.packets_hard_limit != 0 ||
3095 		ipsec_xform->life.packets_soft_limit != 0) {
3096 		rte_free(priv);
3097 		return -ENOTSUP;
3098 	}
3099 
3100 	memset(session, 0, sizeof(dpaa2_sec_session));
3101 
3102 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3103 		cipher_xform = &conf->crypto_xform->cipher;
3104 		if (conf->crypto_xform->next)
3105 			auth_xform = &conf->crypto_xform->next->auth;
3106 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3107 					session, &cipherdata, &authdata);
3108 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3109 		auth_xform = &conf->crypto_xform->auth;
3110 		if (conf->crypto_xform->next)
3111 			cipher_xform = &conf->crypto_xform->next->cipher;
3112 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3113 					session, &cipherdata, &authdata);
3114 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
3115 		aead_xform = &conf->crypto_xform->aead;
3116 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
3117 					session, &cipherdata);
3118 		authdata.keylen = 0;
3119 		authdata.algtype = 0;
3120 	} else {
3121 		DPAA2_SEC_ERR("XFORM not specified");
3122 		ret = -EINVAL;
3123 		goto out;
3124 	}
3125 	if (ret) {
3126 		DPAA2_SEC_ERR("Failed to process xform");
3127 		goto out;
3128 	}
3129 
3130 	session->ctxt_type = DPAA2_SEC_IPSEC;
3131 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3132 		uint8_t hdr[48] = {};
3133 		struct rte_ipv4_hdr *ip4_hdr;
3134 		struct rte_ipv6_hdr *ip6_hdr;
3135 		struct rte_udp_hdr *uh = NULL;
3136 		struct ipsec_encap_pdb encap_pdb;
3137 
3138 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
3139 		/* For Sec Proto only one descriptor is required. */
3140 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
3141 
3142 		/* copy algo specific data to PDB */
3143 		switch (cipherdata.algtype) {
3144 		case OP_PCL_IPSEC_AES_CTR:
3145 			encap_pdb.ctr.ctr_initial = 0x00000001;
3146 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3147 			break;
3148 		case OP_PCL_IPSEC_AES_GCM8:
3149 		case OP_PCL_IPSEC_AES_GCM12:
3150 		case OP_PCL_IPSEC_AES_GCM16:
3151 			memcpy(encap_pdb.gcm.salt,
3152 				(uint8_t *)&(ipsec_xform->salt), 4);
3153 			break;
3154 		}
3155 
3156 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3157 			PDBOPTS_ESP_OIHI_PDB_INL |
3158 			PDBHMO_ESP_SNR;
3159 
3160 		if (ipsec_xform->options.iv_gen_disable == 0)
3161 			encap_pdb.options |= PDBOPTS_ESP_IVSRC;
3162 		/* Initialize the sequence number to 1; the security engine
3163 		 * will use this sequence number for the first packet.
3164 		 * Refer: RFC 4303, section 3.3.3 "Sequence Number Generation".
3165 		 */
3166 		encap_pdb.seq_num = 1;
3167 		if (ipsec_xform->options.esn) {
3168 			encap_pdb.options |= PDBOPTS_ESP_ESN;
3169 			encap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi;
3170 			encap_pdb.seq_num = conf->ipsec.esn.low;
3171 		}
3172 		if (ipsec_xform->options.copy_dscp)
3173 			encap_pdb.options |= PDBOPTS_ESP_DIFFSERV;
3174 		if (ipsec_xform->options.ecn)
3175 			encap_pdb.options |= PDBOPTS_ESP_TECN;
3176 		encap_pdb.spi = ipsec_xform->spi;
3177 		session->dir = DIR_ENC;
3178 		if (ipsec_xform->tunnel.type ==
3179 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3180 			if (ipsec_xform->options.dec_ttl)
3181 				encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3182 			if (ipsec_xform->options.copy_df)
3183 				encap_pdb.options |= PDBHMO_ESP_DFBIT;
3184 			ip4_hdr = (struct rte_ipv4_hdr *)hdr;
3185 
3186 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv4_hdr);
3187 			ip4_hdr->version_ihl = RTE_IPV4_VHL_DEF;
3188 			ip4_hdr->time_to_live = ipsec_xform->tunnel.ipv4.ttl ?
3189 						ipsec_xform->tunnel.ipv4.ttl :  0x40;
3190 			ip4_hdr->type_of_service = (ipsec_xform->tunnel.ipv4.dscp<<2);
3191 
3192 			ip4_hdr->hdr_checksum = 0;
3193 			ip4_hdr->packet_id = 0;
3194 			if (ipsec_xform->tunnel.ipv4.df) {
3195 				uint16_t frag_off = 0;
3196 
3197 				frag_off |= RTE_IPV4_HDR_DF_FLAG;
3198 				ip4_hdr->fragment_offset = rte_cpu_to_be_16(frag_off);
3199 			} else
3200 				ip4_hdr->fragment_offset = 0;
3201 
3202 			memcpy(&ip4_hdr->src_addr, &ipsec_xform->tunnel.ipv4.src_ip,
3203 			       sizeof(struct in_addr));
3204 			memcpy(&ip4_hdr->dst_addr, &ipsec_xform->tunnel.ipv4.dst_ip,
3205 			       sizeof(struct in_addr));
3206 			if (ipsec_xform->options.udp_encap) {
3207 				ip4_hdr->next_proto_id = IPPROTO_UDP;
3208 				ip4_hdr->total_length = rte_cpu_to_be_16(
3209 						sizeof(struct rte_ipv4_hdr) +
3210 						sizeof(struct rte_udp_hdr));
3211 			} else {
3212 				ip4_hdr->total_length =
3213 					rte_cpu_to_be_16(
3214 						sizeof(struct rte_ipv4_hdr));
3215 				ip4_hdr->next_proto_id = IPPROTO_ESP;
3216 			}
3217 
3218 			ip4_hdr->hdr_checksum = calc_chksum((uint16_t *)
3219 				(void *)ip4_hdr, sizeof(struct rte_ipv4_hdr));
3220 
3221 		} else if (ipsec_xform->tunnel.type ==
3222 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3223 			ip6_hdr = (struct rte_ipv6_hdr *)hdr;
3224 
3225 			ip6_hdr->vtc_flow = rte_cpu_to_be_32(
3226 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
3227 				((ipsec_xform->tunnel.ipv6.dscp <<
3228 					RTE_IPV6_HDR_TC_SHIFT) &
3229 					RTE_IPV6_HDR_TC_MASK) |
3230 				((ipsec_xform->tunnel.ipv6.flabel <<
3231 					RTE_IPV6_HDR_FL_SHIFT) &
3232 					RTE_IPV6_HDR_FL_MASK));
3233 			/* Payload length will be updated by HW */
3234 			ip6_hdr->payload_len = 0;
3235 			ip6_hdr->hop_limits = ipsec_xform->tunnel.ipv6.hlimit ?
3236 					ipsec_xform->tunnel.ipv6.hlimit : 0x40;
3237 			memcpy(&ip6_hdr->src_addr,
3238 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
3239 			memcpy(&ip6_hdr->dst_addr,
3240 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
3241 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
3242 			if (ipsec_xform->options.udp_encap)
3243 				ip6_hdr->proto = IPPROTO_UDP;
3244 			else
3245 				ip6_hdr->proto = (ipsec_xform->proto ==
3246 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3247 					IPPROTO_ESP : IPPROTO_AH;
3248 		}
3249 		if (ipsec_xform->options.udp_encap) {
3250 			uint16_t sport, dport;
3251 
3252 			if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
3253 				uh = (struct rte_udp_hdr *) (hdr +
3254 						sizeof(struct rte_ipv4_hdr));
3255 			else
3256 				uh = (struct rte_udp_hdr *) (hdr +
3257 						sizeof(struct rte_ipv6_hdr));
3258 
3259 			sport = ipsec_xform->udp.sport ?
3260 				ipsec_xform->udp.sport : DPAA2_DEFAULT_NAT_T_PORT;
3261 			dport = ipsec_xform->udp.dport ?
3262 				ipsec_xform->udp.dport : DPAA2_DEFAULT_NAT_T_PORT;
3263 			uh->src_port = rte_cpu_to_be_16(sport);
3264 			uh->dst_port = rte_cpu_to_be_16(dport);
3265 			uh->dgram_len = 0;
3266 			uh->dgram_cksum = 0;
3267 
3268 			encap_pdb.ip_hdr_len += sizeof(struct rte_udp_hdr);
3269 			encap_pdb.options |= PDBOPTS_ESP_NAT | PDBOPTS_ESP_NUC;
3270 		}
3271 
3272 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
3273 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3274 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
3275 				hdr, &cipherdata, &authdata);
3276 	} else if (ipsec_xform->direction ==
3277 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3278 		struct ipsec_decap_pdb decap_pdb;
3279 
3280 		flc->dhr = SEC_FLC_DHR_INBOUND;
3281 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3282 		/* copy algo specific data to PDB */
3283 		switch (cipherdata.algtype) {
3284 		case OP_PCL_IPSEC_AES_CTR:
3285 			decap_pdb.ctr.ctr_initial = 0x00000001;
3286 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3287 			break;
3288 		case OP_PCL_IPSEC_AES_GCM8:
3289 		case OP_PCL_IPSEC_AES_GCM12:
3290 		case OP_PCL_IPSEC_AES_GCM16:
3291 			memcpy(decap_pdb.gcm.salt,
3292 				(uint8_t *)&(ipsec_xform->salt), 4);
3293 			break;
3294 		}
3295 
3296 		if (ipsec_xform->tunnel.type ==
3297 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3298 			if (ipsec_xform->options.udp_encap)
3299 				decap_pdb.options =
3300 					(sizeof(struct ip) + sizeof(struct rte_udp_hdr)) << 16;
3301 			else
3302 				decap_pdb.options = sizeof(struct ip) << 16;
3303 			if (ipsec_xform->options.copy_df)
3304 				decap_pdb.options |= PDBHMO_ESP_DFV;
3305 			if (ipsec_xform->options.dec_ttl)
3306 				decap_pdb.options |= PDBHMO_ESP_DECAP_DTTL;
3307 		} else {
3308 			if (ipsec_xform->options.udp_encap) {
3309 				decap_pdb.options =
3310 					(sizeof(struct rte_ipv6_hdr) +
3311 					 sizeof(struct rte_udp_hdr)) << 16;
3312 			} else {
3313 				decap_pdb.options = sizeof(struct rte_ipv6_hdr) << 16;
3314 			}
3315 		}
3316 		if (ipsec_xform->options.esn) {
3317 			decap_pdb.options |= PDBOPTS_ESP_ESN;
3318 			decap_pdb.seq_num_ext_hi = conf->ipsec.esn.hi;
3319 			decap_pdb.seq_num = conf->ipsec.esn.low;
3320 		}
3321 		if (ipsec_xform->options.copy_dscp)
3322 			decap_pdb.options |= PDBOPTS_ESP_DIFFSERV;
3323 		if (ipsec_xform->options.ecn)
3324 			decap_pdb.options |= PDBOPTS_ESP_TECN;
3325 
3326 		if (ipsec_xform->replay_win_sz) {
3327 			uint32_t win_sz;
3328 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3329 
3330 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3331 				DPAA2_SEC_INFO("Max anti-replay win sz = 128");
3332 				win_sz = 128;
3333 			}
3334 			switch (win_sz) {
3335 			case 1:
3336 			case 2:
3337 			case 4:
3338 			case 8:
3339 			case 16:
3340 			case 32:
3341 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3342 				break;
3343 			case 64:
3344 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3345 				break;
3346 			case 256:
3347 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3348 				break;
3349 			case 512:
3350 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3351 				break;
3352 			case 1024:
3353 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3354 				break;
3355 			case 128:
3356 			default:
3357 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3358 			}
3359 		}
3360 		session->dir = DIR_DEC;
3361 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3362 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3363 				SHR_WAIT : SHR_SERIAL,
3364 				&decap_pdb, &cipherdata, &authdata);
3365 	} else {
3366 		ret = -EINVAL;
3367 		goto out;
3368 	}
3369 
3370 	if (bufsize < 0) {
3371 		ret = -EINVAL;
3372 		DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
3373 		goto out;
3374 	}
3375 
3376 	flc->word1_sdl = (uint8_t)bufsize;
3377 
3378 	flc_iova = DPAA2_VADDR_TO_IOVA(flc);
3379 	/* Enable the stashing control bit and data stashing only. */
3380 	DPAA2_SET_FLC_RSC(flc);
3381 	dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
3382 		&flc_iova);
3383 	flc->word2_rflc_31_0 = lower_32_bits(flc_iova);
3384 	flc->word3_rflc_63_32 = upper_32_bits(flc_iova);
3385 
3386 	/* Set EWS bit i.e. enable write-safe */
3387 	DPAA2_SET_FLC_EWS(flc);
3388 	/* Set BS = 1, i.e. reuse input buffers as output buffers */
3389 	DPAA2_SET_FLC_REUSE_BS(flc);
3390 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3391 	DPAA2_SET_FLC_REUSE_FF(flc);
3392 
3393 	session->ctxt = priv;
3394 
3395 	return 0;
3396 out:
3397 	rte_free(session->auth_key.data);
3398 	rte_free(session->cipher_key.data);
3399 	rte_free(priv);
3400 	return ret;
3401 }
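
/*
 * Illustrative sketch (not part of the driver): creating a lookaside IPsec
 * session through rte_security for the egress path built above; the
 * session-create signature assumes a DPDK 22.11+ rte_security. `sec_ctx`,
 * `aead_xform` and `sess_mp` are placeholder names.
 */
#if 0
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x100,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
		},
		.crypto_xform = &aead_xform,
	};
	void *sec_sess = rte_security_session_create(sec_ctx, &conf, sess_mp);
#endif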
3402 
3403 static int
3404 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3405 			   struct rte_security_session_conf *conf,
3406 			   void *sess)
3407 {
3408 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3409 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3410 	struct rte_crypto_auth_xform *auth_xform = NULL;
3411 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3412 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3413 	struct ctxt_priv *priv;
3414 	struct alginfo authdata, cipherdata;
3415 	struct alginfo *p_authdata = NULL;
3416 	int bufsize = -1;
3417 	struct sec_flow_context *flc;
3418 	uint64_t flc_iova;
3419 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3420 	int swap = true;
3421 #else
3422 	int swap = false;
3423 #endif
3424 
3425 	PMD_INIT_FUNC_TRACE();
3426 
3427 	RTE_SET_USED(dev);
3428 
3429 	memset(session, 0, sizeof(dpaa2_sec_session));
3430 
3431 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3432 				sizeof(struct ctxt_priv) +
3433 				sizeof(struct sec_flc_desc),
3434 				RTE_CACHE_LINE_SIZE);
3435 
3436 	if (priv == NULL) {
3437 		DPAA2_SEC_ERR("No memory for priv CTXT");
3438 		return -ENOMEM;
3439 	}
3440 
3441 	flc = &priv->flc_desc[0].flc;
3442 
3443 	/* find xfrm types */
3444 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3445 		cipher_xform = &xform->cipher;
3446 		if (xform->next != NULL &&
3447 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3448 			session->ext_params.aead_ctxt.auth_cipher_text = true;
3449 			auth_xform = &xform->next->auth;
3450 		}
3451 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3452 		auth_xform = &xform->auth;
3453 		if (xform->next != NULL &&
3454 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3455 			session->ext_params.aead_ctxt.auth_cipher_text = false;
3456 			cipher_xform = &xform->next->cipher;
3457 		}
3458 	} else {
3459 		DPAA2_SEC_ERR("Invalid crypto type");
3460 		rte_free(priv);
3461 		return -EINVAL;
3462 	}
3463 
3464 	session->ctxt_type = DPAA2_SEC_PDCP;
3465 	if (cipher_xform) {
3466 		session->cipher_key.data = rte_zmalloc(NULL,
3467 					       cipher_xform->key.length,
3468 					       RTE_CACHE_LINE_SIZE);
3469 		if (session->cipher_key.data == NULL &&
3470 				cipher_xform->key.length > 0) {
3471 			DPAA2_SEC_ERR("No Memory for cipher key");
3472 			rte_free(priv);
3473 			return -ENOMEM;
3474 		}
3475 		session->cipher_key.length = cipher_xform->key.length;
3476 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3477 			cipher_xform->key.length);
3478 		session->dir =
3479 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3480 					DIR_ENC : DIR_DEC;
3481 		session->cipher_alg = cipher_xform->algo;
3482 	} else {
3483 		session->cipher_key.data = NULL;
3484 		session->cipher_key.length = 0;
3485 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3486 		session->dir = DIR_ENC;
3487 	}
3488 
3489 	session->pdcp.domain = pdcp_xform->domain;
3490 	session->pdcp.bearer = pdcp_xform->bearer;
3491 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3492 	session->pdcp.sn_size = pdcp_xform->sn_size;
3493 	session->pdcp.hfn = pdcp_xform->hfn;
3494 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3495 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3496 	/* HFN override offset location is stored in the iv.offset value */
3497 	if (cipher_xform)
3498 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3499 
3500 	cipherdata.key = (size_t)session->cipher_key.data;
3501 	cipherdata.keylen = session->cipher_key.length;
3502 	cipherdata.key_enc_flags = 0;
3503 	cipherdata.key_type = RTA_DATA_IMM;
3504 
3505 	switch (session->cipher_alg) {
3506 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3507 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3508 		break;
3509 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3510 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3511 		break;
3512 	case RTE_CRYPTO_CIPHER_AES_CTR:
3513 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3514 		break;
3515 	case RTE_CRYPTO_CIPHER_NULL:
3516 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3517 		break;
3518 	default:
3519 		DPAA2_SEC_ERR("Crypto: Unsupported cipher alg %u",
3520 			      session->cipher_alg);
3521 		goto out;
3522 	}
3523 
3524 	if (auth_xform) {
3525 		session->auth_key.data = rte_zmalloc(NULL,
3526 						     auth_xform->key.length,
3527 						     RTE_CACHE_LINE_SIZE);
3528 		if (!session->auth_key.data &&
3529 		    auth_xform->key.length > 0) {
3530 			DPAA2_SEC_ERR("No Memory for auth key");
3531 			rte_free(session->cipher_key.data);
3532 			rte_free(priv);
3533 			return -ENOMEM;
3534 		}
3535 		session->auth_key.length = auth_xform->key.length;
3536 		memcpy(session->auth_key.data, auth_xform->key.data,
3537 		       auth_xform->key.length);
3538 		session->auth_alg = auth_xform->algo;
3539 	} else {
3540 		session->auth_key.data = NULL;
3541 		session->auth_key.length = 0;
3542 		session->auth_alg = 0;
3543 		authdata.algtype = PDCP_AUTH_TYPE_NULL;
3544 	}
3545 	authdata.key = (size_t)session->auth_key.data;
3546 	authdata.keylen = session->auth_key.length;
3547 	authdata.key_enc_flags = 0;
3548 	authdata.key_type = RTA_DATA_IMM;
3549 
3550 	if (session->auth_alg) {
3551 		switch (session->auth_alg) {
3552 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3553 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3554 			break;
3555 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3556 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3557 			break;
3558 		case RTE_CRYPTO_AUTH_AES_CMAC:
3559 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3560 			break;
3561 		case RTE_CRYPTO_AUTH_NULL:
3562 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3563 			break;
3564 		default:
3565 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3566 				      session->auth_alg);
3567 			goto out;
3568 		}
3569 		p_authdata = &authdata;
3570 	} else {
3571 		if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3572 			DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3573 			goto out;
3574 		}
3575 		session->auth_key.data = NULL;
3576 		session->auth_key.length = 0;
3577 		session->auth_alg = 0;
3578 	}
3583 
3584 	if (pdcp_xform->sdap_enabled) {
3585 		int nb_keys_to_inline =
3586 			rta_inline_pdcp_sdap_query(authdata.algtype,
3587 					cipherdata.algtype,
3588 					session->pdcp.sn_size,
3589 					session->pdcp.hfn_ovd);
3590 		if (nb_keys_to_inline >= 1) {
3591 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3592 			cipherdata.key_type = RTA_DATA_PTR;
3593 		}
3594 		if (nb_keys_to_inline >= 2) {
3595 			authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3596 			authdata.key_type = RTA_DATA_PTR;
3597 		}
3598 	} else {
3599 		if (rta_inline_pdcp_query(authdata.algtype,
3600 					cipherdata.algtype,
3601 					session->pdcp.sn_size,
3602 					session->pdcp.hfn_ovd)) {
3603 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3604 			cipherdata.key_type = RTA_DATA_PTR;
3605 		}
3606 	}
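	/* Editorial note: the RTA inline queries above report how many keys
	 * still fit as immediate data in the shared descriptor for this
	 * algorithm/SN-size/HFN-override combination. Keys that do not fit
	 * are switched from RTA_DATA_IMM (key bytes copied into the
	 * descriptor) to RTA_DATA_PTR (descriptor references the key IOVA).
	 */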
3607 
3608 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3609 		if (session->dir == DIR_ENC)
3610 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3611 					priv->flc_desc[0].desc, 1, swap,
3612 					pdcp_xform->hfn,
3613 					session->pdcp.sn_size,
3614 					pdcp_xform->bearer,
3615 					pdcp_xform->pkt_dir,
3616 					pdcp_xform->hfn_threshold,
3617 					&cipherdata, &authdata);
3618 		else if (session->dir == DIR_DEC)
3619 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3620 					priv->flc_desc[0].desc, 1, swap,
3621 					pdcp_xform->hfn,
3622 					session->pdcp.sn_size,
3623 					pdcp_xform->bearer,
3624 					pdcp_xform->pkt_dir,
3625 					pdcp_xform->hfn_threshold,
3626 					&cipherdata, &authdata);
3627 
3628 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3629 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3630 						     1, swap, &authdata);
3631 	} else {
3632 		if (session->dir == DIR_ENC) {
3633 			if (pdcp_xform->sdap_enabled)
3634 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3635 					priv->flc_desc[0].desc, 1, swap,
3636 					session->pdcp.sn_size,
3637 					pdcp_xform->hfn,
3638 					pdcp_xform->bearer,
3639 					pdcp_xform->pkt_dir,
3640 					pdcp_xform->hfn_threshold,
3641 					&cipherdata, p_authdata);
3642 			else
3643 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3644 					priv->flc_desc[0].desc, 1, swap,
3645 					session->pdcp.sn_size,
3646 					pdcp_xform->hfn,
3647 					pdcp_xform->bearer,
3648 					pdcp_xform->pkt_dir,
3649 					pdcp_xform->hfn_threshold,
3650 					&cipherdata, p_authdata);
3651 		} else if (session->dir == DIR_DEC) {
3652 			if (pdcp_xform->sdap_enabled)
3653 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3654 					priv->flc_desc[0].desc, 1, swap,
3655 					session->pdcp.sn_size,
3656 					pdcp_xform->hfn,
3657 					pdcp_xform->bearer,
3658 					pdcp_xform->pkt_dir,
3659 					pdcp_xform->hfn_threshold,
3660 					&cipherdata, p_authdata);
3661 			else
3662 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3663 					priv->flc_desc[0].desc, 1, swap,
3664 					session->pdcp.sn_size,
3665 					pdcp_xform->hfn,
3666 					pdcp_xform->bearer,
3667 					pdcp_xform->pkt_dir,
3668 					pdcp_xform->hfn_threshold,
3669 					&cipherdata, p_authdata);
3670 		}
3671 	}
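	/* Editorial note: descriptor selection above is driven by the PDCP
	 * domain (control plane, short MAC, or user plane), the session
	 * direction (encap/decap), and whether SDAP is enabled, which picks
	 * the *_sdap_* descriptor variants.
	 */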
3672 
3673 	if (bufsize < 0) {
3674 		DPAA2_SEC_ERR("Crypto: Invalid SEC-DESC buffer length");
3675 		goto out;
3676 	}
3677 
3678 	flc_iova = DPAA2_VADDR_TO_IOVA(flc);
3679 	/* Enable the stashing control bit and data stashing only. */
3680 	DPAA2_SET_FLC_RSC(flc);
3681 	dpaa2_flc_stashing_set(DPAA2_FLC_DATA_STASHING, 1,
3682 		&flc_iova);
3683 	flc->word2_rflc_31_0 = lower_32_bits(flc_iova);
3684 	flc->word3_rflc_63_32 = upper_32_bits(flc_iova);
3685 
3686 	flc->word1_sdl = (uint8_t)bufsize;
3687 
3688 	/* TODO: evaluate the performance impact of enabling write-safe
3689 	 * (EWS) for PDCP, or set it per descriptor type:
3690 	 * DPAA2_SET_FLC_EWS(flc);
3691 	 */
3693 
3694 	/* Set BS = 1 i.e. reuse input buffers as output buffers */
3695 	DPAA2_SET_FLC_REUSE_BS(flc);
3696 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3697 	DPAA2_SET_FLC_REUSE_FF(flc);
3698 
3699 	session->ctxt = priv;
3700 
3701 	return 0;
3702 out:
3703 	rte_free(session->auth_key.data);
3704 	rte_free(session->cipher_key.data);
3705 	rte_free(priv);
3706 	return -EINVAL;
3707 }
3708 
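
/*
 * Illustrative application-side sketch (editorial, not part of the driver),
 * assuming the DPDK 22.11+ rte_security API: building the PDCP session_conf
 * that ends up in dpaa2_sec_set_pdcp_session() above. The xform chain and
 * session mempool are placeholders supplied by the caller.
 */
static inline void *
example_create_pdcp_session(struct rte_security_ctx *sec_ctx,
			    struct rte_crypto_sym_xform *xform_chain,
			    struct rte_mempool *sess_mp)
{
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
		.pdcp = {
			.domain = RTE_SECURITY_PDCP_MODE_DATA,
			.pkt_dir = RTE_SECURITY_PDCP_UPLINK,
			.bearer = 0x3,
			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
			.hfn = 0,
			.hfn_threshold = 0xfffff,
		},
		.crypto_xform = xform_chain,
	};

	return rte_security_session_create(sec_ctx, &conf, sess_mp);
}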
3709 static int
3710 dpaa2_sec_security_session_create(void *dev,
3711 				  struct rte_security_session_conf *conf,
3712 				  struct rte_security_session *sess)
3713 {
3714 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
3715 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3716 	int ret;
3717 
3718 	switch (conf->protocol) {
3719 	case RTE_SECURITY_PROTOCOL_IPSEC:
3720 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3721 				sess_private_data);
3722 		break;
3723 	case RTE_SECURITY_PROTOCOL_MACSEC:
3724 		return -ENOTSUP;
3725 	case RTE_SECURITY_PROTOCOL_PDCP:
3726 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3727 				sess_private_data);
3728 		break;
3729 	default:
3730 		return -EINVAL;
3731 	}
3732 	if (ret != 0)
3733 		DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret);
3736 
3737 	return ret;
3738 }
3739 
3740 /** Clear the memory of session so it doesn't leave key material behind */
3741 static int
3742 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3743 		struct rte_security_session *sess)
3744 {
3745 	void *sess_priv = SECURITY_GET_SESS_PRIV(sess);
3746 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3747 
3748 	PMD_INIT_FUNC_TRACE();
3749 
3750 	if (sess_priv) {
3751 		rte_free(s->ctxt);
3752 		rte_free(s->cipher_key.data);
3753 		rte_free(s->auth_key.data);
3754 		memset(s, 0, sizeof(dpaa2_sec_session));
3755 	}
3756 	return 0;
3757 }
3758 
3759 static int
3760 dpaa2_sec_security_session_update(void *dev,
3761 			struct rte_security_session *sess,
3762 			struct rte_security_session_conf *conf)
3763 {
3764 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3765 	void *sess_private_data = SECURITY_GET_SESS_PRIV(sess);
3766 	int ret;
3767 
3768 	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC ||
3769 		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
3770 		return -ENOTSUP;
3771 
3772 	dpaa2_sec_security_session_destroy(dev, sess);
3773 
3774 	ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3775 				sess_private_data);
3776 	if (ret != 0)
3777 		DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret);
3780 
3781 	return ret;
3782 }
3783 
3784 static unsigned int
3785 dpaa2_sec_security_session_get_size(void *device __rte_unused)
3786 {
3787 	return sizeof(dpaa2_sec_session);
3788 }
3789 
3790 static int
3791 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
3792 		struct rte_crypto_sym_xform *xform,
3793 		struct rte_cryptodev_sym_session *sess)
3794 {
3795 	void *sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
3796 	int ret;
3797 
3798 	ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
3799 	if (ret != 0) {
3800 		DPAA2_SEC_DEBUG("Failed to configure session parameters %d", ret);
3801 		/* Caller will return the session to its mempool */
3802 		return ret;
3803 	}
3804 
3805 	return 0;
3806 }
3807 
3808 /** Clear the memory of session so it doesn't leave key material behind */
3809 static void
3810 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev __rte_unused,
3811 		struct rte_cryptodev_sym_session *sess)
3812 {
3813 	dpaa2_sec_session *s = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
3814 	PMD_INIT_FUNC_TRACE();
3815 
3816 	if (s) {
3817 		rte_free(s->ctxt);
3818 		rte_free(s->cipher_key.data);
3819 		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
3820 	}
3821 }
3822 
3823 static int
3824 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3825 			struct rte_cryptodev_config *config __rte_unused)
3826 {
3827 	PMD_INIT_FUNC_TRACE();
3828 
3829 	return 0;
3830 }
3831 
3832 static int
3833 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3834 {
3835 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3836 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3837 	struct dpseci_attr attr;
3838 	struct dpaa2_queue *dpaa2_q;
3839 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3840 					dev->data->queue_pairs;
3841 	struct dpseci_rx_queue_attr rx_attr;
3842 	struct dpseci_tx_queue_attr tx_attr;
3843 	int ret, i;
3844 
3845 	PMD_INIT_FUNC_TRACE();
3846 
3847 	/* Change the tx burst function if ordered queues are used */
3848 	if (priv->en_ordered)
3849 		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
3850 
3851 	memset(&attr, 0, sizeof(struct dpseci_attr));
3852 
3853 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3854 	if (ret) {
3855 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3856 			      priv->hw_id);
3857 		goto get_attr_failure;
3858 	}
3859 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3860 	if (ret) {
3861 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3862 		goto get_attr_failure;
3863 	}
3864 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3865 		dpaa2_q = &qp[i]->rx_vq;
3866 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3867 				    &rx_attr);
3868 		dpaa2_q->fqid = rx_attr.fqid;
3869 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3870 	}
3871 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3872 		dpaa2_q = &qp[i]->tx_vq;
3873 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3874 				    &tx_attr);
3875 		dpaa2_q->fqid = tx_attr.fqid;
3876 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3877 	}
3878 
3879 	return 0;
3880 get_attr_failure:
3881 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3882 	return -1;
3883 }
3884 
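/*
 * Illustrative application-side sketch (editorial, not part of the driver):
 * the generic cryptodev bring-up that lands in dpaa2_sec_dev_configure(),
 * dpaa2_sec_queue_pair_setup() and dpaa2_sec_dev_start(). The descriptor
 * count and the (omitted) session mempool are placeholders.
 */
static inline int
example_cryptodev_bringup(uint8_t dev_id, uint16_t nb_qps, int socket_id)
{
	struct rte_cryptodev_config dev_conf = {
		.socket_id = socket_id,
		.nb_queue_pairs = nb_qps,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = NULL, /* placeholder: pass a real session pool */
	};
	uint16_t i;
	int ret;

	ret = rte_cryptodev_configure(dev_id, &dev_conf);
	if (ret < 0)
		return ret;

	for (i = 0; i < nb_qps; i++) {
		ret = rte_cryptodev_queue_pair_setup(dev_id, i, &qp_conf,
						     socket_id);
		if (ret < 0)
			return ret;
	}

	return rte_cryptodev_start(dev_id);
}
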
3885 static void
3886 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3887 {
3888 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3889 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3890 	int ret;
3891 
3892 	PMD_INIT_FUNC_TRACE();
3893 
3894 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3895 	if (ret) {
3896 		DPAA2_SEC_ERR("Failed to disable dpseci device %d",
3897 			     priv->hw_id);
3898 		return;
3899 	}
3900 
3901 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3902 	if (ret < 0) {
3903 		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3904 		return;
3905 	}
3906 }
3907 
3908 static int
3909 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3910 {
3911 	PMD_INIT_FUNC_TRACE();
3912 
3913 	return 0;
3914 }
3915 
3916 static void
3917 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3918 			struct rte_cryptodev_info *info)
3919 {
3920 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3921 
3922 	PMD_INIT_FUNC_TRACE();
3923 	if (info != NULL) {
3924 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3925 		info->feature_flags = dev->feature_flags;
3926 		info->capabilities = dpaa2_sec_capabilities;
3927 		/* No limit of number of sessions */
3928 		info->sym.max_nb_sessions = 0;
3929 		info->driver_id = cryptodev_driver_id;
3930 	}
3931 }
3932 
3933 static void
3934 dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3935 			 struct rte_cryptodev_stats *stats)
3936 {
3937 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3938 	struct fsl_mc_io dpseci;
3939 	struct dpseci_sec_counters counters = {0};
3940 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3941 					dev->data->queue_pairs;
3942 	int ret, i;
3943 
3944 	PMD_INIT_FUNC_TRACE();
3945 	if (stats == NULL) {
3946 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3947 		return;
3948 	}
3949 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3950 		if (qp == NULL || qp[i] == NULL) {
3951 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3952 			continue;
3953 		}
3954 
3955 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3956 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3957 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3958 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3959 	}
3960 
3961 	/* When a secondary process reads the stats, the MCP portal stored
3962 	 * in priv->hw may hold the primary process address. Use this
3963 	 * process's own MCP portal address for the dpseci object.
3964 	 */
3965 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3966 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3967 				      &counters);
3968 	if (ret) {
3969 		DPAA2_SEC_ERR("Reading SEC counters failed");
3970 	} else {
3971 		DPAA2_SEC_INFO("dpseci hardware stats:");
3972 		DPAA2_SEC_INFO("\tNum of Requests Dequeued = %" PRIu64,
3973 			    counters.dequeued_requests);
3974 		DPAA2_SEC_INFO("\tNum of Outbound Encrypt Requests = %" PRIu64,
3975 			    counters.ob_enc_requests);
3976 		DPAA2_SEC_INFO("\tNum of Inbound Decrypt Requests = %" PRIu64,
3977 			    counters.ib_dec_requests);
3978 		DPAA2_SEC_INFO("\tNum of Outbound Bytes Encrypted = %" PRIu64,
3979 			    counters.ob_enc_bytes);
3980 		DPAA2_SEC_INFO("\tNum of Outbound Bytes Protected = %" PRIu64,
3981 			    counters.ob_prot_bytes);
3982 		DPAA2_SEC_INFO("\tNum of Inbound Bytes Decrypted = %" PRIu64,
3983 			    counters.ib_dec_bytes);
3984 		DPAA2_SEC_INFO("\tNum of Inbound Bytes Validated = %" PRIu64,
3985 			    counters.ib_valid_bytes);
3986 	}
3987 }
3988 
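/*
 * Illustrative application-side sketch (editorial, not part of the driver):
 * reading the counters aggregated by dpaa2_sec_stats_get() above through the
 * generic cryptodev stats API; assumes <stdio.h> and <inttypes.h>.
 */
static inline void
example_dump_crypto_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) != 0)
		return;
	printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64
	       " deq_err %" PRIu64 "\n",
	       stats.enqueued_count, stats.dequeued_count,
	       stats.enqueue_err_count, stats.dequeue_err_count);
}
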
3989 static void
3990 dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3991 {
3992 	int i;
3993 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3994 				   (dev->data->queue_pairs);
3995 
3996 	PMD_INIT_FUNC_TRACE();
3997 
3998 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3999 		if (qp[i] == NULL) {
4000 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
4001 			continue;
4002 		}
4003 		qp[i]->tx_vq.rx_pkts = 0;
4004 		qp[i]->tx_vq.tx_pkts = 0;
4005 		qp[i]->tx_vq.err_pkts = 0;
4006 		qp[i]->rx_vq.rx_pkts = 0;
4007 		qp[i]->rx_vq.tx_pkts = 0;
4008 		qp[i]->rx_vq.err_pkts = 0;
4009 	}
4010 }
4011 
4012 static void __rte_hot
4013 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
4014 				 const struct qbman_fd *fd,
4015 				 const struct qbman_result *dq,
4016 				 struct dpaa2_queue *rxq,
4017 				 struct rte_event *ev)
4018 {
4019 	struct dpaa2_sec_qp *qp;
4020 
4021 	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4022 	ev->flow_id = rxq->ev.flow_id;
4023 	ev->sub_event_type = rxq->ev.sub_event_type;
4024 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4025 	ev->op = RTE_EVENT_OP_NEW;
4026 	ev->sched_type = rxq->ev.sched_type;
4027 	ev->queue_id = rxq->ev.queue_id;
4028 	ev->priority = rxq->ev.priority;
4029 	ev->event_ptr = sec_fd_to_mbuf(fd, qp);
4030 
4031 	qbman_swp_dqrr_consume(swp, dq);
4032 }
4033 
4034 static void __rte_hot
4035 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
4036 				 const struct qbman_fd *fd,
4037 				 const struct qbman_result *dq,
4038 				 struct dpaa2_queue *rxq,
4039 				 struct rte_event *ev)
4040 {
4041 	uint8_t dqrr_index;
4042 	struct dpaa2_sec_qp *qp;
4043 	struct rte_crypto_op *crypto_op;
4044 
4045 	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4046 	ev->flow_id = rxq->ev.flow_id;
4047 	ev->sub_event_type = rxq->ev.sub_event_type;
4048 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4049 	ev->op = RTE_EVENT_OP_NEW;
4050 	ev->sched_type = rxq->ev.sched_type;
4051 	ev->queue_id = rxq->ev.queue_id;
4052 	ev->priority = rxq->ev.priority;
4053 
4054 	crypto_op = sec_fd_to_mbuf(fd, qp);
4055 	dqrr_index = qbman_get_dqrr_idx(dq);
4056 	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
4057 	DPAA2_PER_LCORE_DQRR_SIZE++;
4058 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
4059 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
4060 	ev->event_ptr = crypto_op;
4061 }
4062 
4063 static void __rte_hot
4064 dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
4065 				const struct qbman_fd *fd,
4066 				const struct qbman_result *dq,
4067 				struct dpaa2_queue *rxq,
4068 				struct rte_event *ev)
4069 {
4070 	struct rte_crypto_op *crypto_op;
4071 	struct dpaa2_sec_qp *qp;
4072 
4073 	qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
4074 	ev->flow_id = rxq->ev.flow_id;
4075 	ev->sub_event_type = rxq->ev.sub_event_type;
4076 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4077 	ev->op = RTE_EVENT_OP_NEW;
4078 	ev->sched_type = rxq->ev.sched_type;
4079 	ev->queue_id = rxq->ev.queue_id;
4080 	ev->priority = rxq->ev.priority;
4081 	crypto_op = sec_fd_to_mbuf(fd, qp);
4082 
4083 	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
4084 	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
4085 		DPAA2_EQCR_OPRID_SHIFT;
4086 	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
4087 		DPAA2_EQCR_SEQNUM_SHIFT;
4088 
4089 	qbman_swp_dqrr_consume(swp, dq);
4090 	ev->event_ptr = crypto_op;
4091 }
4092 
4093 int
4094 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
4095 		int qp_id,
4096 		struct dpaa2_dpcon_dev *dpcon,
4097 		const struct rte_event *event)
4098 {
4099 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4100 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4101 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
4102 	struct dpseci_rx_queue_cfg cfg;
4103 	uint8_t priority;
4104 	int ret;
4105 
4106 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
4107 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
4108 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
4109 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
4110 	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
4111 		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
4112 	else
4113 		return -EINVAL;
4114 
4115 	priority = (event->priority * (dpcon->num_priorities - 1)) /
4116 		   RTE_EVENT_DEV_PRIORITY_LOWEST;
4117 
4118 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4119 	cfg.options = DPSECI_QUEUE_OPT_DEST;
4120 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
4121 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
4122 	cfg.dest_cfg.priority = priority;
4123 
4124 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
4125 	cfg.user_ctx = (size_t)(&qp->rx_vq);
4126 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
4127 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
4128 		cfg.order_preservation_en = 1;
4129 	}
4130 
4131 	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
4132 		struct opr_cfg ocfg;
4133 
4134 		/* Restoration window size = 256 frames */
4135 		ocfg.oprrws = 3;
4136 		/* Restoration window size = 512 frames for LX2 */
4137 		if (dpaa2_svr_family == SVR_LX2160A)
4138 			ocfg.oprrws = 4;
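		/* Editorial note: oprrws encodes the order-restoration window
		 * as 32 << oprrws frames, hence 3 -> 256 and 4 -> 512 above.
		 */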
4139 		/* Auto advance NESN window enabled */
4140 		ocfg.oa = 1;
4141 		/* Late arrival window size disabled */
4142 		ocfg.olws = 0;
4143 		/* ORL resource exhaustion advance NESN disabled */
4144 		ocfg.oeane = 0;
4145 
4146 		if (priv->en_loose_ordered)
4147 			ocfg.oloe = 1;
4148 		else
4149 			ocfg.oloe = 0;
4150 
4151 		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
4152 				   qp_id, OPR_OPT_CREATE, &ocfg);
4153 		if (ret) {
4154 			DPAA2_SEC_ERR("Error setting opr: ret: %d", ret);
4155 			return ret;
4156 		}
4157 		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
4158 		priv->en_ordered = 1;
4159 	}
4160 
4161 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4162 				  qp_id, &cfg);
4163 	if (ret) {
4164 		DPAA2_SEC_ERR("Error in dpseci_set_queue: ret: %d", ret);
4165 		return ret;
4166 	}
4167 
4168 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
4169 
4170 	return 0;
4171 }
4172 
4173 int
4174 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
4175 			int qp_id)
4176 {
4177 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4178 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4179 	struct dpseci_rx_queue_cfg cfg;
4180 	int ret;
4181 
4182 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4183 	cfg.options = DPSECI_QUEUE_OPT_DEST;
4184 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
4185 
4186 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4187 				  qp_id, &cfg);
4188 	if (ret)
4189 		DPAA2_SEC_ERR("Error in dpseci_set_queue: ret: %d", ret);
4190 
4191 	return ret;
4192 }
4193 
4194 static struct rte_cryptodev_ops crypto_ops = {
4195 	.dev_configure	      = dpaa2_sec_dev_configure,
4196 	.dev_start	      = dpaa2_sec_dev_start,
4197 	.dev_stop	      = dpaa2_sec_dev_stop,
4198 	.dev_close	      = dpaa2_sec_dev_close,
4199 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
4200 	.stats_get	      = dpaa2_sec_stats_get,
4201 	.stats_reset	      = dpaa2_sec_stats_reset,
4202 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
4203 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
4204 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
4205 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
4206 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
4207 	/* Raw data-path API related operations */
4208 	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
4209 	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
4210 };
4211 
4212 static const struct rte_security_capability *
4213 dpaa2_sec_capabilities_get(void *device __rte_unused)
4214 {
4215 	return dpaa2_sec_security_cap;
4216 }
4217 
4218 static const struct rte_security_ops dpaa2_sec_security_ops = {
4219 	.session_create = dpaa2_sec_security_session_create,
4220 	.session_update = dpaa2_sec_security_session_update,
4221 	.session_get_size = dpaa2_sec_security_session_get_size,
4222 	.session_stats_get = NULL,
4223 	.session_destroy = dpaa2_sec_security_session_destroy,
4224 	.set_pkt_metadata = NULL,
4225 	.capabilities_get = dpaa2_sec_capabilities_get
4226 };
4227 
4228 static int
4229 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
4230 {
4231 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4232 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4233 	int ret;
4234 
4235 	PMD_INIT_FUNC_TRACE();
4236 
4237 	/* Function is reverse of dpaa2_sec_dev_init.
4238 	 * It does the following:
4239 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
4240 	 * 2. Close the DPSECI device
4241 	 * 3. Free the allocated resources.
4242 	 */
4243 
4244 	/* Close the device at the underlying layer */
4245 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
4246 	if (ret) {
4247 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
4248 		return -1;
4249 	}
4250 
4251 	/* Free the allocated memory for the dpseci object and security context */
4252 	priv->hw = NULL;
4253 	rte_free(dpseci);
4254 	rte_free(dev->security_ctx);
4255 
4256 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
4257 		       dev->data->name, rte_socket_id());
4258 
4259 	return 0;
4260 }
4261 
4262 static int
4263 check_devargs_handler(const char *key, const char *value,
4264 		      void *opaque)
4265 {
4266 	struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
4267 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4268 
4269 	if (!strcmp(key, "drv_strict_order")) {
4270 		priv->en_loose_ordered = false;
4271 	} else if (!strcmp(key, "drv_dump_mode")) {
4272 		dpaa2_sec_dp_dump = atoi(value);
4273 		if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
4274 			DPAA2_SEC_WARN("Unsupported drv_dump_mode value, "
4275 				      "falling back to full dump mode");
4277 			dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
4278 		}
4279 	} else
4280 		return -1;
4281 
4282 	return 0;
4283 }
4284 
4285 static void
4286 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
4287 {
4288 	struct rte_kvargs *kvlist;
4289 	struct rte_devargs *devargs;
4290 
4291 	devargs = cryptodev->device->devargs;
4292 	if (!devargs)
4293 		return;
4294 
4295 	kvlist = rte_kvargs_parse(devargs->args, NULL);
4296 	if (!kvlist)
4297 		return;
4298 
4299 	if (!rte_kvargs_count(kvlist, key)) {
4300 		rte_kvargs_free(kvlist);
4301 		return;
4302 	}
4303 
4304 	rte_kvargs_process(kvlist, key,
4305 			check_devargs_handler, (void *)cryptodev);
4306 	rte_kvargs_free(kvlist);
4307 }
4308 
4309 static int
4310 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
4311 {
4312 	struct dpaa2_sec_dev_private *internals;
4313 	struct rte_device *dev = cryptodev->device;
4314 	struct rte_dpaa2_device *dpaa2_dev;
4315 	struct rte_security_ctx *security_instance;
4316 	struct fsl_mc_io *dpseci;
4317 	uint16_t token;
4318 	struct dpseci_attr attr;
4319 	int retcode, hw_id;
4320 
4321 	PMD_INIT_FUNC_TRACE();
4322 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
4323 	hw_id = dpaa2_dev->object_id;
4324 
4325 	cryptodev->driver_id = cryptodev_driver_id;
4326 	cryptodev->dev_ops = &crypto_ops;
4327 
4328 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
4329 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
4330 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
4331 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
4332 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
4333 			RTE_CRYPTODEV_FF_SECURITY |
4334 			RTE_CRYPTODEV_FF_SYM_RAW_DP |
4335 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
4336 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
4337 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
4338 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
4339 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
4340 
4341 	internals = cryptodev->data->dev_private;
4342 
4343 	/*
4344 	 * For secondary processes, we don't initialise any further as primary
4345 	 * has already done this work. Only check we don't need a different
4346 	 * RX function
4347 	 */
4348 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4349 		DPAA2_SEC_DEBUG("Device already init by primary process");
4350 		return 0;
4351 	}
4352 
4353 	/* Initialize security_ctx only for primary process*/
4354 	security_instance = rte_malloc("rte_security_instances_ops",
4355 				sizeof(struct rte_security_ctx), 0);
4356 	if (security_instance == NULL)
4357 		return -ENOMEM;
4358 	security_instance->device = (void *)cryptodev;
4359 	security_instance->ops = &dpaa2_sec_security_ops;
4360 	security_instance->sess_cnt = 0;
4361 	cryptodev->security_ctx = security_instance;
4362 
4363 	/*Open the rte device via MC and save the handle for further use*/
4364 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
4365 				sizeof(struct fsl_mc_io), 0);
4366 	if (!dpseci) {
4367 		DPAA2_SEC_ERR(
4368 			"Error in allocating the memory for dpsec object");
4369 		return -ENOMEM;
4370 	}
4371 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
4372 
4373 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
4374 	if (retcode != 0) {
4375 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
4376 			      retcode);
4377 		goto init_error;
4378 	}
4379 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
4380 	if (retcode != 0) {
4381 		DPAA2_SEC_ERR(
4382 			     "Cannot get dpsec device attributed: Error = %x",
4383 			     retcode);
4384 		goto init_error;
4385 	}
4386 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
4387 			"dpsec-%u", hw_id);
4388 
4389 	internals->max_nb_queue_pairs = attr.num_tx_queues;
4390 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
4391 	internals->hw = dpseci;
4392 	internals->token = token;
4393 	internals->en_loose_ordered = true;
4394 
4395 	dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
4396 	dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
4397 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
4398 	return 0;
4399 
4400 init_error:
4401 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
4402 
4403 	/* dpaa2_sec_uninit(crypto_dev_name); */
4404 	return -EFAULT;
4405 }
4406 
4407 static int
4408 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
4409 			  struct rte_dpaa2_device *dpaa2_dev)
4410 {
4411 	struct rte_cryptodev *cryptodev;
4412 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4413 
4414 	int retval;
4415 
4416 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4417 			dpaa2_dev->object_id);
4418 
4419 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4420 	if (cryptodev == NULL)
4421 		return -ENOMEM;
4422 
4423 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4424 		cryptodev->data->dev_private = rte_zmalloc_socket(
4425 					"cryptodev private structure",
4426 					sizeof(struct dpaa2_sec_dev_private),
4427 					RTE_CACHE_LINE_SIZE,
4428 					rte_socket_id());
4429 
4430 		if (cryptodev->data->dev_private == NULL)
4431 			rte_panic("Cannot allocate memzone for private "
4432 				  "device data");
4433 	}
4434 
4435 	dpaa2_dev->cryptodev = cryptodev;
4436 	cryptodev->device = &dpaa2_dev->device;
4437 
4438 	/* init user callbacks */
4439 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
4440 
4441 	if (dpaa2_svr_family == SVR_LX2160A)
4442 		rta_set_sec_era(RTA_SEC_ERA_10);
4443 	else
4444 		rta_set_sec_era(RTA_SEC_ERA_8);
4445 
4446 	DPAA2_SEC_INFO("DPAA2 SEC ERA is %d", USER_SEC_ERA(rta_get_sec_era()));
4447 
4448 	/* Invoke PMD device initialization function */
4449 	retval = dpaa2_sec_dev_init(cryptodev);
4450 	if (retval == 0) {
4451 		rte_cryptodev_pmd_probing_finish(cryptodev);
4452 		return 0;
4453 	}
4454 
4455 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4456 		rte_free(cryptodev->data->dev_private);
4457 
4458 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4459 
4460 	return -ENXIO;
4461 }
4462 
4463 static int
4464 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4465 {
4466 	struct rte_cryptodev *cryptodev;
4467 	int ret;
4468 
4469 	cryptodev = dpaa2_dev->cryptodev;
4470 	if (cryptodev == NULL)
4471 		return -ENODEV;
4472 
4473 	ret = dpaa2_sec_uninit(cryptodev);
4474 	if (ret)
4475 		return ret;
4476 
4477 	return rte_cryptodev_pmd_destroy(cryptodev);
4478 }
4479 
4480 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4481 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4482 	.drv_type = DPAA2_CRYPTO,
4483 	.driver = {
4484 		.name = "DPAA2 SEC PMD"
4485 	},
4486 	.probe = cryptodev_dpaa2_sec_probe,
4487 	.remove = cryptodev_dpaa2_sec_remove,
4488 };
4489 
4490 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4491 
4492 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4493 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4494 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4495 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
4496 		DRIVER_STRICT_ORDER "=<int>"
4497 		DRIVER_DUMP_MODE "=<int>");
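/*
 * Illustrative usage (editorial): the devargs registered above are passed on
 * the fslmc bus device; the object name below is a placeholder, e.g.:
 *   dpdk-app ... -a fslmc:dpseci.1,drv_dump_mode=2,drv_strict_order=1
 */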
4498 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4499