1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7 
8 #include <time.h>
9 #include <net/if.h>
10 #include <unistd.h>
11 
12 #include <rte_ip.h>
13 #include <rte_mbuf.h>
14 #include <rte_cryptodev.h>
15 #include <rte_malloc.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_cycles.h>
19 #include <rte_kvargs.h>
20 #include <rte_dev.h>
21 #include <cryptodev_pmd.h>
22 #include <rte_common.h>
23 #include <rte_fslmc.h>
24 #include <fslmc_vfio.h>
25 #include <dpaa2_hw_pvt.h>
26 #include <dpaa2_hw_dpio.h>
27 #include <dpaa2_hw_mempool.h>
28 #include <fsl_dpopr.h>
29 #include <fsl_dpseci.h>
30 #include <fsl_mc_sys.h>
31 #include <rte_hexdump.h>
32 
33 #include "dpaa2_sec_priv.h"
34 #include "dpaa2_sec_event.h"
35 #include "dpaa2_sec_logs.h"
36 
37 /* RTA header files */
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41 #include <desc/algo.h>
42 
43 /* Minimum job descriptor consists of a one-word job descriptor HEADER and
44  * a pointer to the shared descriptor
45  */
46 #define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
47 #define FSL_VENDOR_ID           0x1957
48 #define FSL_DEVICE_ID           0x410
49 #define FSL_SUBSYSTEM_SEC       1
50 #define FSL_MC_DPSECI_DEVID     3
51 
52 #define NO_PREFETCH 0
53 
54 #define DRIVER_DUMP_MODE "drv_dump_mode"
55 #define DRIVER_STRICT_ORDER "drv_strict_order"
56 
57 /* DPAA2_SEC_DP_DUMP levels */
58 enum dpaa2_sec_dump_levels {
59 	DPAA2_SEC_DP_NO_DUMP,
60 	DPAA2_SEC_DP_ERR_DUMP,
61 	DPAA2_SEC_DP_FULL_DUMP
62 };
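/* A hedged usage sketch (assumption: the standard fslmc bus devargs syntax;
 * the object name "dpseci.1" is illustrative, not taken from this file):
 * the dump level can be selected at probe time via DRIVER_DUMP_MODE, e.g.
 *
 *   dpdk-app -a fslmc:dpseci.1,drv_dump_mode=1   // dump on error only
 *   dpdk-app -a fslmc:dpseci.1,drv_dump_mode=2   // dump every packet
 *
 * DPAA2_SEC_DP_ERR_DUMP is the default, as initialized below.
 */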
63 
64 uint8_t cryptodev_driver_id;
65 uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
66 
67 #ifdef RTE_LIB_SECURITY
68 static inline int
69 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
70 			   struct rte_crypto_op *op,
71 			   struct qbman_fd *fd, uint16_t bpid)
72 {
73 	struct rte_crypto_sym_op *sym_op = op->sym;
74 	struct ctxt_priv *priv = sess->ctxt;
75 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
76 	struct sec_flow_context *flc;
77 	struct rte_mbuf *mbuf;
78 	uint32_t in_len = 0, out_len = 0;
79 
80 	if (sym_op->m_dst)
81 		mbuf = sym_op->m_dst;
82 	else
83 		mbuf = sym_op->m_src;
84 
85 	/* first FLE entry used to store mbuf and session ctxt */
86 	fle = (struct qbman_fle *)rte_malloc(NULL,
87 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
88 			RTE_CACHE_LINE_SIZE);
89 	if (unlikely(!fle)) {
90 		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
91 		return -ENOMEM;
92 	}
93 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
94 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
95 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
96 
97 	/* Save the shared descriptor */
98 	flc = &priv->flc_desc[0].flc;
99 
100 	op_fle = fle + 1;
101 	ip_fle = fle + 2;
102 	sge = fle + 3;
103 
104 	if (likely(bpid < MAX_BPID)) {
105 		DPAA2_SET_FD_BPID(fd, bpid);
106 		DPAA2_SET_FLE_BPID(op_fle, bpid);
107 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
108 	} else {
109 		DPAA2_SET_FD_IVP(fd);
110 		DPAA2_SET_FLE_IVP(op_fle);
111 		DPAA2_SET_FLE_IVP(ip_fle);
112 	}
113 
114 	/* Configure FD as a FRAME LIST */
115 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
116 	DPAA2_SET_FD_COMPOUND_FMT(fd);
117 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
118 
119 	/* Configure Output FLE with Scatter/Gather Entry */
120 	DPAA2_SET_FLE_SG_EXT(op_fle);
121 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
122 
123 	/* Configure Output SGE for Encap/Decap */
124 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
125 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
126 	/* o/p segs */
127 	while (mbuf->next) {
128 		sge->length = mbuf->data_len;
129 		out_len += sge->length;
130 		sge++;
131 		mbuf = mbuf->next;
132 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
133 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
134 	}
135 	/* using buf_len for last buf - so that extra data can be added */
136 	sge->length = mbuf->buf_len - mbuf->data_off;
137 	out_len += sge->length;
138 
139 	DPAA2_SET_FLE_FIN(sge);
140 	op_fle->length = out_len;
141 
142 	sge++;
143 	mbuf = sym_op->m_src;
144 
145 	/* Configure Input FLE with Scatter/Gather Entry */
146 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
147 	DPAA2_SET_FLE_SG_EXT(ip_fle);
148 	DPAA2_SET_FLE_FIN(ip_fle);
149 
150 	/* Configure input SGE for Encap/Decap */
151 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
152 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
153 	sge->length = mbuf->data_len;
154 	in_len += sge->length;
155 
156 	mbuf = mbuf->next;
157 	/* i/p segs */
158 	while (mbuf) {
159 		sge++;
160 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
161 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
162 		sge->length = mbuf->data_len;
163 		in_len += sge->length;
164 		mbuf = mbuf->next;
165 	}
166 	ip_fle->length = in_len;
167 	DPAA2_SET_FLE_FIN(sge);
168 
169 	/* In case of PDCP, the per-packet HFN is stored in the
170 	 * mbuf private area after the sym_op.
171 	 */
172 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
173 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
174 					sess->pdcp.hfn_ovd_offset);
175 		/* enable HFN override */
176 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
177 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
178 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
179 	}
180 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
181 
182 	return 0;
183 }
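/* For reference, a sketch of the compound-FD layout assembled above
 * (derived from this code, not from a hardware manual):
 *
 *   fle[0]  bookkeeping: crypto op pointer + session ctxt (FLE_SAVE_CTXT)
 *   fle[1]  op_fle: output frame list entry, SG-extended, points at SGEs
 *   fle[2]  ip_fle: input frame list entry, SG-extended, FIN set
 *   fle[3+] SGEs: output segments first, then the input segments
 *
 * The FD address points at op_fle, so the dequeue path can step back one
 * FLE (fle - 1) to recover the crypto op, as sec_fd_to_mbuf() does.
 */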
184 
185 static inline int
186 build_proto_compound_fd(dpaa2_sec_session *sess,
187 	       struct rte_crypto_op *op,
188 	       struct qbman_fd *fd, uint16_t bpid)
189 {
190 	struct rte_crypto_sym_op *sym_op = op->sym;
191 	struct ctxt_priv *priv = sess->ctxt;
192 	struct qbman_fle *fle, *ip_fle, *op_fle;
193 	struct sec_flow_context *flc;
194 	struct rte_mbuf *src_mbuf = sym_op->m_src;
195 	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
196 	int retval;
197 
198 	if (!dst_mbuf)
199 		dst_mbuf = src_mbuf;
200 
201 	/* Save the shared descriptor */
202 	flc = &priv->flc_desc[0].flc;
203 
204 	/* we are using the first FLE entry to store Mbuf */
205 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
206 	if (retval) {
207 		DPAA2_SEC_DP_ERR("Memory alloc failed");
208 		return -ENOMEM;
209 	}
210 	memset(fle, 0, FLE_POOL_BUF_SIZE);
211 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
212 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
213 
214 	op_fle = fle + 1;
215 	ip_fle = fle + 2;
216 
217 	if (likely(bpid < MAX_BPID)) {
218 		DPAA2_SET_FD_BPID(fd, bpid);
219 		DPAA2_SET_FLE_BPID(op_fle, bpid);
220 		DPAA2_SET_FLE_BPID(ip_fle, bpid);
221 	} else {
222 		DPAA2_SET_FD_IVP(fd);
223 		DPAA2_SET_FLE_IVP(op_fle);
224 		DPAA2_SET_FLE_IVP(ip_fle);
225 	}
226 
227 	/* Configure FD as a FRAME LIST */
228 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
229 	DPAA2_SET_FD_COMPOUND_FMT(fd);
230 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
231 
232 	/* Configure Output FLE with dst mbuf data  */
233 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
234 	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
235 	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
236 
237 	/* Configure Input FLE with src mbuf data */
238 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
239 	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
240 	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
241 
242 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
243 	DPAA2_SET_FLE_FIN(ip_fle);
244 
245 	/* In case of PDCP, the per-packet HFN is stored in the
246 	 * mbuf private area after the sym_op.
247 	 */
248 	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
249 		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
250 					sess->pdcp.hfn_ovd_offset);
251 		/* enable HFN override */
252 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
253 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
254 		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
255 	}
256 
257 	return 0;
258 
259 }
260 
261 static inline int
262 build_proto_fd(dpaa2_sec_session *sess,
263 	       struct rte_crypto_op *op,
264 	       struct qbman_fd *fd, uint16_t bpid)
265 {
266 	struct rte_crypto_sym_op *sym_op = op->sym;
267 	if (sym_op->m_dst)
268 		return build_proto_compound_fd(sess, op, fd, bpid);
269 
270 	struct ctxt_priv *priv = sess->ctxt;
271 	struct sec_flow_context *flc;
272 	struct rte_mbuf *mbuf = sym_op->m_src;
273 
274 	if (likely(bpid < MAX_BPID))
275 		DPAA2_SET_FD_BPID(fd, bpid);
276 	else
277 		DPAA2_SET_FD_IVP(fd);
278 
279 	/* Save the shared descriptor */
280 	flc = &priv->flc_desc[0].flc;
281 
282 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
283 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
284 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
285 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
286 
287 	/* save physical address of mbuf */
288 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
289 	mbuf->buf_iova = (size_t)op;
290 
291 	return 0;
292 }
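/* Note (derived from the code above): build_proto_fd() parks the crypto
 * op pointer in mbuf->buf_iova and stashes the real IOVA in
 * aead.digest.phys_addr; sec_simple_fd_to_mbuf() undoes both on dequeue.
 */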
293 #endif
294 
295 static inline int
296 build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
297 		 struct rte_crypto_op *op,
298 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
299 {
300 	struct rte_crypto_sym_op *sym_op = op->sym;
301 	struct ctxt_priv *priv = sess->ctxt;
302 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
303 	struct sec_flow_context *flc;
304 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
305 	int icv_len = sess->digest_length;
306 	uint8_t *old_icv;
307 	struct rte_mbuf *mbuf;
308 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
309 			sess->iv.offset);
310 
311 	if (sym_op->m_dst)
312 		mbuf = sym_op->m_dst;
313 	else
314 		mbuf = sym_op->m_src;
315 
316 	/* first FLE entry used to store mbuf and session ctxt */
317 	fle = (struct qbman_fle *)rte_malloc(NULL,
318 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
319 			RTE_CACHE_LINE_SIZE);
320 	if (unlikely(!fle)) {
321 		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
322 		return -ENOMEM;
323 	}
324 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
325 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
326 	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
327 
328 	op_fle = fle + 1;
329 	ip_fle = fle + 2;
330 	sge = fle + 3;
331 
332 	/* Save the shared descriptor */
333 	flc = &priv->flc_desc[0].flc;
334 
335 	/* Configure FD as a FRAME LIST */
336 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
337 	DPAA2_SET_FD_COMPOUND_FMT(fd);
338 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
339 
340 	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
341 		   "iv-len=%d data_off: 0x%x\n",
342 		   sym_op->aead.data.offset,
343 		   sym_op->aead.data.length,
344 		   sess->digest_length,
345 		   sess->iv.length,
346 		   sym_op->m_src->data_off);
347 
348 	/* Configure Output FLE with Scatter/Gather Entry */
349 	DPAA2_SET_FLE_SG_EXT(op_fle);
350 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
351 
352 	if (auth_only_len)
353 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
354 
355 	op_fle->length = (sess->dir == DIR_ENC) ?
356 			(sym_op->aead.data.length + icv_len) :
357 			sym_op->aead.data.length;
358 
359 	/* Configure Output SGE for Encap/Decap */
360 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
361 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
362 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
363 
364 	mbuf = mbuf->next;
365 	/* o/p segs */
366 	while (mbuf) {
367 		sge++;
368 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
369 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
370 		sge->length = mbuf->data_len;
371 		mbuf = mbuf->next;
372 	}
373 	sge->length -= icv_len;
374 
375 	if (sess->dir == DIR_ENC) {
376 		sge++;
377 		DPAA2_SET_FLE_ADDR(sge,
378 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
379 		sge->length = icv_len;
380 	}
381 	DPAA2_SET_FLE_FIN(sge);
382 
383 	sge++;
384 	mbuf = sym_op->m_src;
385 
386 	/* Configure Input FLE with Scatter/Gather Entry */
387 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
388 	DPAA2_SET_FLE_SG_EXT(ip_fle);
389 	DPAA2_SET_FLE_FIN(ip_fle);
390 	ip_fle->length = (sess->dir == DIR_ENC) ?
391 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
392 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
393 		 icv_len);
394 
395 	/* Configure Input SGE for Encap/Decap */
396 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
397 	sge->length = sess->iv.length;
398 
399 	sge++;
400 	if (auth_only_len) {
401 		DPAA2_SET_FLE_ADDR(sge,
402 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
403 		sge->length = auth_only_len;
404 		sge++;
405 	}
406 
407 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
408 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
409 				mbuf->data_off);
410 	sge->length = mbuf->data_len - sym_op->aead.data.offset;
411 
412 	mbuf = mbuf->next;
413 	/* i/p segs */
414 	while (mbuf) {
415 		sge++;
416 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
417 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
418 		sge->length = mbuf->data_len;
419 		mbuf = mbuf->next;
420 	}
421 
422 	if (sess->dir == DIR_DEC) {
423 		sge++;
424 		old_icv = (uint8_t *)(sge + 1);
425 		memcpy(old_icv,	sym_op->aead.digest.data, icv_len);
426 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
427 		sge->length = icv_len;
428 	}
429 
430 	DPAA2_SET_FLE_FIN(sge);
431 	if (auth_only_len) {
432 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
433 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
434 	}
435 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
436 
437 	return 0;
438 }
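/* A sketch of the GCM frames built above (illustrative, derived from the
 * FLE lengths in this function):
 *
 *   input,  encrypt: [ IV | AAD (auth_only_len) | plaintext ]
 *   input,  decrypt: [ IV | AAD | ciphertext | old ICV copy ]
 *   output, encrypt: [ ciphertext | new ICV (icv_len) ]
 *   output, decrypt: [ plaintext ]
 */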
439 
440 static inline int
441 build_authenc_gcm_fd(dpaa2_sec_session *sess,
442 		     struct rte_crypto_op *op,
443 		     struct qbman_fd *fd, uint16_t bpid)
444 {
445 	struct rte_crypto_sym_op *sym_op = op->sym;
446 	struct ctxt_priv *priv = sess->ctxt;
447 	struct qbman_fle *fle, *sge;
448 	struct sec_flow_context *flc;
449 	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
450 	int icv_len = sess->digest_length, retval;
451 	uint8_t *old_icv;
452 	struct rte_mbuf *dst;
453 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
454 			sess->iv.offset);
455 
456 	if (sym_op->m_dst)
457 		dst = sym_op->m_dst;
458 	else
459 		dst = sym_op->m_src;
460 
461 	/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
462 	 * Currently we do not know which FLE has the mbuf stored.
463 	 * So while retrieving we can go back 1 FLE from the FD ADDR
464 	 * to get the mbuf address from the previous FLE.
465 	 * A better approach would be to use the inline mbuf.
466 	 */
467 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
468 	if (retval) {
469 		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
470 		return -ENOMEM;
471 	}
472 	memset(fle, 0, FLE_POOL_BUF_SIZE);
473 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
474 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
475 	fle = fle + 1;
476 	sge = fle + 2;
477 	if (likely(bpid < MAX_BPID)) {
478 		DPAA2_SET_FD_BPID(fd, bpid);
479 		DPAA2_SET_FLE_BPID(fle, bpid);
480 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
481 		DPAA2_SET_FLE_BPID(sge, bpid);
482 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
483 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
484 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
485 	} else {
486 		DPAA2_SET_FD_IVP(fd);
487 		DPAA2_SET_FLE_IVP(fle);
488 		DPAA2_SET_FLE_IVP((fle + 1));
489 		DPAA2_SET_FLE_IVP(sge);
490 		DPAA2_SET_FLE_IVP((sge + 1));
491 		DPAA2_SET_FLE_IVP((sge + 2));
492 		DPAA2_SET_FLE_IVP((sge + 3));
493 	}
494 
495 	/* Save the shared descriptor */
496 	flc = &priv->flc_desc[0].flc;
497 	/* Configure FD as a FRAME LIST */
498 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
499 	DPAA2_SET_FD_COMPOUND_FMT(fd);
500 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
501 
502 	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
503 		   "iv-len=%d data_off: 0x%x\n",
504 		   sym_op->aead.data.offset,
505 		   sym_op->aead.data.length,
506 		   sess->digest_length,
507 		   sess->iv.length,
508 		   sym_op->m_src->data_off);
509 
510 	/* Configure Output FLE with Scatter/Gather Entry */
511 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
512 	if (auth_only_len)
513 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
514 	fle->length = (sess->dir == DIR_ENC) ?
515 			(sym_op->aead.data.length + icv_len) :
516 			sym_op->aead.data.length;
517 
518 	DPAA2_SET_FLE_SG_EXT(fle);
519 
520 	/* Configure Output SGE for Encap/Decap */
521 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
522 	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
523 	sge->length = sym_op->aead.data.length;
524 
525 	if (sess->dir == DIR_ENC) {
526 		sge++;
527 		DPAA2_SET_FLE_ADDR(sge,
528 				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
529 		sge->length = sess->digest_length;
530 	}
531 	DPAA2_SET_FLE_FIN(sge);
532 
533 	sge++;
534 	fle++;
535 
536 	/* Configure Input FLE with Scatter/Gather Entry */
537 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
538 	DPAA2_SET_FLE_SG_EXT(fle);
539 	DPAA2_SET_FLE_FIN(fle);
540 	fle->length = (sess->dir == DIR_ENC) ?
541 		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
542 		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
543 		 sess->digest_length);
544 
545 	/* Configure Input SGE for Encap/Decap */
546 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
547 	sge->length = sess->iv.length;
548 	sge++;
549 	if (auth_only_len) {
550 		DPAA2_SET_FLE_ADDR(sge,
551 				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
552 		sge->length = auth_only_len;
553 		DPAA2_SET_FLE_BPID(sge, bpid);
554 		sge++;
555 	}
556 
557 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
558 	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
559 				sym_op->m_src->data_off);
560 	sge->length = sym_op->aead.data.length;
561 	if (sess->dir == DIR_DEC) {
562 		sge++;
563 		old_icv = (uint8_t *)(sge + 1);
564 		memcpy(old_icv,	sym_op->aead.digest.data,
565 		       sess->digest_length);
566 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
567 		sge->length = sess->digest_length;
568 	}
569 	DPAA2_SET_FLE_FIN(sge);
570 
571 	if (auth_only_len) {
572 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
573 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
574 	}
575 
576 	DPAA2_SET_FD_LEN(fd, fle->length);
577 	return 0;
578 }
579 
580 static inline int
581 build_authenc_sg_fd(dpaa2_sec_session *sess,
582 		 struct rte_crypto_op *op,
583 		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
584 {
585 	struct rte_crypto_sym_op *sym_op = op->sym;
586 	struct ctxt_priv *priv = sess->ctxt;
587 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
588 	struct sec_flow_context *flc;
589 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
590 				sym_op->auth.data.offset;
591 	uint16_t auth_tail_len = sym_op->auth.data.length -
592 				sym_op->cipher.data.length - auth_hdr_len;
593 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
594 	int icv_len = sess->digest_length;
595 	uint8_t *old_icv;
596 	struct rte_mbuf *mbuf;
597 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
598 			sess->iv.offset);
599 
600 	if (sym_op->m_dst)
601 		mbuf = sym_op->m_dst;
602 	else
603 		mbuf = sym_op->m_src;
604 
605 	/* first FLE entry used to store mbuf and session ctxt */
606 	fle = (struct qbman_fle *)rte_malloc(NULL,
607 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
608 			RTE_CACHE_LINE_SIZE);
609 	if (unlikely(!fle)) {
610 		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
611 		return -ENOMEM;
612 	}
613 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
614 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
615 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
616 
617 	op_fle = fle + 1;
618 	ip_fle = fle + 2;
619 	sge = fle + 3;
620 
621 	/* Save the shared descriptor */
622 	flc = &priv->flc_desc[0].flc;
623 
624 	/* Configure FD as a FRAME LIST */
625 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
626 	DPAA2_SET_FD_COMPOUND_FMT(fd);
627 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
628 
629 	DPAA2_SEC_DP_DEBUG(
630 		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
631 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
632 		sym_op->auth.data.offset,
633 		sym_op->auth.data.length,
634 		sess->digest_length,
635 		sym_op->cipher.data.offset,
636 		sym_op->cipher.data.length,
637 		sess->iv.length,
638 		sym_op->m_src->data_off);
639 
640 	/* Configure Output FLE with Scatter/Gather Entry */
641 	DPAA2_SET_FLE_SG_EXT(op_fle);
642 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
643 
644 	if (auth_only_len)
645 		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
646 
647 	op_fle->length = (sess->dir == DIR_ENC) ?
648 			(sym_op->cipher.data.length + icv_len) :
649 			sym_op->cipher.data.length;
650 
651 	/* Configure Output SGE for Encap/Decap */
652 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
653 	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
654 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
655 
656 	mbuf = mbuf->next;
657 	/* o/p segs */
658 	while (mbuf) {
659 		sge++;
660 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
661 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
662 		sge->length = mbuf->data_len;
663 		mbuf = mbuf->next;
664 	}
665 	sge->length -= icv_len;
666 
667 	if (sess->dir == DIR_ENC) {
668 		sge++;
669 		DPAA2_SET_FLE_ADDR(sge,
670 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
671 		sge->length = icv_len;
672 	}
673 	DPAA2_SET_FLE_FIN(sge);
674 
675 	sge++;
676 	mbuf = sym_op->m_src;
677 
678 	/* Configure Input FLE with Scatter/Gather Entry */
679 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
680 	DPAA2_SET_FLE_SG_EXT(ip_fle);
681 	DPAA2_SET_FLE_FIN(ip_fle);
682 	ip_fle->length = (sess->dir == DIR_ENC) ?
683 			(sym_op->auth.data.length + sess->iv.length) :
684 			(sym_op->auth.data.length + sess->iv.length +
685 			 icv_len);
686 
687 	/* Configure Input SGE for Encap/Decap */
688 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
689 	sge->length = sess->iv.length;
690 
691 	sge++;
692 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
693 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
694 				mbuf->data_off);
695 	sge->length = mbuf->data_len - sym_op->auth.data.offset;
696 
697 	mbuf = mbuf->next;
698 	/* i/p segs */
699 	while (mbuf) {
700 		sge++;
701 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
702 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
703 		sge->length = mbuf->data_len;
704 		mbuf = mbuf->next;
705 	}
706 	sge->length -= icv_len;
707 
708 	if (sess->dir == DIR_DEC) {
709 		sge++;
710 		old_icv = (uint8_t *)(sge + 1);
711 		memcpy(old_icv,	sym_op->auth.digest.data,
712 		       icv_len);
713 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
714 		sge->length = icv_len;
715 	}
716 
717 	DPAA2_SET_FLE_FIN(sge);
718 	if (auth_only_len) {
719 		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
720 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
721 	}
722 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
723 
724 	return 0;
725 }
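/* Worked example for the auth_only_len encoding used above (assumed
 * offsets, purely illustrative): if auth covers bytes [0, 100) and
 * cipher covers bytes [16, 96), then
 *
 *   auth_hdr_len  = 16 - 0 = 16
 *   auth_tail_len = 100 - 80 - 16 = 4
 *   auth_only_len = (4 << 16) | 16 = 0x00040010
 *
 * i.e. the tail length rides in the upper 16 bits and the header length
 * in the lower 16 bits.
 */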
726 
727 static inline int
728 build_authenc_fd(dpaa2_sec_session *sess,
729 		 struct rte_crypto_op *op,
730 		 struct qbman_fd *fd, uint16_t bpid)
731 {
732 	struct rte_crypto_sym_op *sym_op = op->sym;
733 	struct ctxt_priv *priv = sess->ctxt;
734 	struct qbman_fle *fle, *sge;
735 	struct sec_flow_context *flc;
736 	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
737 				sym_op->auth.data.offset;
738 	uint16_t auth_tail_len = sym_op->auth.data.length -
739 				sym_op->cipher.data.length - auth_hdr_len;
740 	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
741 
742 	int icv_len = sess->digest_length, retval;
743 	uint8_t *old_icv;
744 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
745 			sess->iv.offset);
746 	struct rte_mbuf *dst;
747 
748 	if (sym_op->m_dst)
749 		dst = sym_op->m_dst;
750 	else
751 		dst = sym_op->m_src;
752 
753 	/* we are using the first FLE entry to store Mbuf.
754 	 * Currently we do not know which FLE has the mbuf stored.
755 	 * So while retrieving we can go back 1 FLE from the FD ADDR
756 	 * to get the mbuf address from the previous FLE.
757 	 * A better approach would be to use the inline mbuf.
758 	 */
759 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
760 	if (retval) {
761 		DPAA2_SEC_ERR("Memory alloc failed for SGE");
762 		return -ENOMEM;
763 	}
764 	memset(fle, 0, FLE_POOL_BUF_SIZE);
765 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
766 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
767 	fle = fle + 1;
768 	sge = fle + 2;
769 	if (likely(bpid < MAX_BPID)) {
770 		DPAA2_SET_FD_BPID(fd, bpid);
771 		DPAA2_SET_FLE_BPID(fle, bpid);
772 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
773 		DPAA2_SET_FLE_BPID(sge, bpid);
774 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
775 		DPAA2_SET_FLE_BPID(sge + 2, bpid);
776 		DPAA2_SET_FLE_BPID(sge + 3, bpid);
777 	} else {
778 		DPAA2_SET_FD_IVP(fd);
779 		DPAA2_SET_FLE_IVP(fle);
780 		DPAA2_SET_FLE_IVP((fle + 1));
781 		DPAA2_SET_FLE_IVP(sge);
782 		DPAA2_SET_FLE_IVP((sge + 1));
783 		DPAA2_SET_FLE_IVP((sge + 2));
784 		DPAA2_SET_FLE_IVP((sge + 3));
785 	}
786 
787 	/* Save the shared descriptor */
788 	flc = &priv->flc_desc[0].flc;
789 	/* Configure FD as a FRAME LIST */
790 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
791 	DPAA2_SET_FD_COMPOUND_FMT(fd);
792 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
793 
794 	DPAA2_SEC_DP_DEBUG(
795 		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
796 		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
797 		sym_op->auth.data.offset,
798 		sym_op->auth.data.length,
799 		sess->digest_length,
800 		sym_op->cipher.data.offset,
801 		sym_op->cipher.data.length,
802 		sess->iv.length,
803 		sym_op->m_src->data_off);
804 
805 	/* Configure Output FLE with Scatter/Gather Entry */
806 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
807 	if (auth_only_len)
808 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
809 	fle->length = (sess->dir == DIR_ENC) ?
810 			(sym_op->cipher.data.length + icv_len) :
811 			sym_op->cipher.data.length;
812 
813 	DPAA2_SET_FLE_SG_EXT(fle);
814 
815 	/* Configure Output SGE for Encap/Decap */
816 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
817 	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
818 				dst->data_off);
819 	sge->length = sym_op->cipher.data.length;
820 
821 	if (sess->dir == DIR_ENC) {
822 		sge++;
823 		DPAA2_SET_FLE_ADDR(sge,
824 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
825 		sge->length = sess->digest_length;
826 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
827 					sess->iv.length));
828 	}
829 	DPAA2_SET_FLE_FIN(sge);
830 
831 	sge++;
832 	fle++;
833 
834 	/* Configure Input FLE with Scatter/Gather Entry */
835 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
836 	DPAA2_SET_FLE_SG_EXT(fle);
837 	DPAA2_SET_FLE_FIN(fle);
838 	fle->length = (sess->dir == DIR_ENC) ?
839 			(sym_op->auth.data.length + sess->iv.length) :
840 			(sym_op->auth.data.length + sess->iv.length +
841 			 sess->digest_length);
842 
843 	/* Configure Input SGE for Encap/Decap */
844 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
845 	sge->length = sess->iv.length;
846 	sge++;
847 
848 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
849 	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
850 				sym_op->m_src->data_off);
851 	sge->length = sym_op->auth.data.length;
852 	if (sess->dir == DIR_DEC) {
853 		sge++;
854 		old_icv = (uint8_t *)(sge + 1);
855 		memcpy(old_icv,	sym_op->auth.digest.data,
856 		       sess->digest_length);
857 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
858 		sge->length = sess->digest_length;
859 		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
860 				 sess->digest_length +
861 				 sess->iv.length));
862 	}
863 	DPAA2_SET_FLE_FIN(sge);
864 	if (auth_only_len) {
865 		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
866 		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
867 	}
868 	return 0;
869 }
870 
871 static inline int build_auth_sg_fd(
872 		dpaa2_sec_session *sess,
873 		struct rte_crypto_op *op,
874 		struct qbman_fd *fd,
875 		__rte_unused uint16_t bpid)
876 {
877 	struct rte_crypto_sym_op *sym_op = op->sym;
878 	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
879 	struct sec_flow_context *flc;
880 	struct ctxt_priv *priv = sess->ctxt;
881 	int data_len, data_offset;
882 	uint8_t *old_digest;
883 	struct rte_mbuf *mbuf;
884 
885 	data_len = sym_op->auth.data.length;
886 	data_offset = sym_op->auth.data.offset;
887 
888 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
889 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
890 		if ((data_len & 7) || (data_offset & 7)) {
891 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
892 			return -ENOTSUP;
893 		}
894 
895 		data_len = data_len >> 3;
896 		data_offset = data_offset >> 3;
897 	}
898 
899 	mbuf = sym_op->m_src;
900 	fle = (struct qbman_fle *)rte_malloc(NULL,
901 			FLE_SG_MEM_SIZE(mbuf->nb_segs),
902 			RTE_CACHE_LINE_SIZE);
903 	if (unlikely(!fle)) {
904 		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
905 		return -ENOMEM;
906 	}
907 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
908 	/* first FLE entry used to store mbuf and session ctxt */
909 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
910 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
911 	op_fle = fle + 1;
912 	ip_fle = fle + 2;
913 	sge = fle + 3;
914 
915 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
916 	/* sg FD */
917 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
918 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
919 	DPAA2_SET_FD_COMPOUND_FMT(fd);
920 
921 	/* o/p fle */
922 	DPAA2_SET_FLE_ADDR(op_fle,
923 				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
924 	op_fle->length = sess->digest_length;
925 
926 	/* i/p fle */
927 	DPAA2_SET_FLE_SG_EXT(ip_fle);
928 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
929 	ip_fle->length = data_len;
930 
931 	if (sess->iv.length) {
932 		uint8_t *iv_ptr;
933 
934 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
935 						   sess->iv.offset);
936 
937 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
938 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
939 			sge->length = 12;
940 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
941 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
942 			sge->length = 8;
943 		} else {
944 			sge->length = sess->iv.length;
945 		}
946 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
947 		ip_fle->length += sge->length;
948 		sge++;
949 	}
950 	/* i/p 1st seg */
951 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
952 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
953 
954 	if (data_len <= (mbuf->data_len - data_offset)) {
955 		sge->length = data_len;
956 		data_len = 0;
957 	} else {
958 		sge->length = mbuf->data_len - data_offset;
959 
960 		/* remaining i/p segs */
961 		while ((data_len = data_len - sge->length) &&
962 		       (mbuf = mbuf->next)) {
963 			sge++;
964 			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
965 			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
966 			if (data_len > mbuf->data_len)
967 				sge->length = mbuf->data_len;
968 			else
969 				sge->length = data_len;
970 		}
971 	}
972 
973 	if (sess->dir == DIR_DEC) {
974 		/* Digest verification case */
975 		sge++;
976 		old_digest = (uint8_t *)(sge + 1);
977 		rte_memcpy(old_digest, sym_op->auth.digest.data,
978 			   sess->digest_length);
979 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
980 		sge->length = sess->digest_length;
981 		ip_fle->length += sess->digest_length;
982 	}
983 	DPAA2_SET_FLE_FIN(sge);
984 	DPAA2_SET_FLE_FIN(ip_fle);
985 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
986 
987 	return 0;
988 }
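/* Note on the IV handling above (derived from this code): for SNOW 3G
 * UIA2 the session IV is condensed by conv_to_snow_f9_iv() into the
 * 12-byte F9 format, and for ZUC EIA3 conv_to_zuc_eia_iv() yields an
 * 8-byte IV; other auth algorithms pass sess->iv.length bytes unchanged.
 */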
989 
990 static inline int
991 build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
992 	      struct qbman_fd *fd, uint16_t bpid)
993 {
994 	struct rte_crypto_sym_op *sym_op = op->sym;
995 	struct qbman_fle *fle, *sge;
996 	struct sec_flow_context *flc;
997 	struct ctxt_priv *priv = sess->ctxt;
998 	int data_len, data_offset;
999 	uint8_t *old_digest;
1000 	int retval;
1001 
1002 	data_len = sym_op->auth.data.length;
1003 	data_offset = sym_op->auth.data.offset;
1004 
1005 	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
1006 	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1007 		if ((data_len & 7) || (data_offset & 7)) {
1008 			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
1009 			return -ENOTSUP;
1010 		}
1011 
1012 		data_len = data_len >> 3;
1013 		data_offset = data_offset >> 3;
1014 	}
1015 
1016 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1017 	if (retval) {
1018 		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
1019 		return -ENOMEM;
1020 	}
1021 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1022 	/* TODO we are using the first FLE entry to store Mbuf.
1023 	 * Currently we do not know which FLE has the mbuf stored.
1024 	 * So while retrieving we can go back 1 FLE from the FD ADDR
1025 	 * to get the mbuf address from the previous FLE.
1026 	 * A better approach would be to use the inline mbuf.
1027 	 */
1028 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1029 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1030 	fle = fle + 1;
1031 	sge = fle + 2;
1032 
1033 	if (likely(bpid < MAX_BPID)) {
1034 		DPAA2_SET_FD_BPID(fd, bpid);
1035 		DPAA2_SET_FLE_BPID(fle, bpid);
1036 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1037 		DPAA2_SET_FLE_BPID(sge, bpid);
1038 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1039 	} else {
1040 		DPAA2_SET_FD_IVP(fd);
1041 		DPAA2_SET_FLE_IVP(fle);
1042 		DPAA2_SET_FLE_IVP((fle + 1));
1043 		DPAA2_SET_FLE_IVP(sge);
1044 		DPAA2_SET_FLE_IVP((sge + 1));
1045 	}
1046 
1047 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1048 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1049 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1050 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1051 
1052 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
1053 	fle->length = sess->digest_length;
1054 	fle++;
1055 
1056 	/* Setting input FLE */
1057 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1058 	DPAA2_SET_FLE_SG_EXT(fle);
1059 	fle->length = data_len;
1060 
1061 	if (sess->iv.length) {
1062 		uint8_t *iv_ptr;
1063 
1064 		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1065 						   sess->iv.offset);
1066 
1067 		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
1068 			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
1069 			sge->length = 12;
1070 		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
1071 			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
1072 			sge->length = 8;
1073 		} else {
1074 			sge->length = sess->iv.length;
1075 		}
1076 
1077 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1078 		fle->length = fle->length + sge->length;
1079 		sge++;
1080 	}
1081 
1082 	/* Setting data to authenticate */
1083 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1084 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1085 	sge->length = data_len;
1086 
1087 	if (sess->dir == DIR_DEC) {
1088 		sge++;
1089 		old_digest = (uint8_t *)(sge + 1);
1090 		rte_memcpy(old_digest, sym_op->auth.digest.data,
1091 			   sess->digest_length);
1092 		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
1093 		sge->length = sess->digest_length;
1094 		fle->length = fle->length + sess->digest_length;
1095 	}
1096 
1097 	DPAA2_SET_FLE_FIN(sge);
1098 	DPAA2_SET_FLE_FIN(fle);
1099 	DPAA2_SET_FD_LEN(fd, fle->length);
1100 
1101 	return 0;
1102 }
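/* Note on the >>3 conversions above: for SNOW 3G (UIA2) and ZUC (EIA3)
 * the crypto API expresses auth offset/length in bits, while the SGEs
 * take bytes. E.g. data_len = 512 bits becomes 512 >> 3 = 64 bytes;
 * anything not a multiple of 8 bits is rejected with -ENOTSUP.
 */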
1103 
1104 static int
1105 build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1106 		struct qbman_fd *fd, __rte_unused uint16_t bpid)
1107 {
1108 	struct rte_crypto_sym_op *sym_op = op->sym;
1109 	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
1110 	int data_len, data_offset;
1111 	struct sec_flow_context *flc;
1112 	struct ctxt_priv *priv = sess->ctxt;
1113 	struct rte_mbuf *mbuf;
1114 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1115 			sess->iv.offset);
1116 
1117 	data_len = sym_op->cipher.data.length;
1118 	data_offset = sym_op->cipher.data.offset;
1119 
1120 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1121 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1122 		if ((data_len & 7) || (data_offset & 7)) {
1123 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1124 			return -ENOTSUP;
1125 		}
1126 
1127 		data_len = data_len >> 3;
1128 		data_offset = data_offset >> 3;
1129 	}
1130 
1131 	if (sym_op->m_dst)
1132 		mbuf = sym_op->m_dst;
1133 	else
1134 		mbuf = sym_op->m_src;
1135 
1136 	/* first FLE entry used to store mbuf and session ctxt */
1137 	fle = (struct qbman_fle *)rte_malloc(NULL,
1138 			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
1139 			RTE_CACHE_LINE_SIZE);
1140 	if (!fle) {
1141 		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
1142 		return -ENOMEM;
1143 	}
1144 	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
1145 	/* first FLE entry used to store mbuf and session ctxt */
1146 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1147 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1148 
1149 	op_fle = fle + 1;
1150 	ip_fle = fle + 2;
1151 	sge = fle + 3;
1152 
1153 	flc = &priv->flc_desc[0].flc;
1154 
1155 	DPAA2_SEC_DP_DEBUG(
1156 		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
1157 		" data_off: 0x%x\n",
1158 		data_offset,
1159 		data_len,
1160 		sess->iv.length,
1161 		sym_op->m_src->data_off);
1162 
1163 	/* o/p fle */
1164 	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
1165 	op_fle->length = data_len;
1166 	DPAA2_SET_FLE_SG_EXT(op_fle);
1167 
1168 	/* o/p 1st seg */
1169 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1170 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1171 	sge->length = mbuf->data_len - data_offset;
1172 
1173 	mbuf = mbuf->next;
1174 	/* o/p segs */
1175 	while (mbuf) {
1176 		sge++;
1177 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1178 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1179 		sge->length = mbuf->data_len;
1180 		mbuf = mbuf->next;
1181 	}
1182 	DPAA2_SET_FLE_FIN(sge);
1183 
1184 	DPAA2_SEC_DP_DEBUG(
1185 		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
1186 		flc, fle, fle->addr_hi, fle->addr_lo,
1187 		fle->length);
1188 
1189 	/* i/p fle */
1190 	mbuf = sym_op->m_src;
1191 	sge++;
1192 	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
1193 	ip_fle->length = sess->iv.length + data_len;
1194 	DPAA2_SET_FLE_SG_EXT(ip_fle);
1195 
1196 	/* i/p IV */
1197 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1198 	DPAA2_SET_FLE_OFFSET(sge, 0);
1199 	sge->length = sess->iv.length;
1200 
1201 	sge++;
1202 
1203 	/* i/p 1st seg */
1204 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1205 	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
1206 	sge->length = mbuf->data_len - data_offset;
1207 
1208 	mbuf = mbuf->next;
1209 	/* i/p segs */
1210 	while (mbuf) {
1211 		sge++;
1212 		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
1213 		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
1214 		sge->length = mbuf->data_len;
1215 		mbuf = mbuf->next;
1216 	}
1217 	DPAA2_SET_FLE_FIN(sge);
1218 	DPAA2_SET_FLE_FIN(ip_fle);
1219 
1220 	/* sg fd */
1221 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
1222 	DPAA2_SET_FD_LEN(fd, ip_fle->length);
1223 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1224 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1225 
1226 	DPAA2_SEC_DP_DEBUG(
1227 		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1228 		" off =%d, len =%d\n",
1229 		DPAA2_GET_FD_ADDR(fd),
1230 		DPAA2_GET_FD_BPID(fd),
1231 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1232 		DPAA2_GET_FD_OFFSET(fd),
1233 		DPAA2_GET_FD_LEN(fd));
1234 	return 0;
1235 }
1236 
1237 static int
1238 build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
1239 		struct qbman_fd *fd, uint16_t bpid)
1240 {
1241 	struct rte_crypto_sym_op *sym_op = op->sym;
1242 	struct qbman_fle *fle, *sge;
1243 	int retval, data_len, data_offset;
1244 	struct sec_flow_context *flc;
1245 	struct ctxt_priv *priv = sess->ctxt;
1246 	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1247 			sess->iv.offset);
1248 	struct rte_mbuf *dst;
1249 
1250 	data_len = sym_op->cipher.data.length;
1251 	data_offset = sym_op->cipher.data.offset;
1252 
1253 	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1254 		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1255 		if ((data_len & 7) || (data_offset & 7)) {
1256 			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
1257 			return -ENOTSUP;
1258 		}
1259 
1260 		data_len = data_len >> 3;
1261 		data_offset = data_offset >> 3;
1262 	}
1263 
1264 	if (sym_op->m_dst)
1265 		dst = sym_op->m_dst;
1266 	else
1267 		dst = sym_op->m_src;
1268 
1269 	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
1270 	if (retval) {
1271 		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
1272 		return -ENOMEM;
1273 	}
1274 	memset(fle, 0, FLE_POOL_BUF_SIZE);
1275 	/* TODO we are using the first FLE entry to store Mbuf.
1276 	 * Currently we do not know which FLE has the mbuf stored.
1277 	 * So while retrieving we can go back 1 FLE from the FD ADDR
1278 	 * to get the mbuf address from the previous FLE.
1279 	 * A better approach would be to use the inline mbuf.
1280 	 */
1281 	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
1282 	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
1283 	fle = fle + 1;
1284 	sge = fle + 2;
1285 
1286 	if (likely(bpid < MAX_BPID)) {
1287 		DPAA2_SET_FD_BPID(fd, bpid);
1288 		DPAA2_SET_FLE_BPID(fle, bpid);
1289 		DPAA2_SET_FLE_BPID(fle + 1, bpid);
1290 		DPAA2_SET_FLE_BPID(sge, bpid);
1291 		DPAA2_SET_FLE_BPID(sge + 1, bpid);
1292 	} else {
1293 		DPAA2_SET_FD_IVP(fd);
1294 		DPAA2_SET_FLE_IVP(fle);
1295 		DPAA2_SET_FLE_IVP((fle + 1));
1296 		DPAA2_SET_FLE_IVP(sge);
1297 		DPAA2_SET_FLE_IVP((sge + 1));
1298 	}
1299 
1300 	flc = &priv->flc_desc[0].flc;
1301 	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
1302 	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
1303 	DPAA2_SET_FD_COMPOUND_FMT(fd);
1304 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
1305 
1306 	DPAA2_SEC_DP_DEBUG(
1307 		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
1308 		" data_off: 0x%x\n",
1309 		data_offset,
1310 		data_len,
1311 		sess->iv.length,
1312 		sym_op->m_src->data_off);
1313 
1314 	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
1315 	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);
1316 
1317 	fle->length = data_len + sess->iv.length;
1318 
1319 	DPAA2_SEC_DP_DEBUG(
1320 		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
1321 		flc, fle, fle->addr_hi, fle->addr_lo,
1322 		fle->length);
1323 
1324 	fle++;
1325 
1326 	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
1327 	fle->length = data_len + sess->iv.length;
1328 
1329 	DPAA2_SET_FLE_SG_EXT(fle);
1330 
1331 	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
1332 	sge->length = sess->iv.length;
1333 
1334 	sge++;
1335 	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
1336 	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
1337 
1338 	sge->length = data_len;
1339 	DPAA2_SET_FLE_FIN(sge);
1340 	DPAA2_SET_FLE_FIN(fle);
1341 
1342 	DPAA2_SEC_DP_DEBUG(
1343 		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
1344 		" off =%d, len =%d\n",
1345 		DPAA2_GET_FD_ADDR(fd),
1346 		DPAA2_GET_FD_BPID(fd),
1347 		rte_dpaa2_bpid_info[bpid].meta_data_size,
1348 		DPAA2_GET_FD_OFFSET(fd),
1349 		DPAA2_GET_FD_LEN(fd));
1350 
1351 	return 0;
1352 }
1353 
1354 static inline int
1355 build_sec_fd(struct rte_crypto_op *op,
1356 	     struct qbman_fd *fd, uint16_t bpid)
1357 {
1358 	int ret = -1;
1359 	dpaa2_sec_session *sess;
1360 
1361 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1362 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1363 				op->sym->session, cryptodev_driver_id);
1364 #ifdef RTE_LIB_SECURITY
1365 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1366 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1367 				op->sym->sec_session);
1368 #endif
1369 	else
1370 		return -ENOTSUP;
1371 
1372 	if (!sess)
1373 		return -EINVAL;
1374 
1375 	/* Any of the buffers is segmented */
1376 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
1377 		  ((op->sym->m_dst != NULL) &&
1378 		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1379 		switch (sess->ctxt_type) {
1380 		case DPAA2_SEC_CIPHER:
1381 			ret = build_cipher_sg_fd(sess, op, fd, bpid);
1382 			break;
1383 		case DPAA2_SEC_AUTH:
1384 			ret = build_auth_sg_fd(sess, op, fd, bpid);
1385 			break;
1386 		case DPAA2_SEC_AEAD:
1387 			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
1388 			break;
1389 		case DPAA2_SEC_CIPHER_HASH:
1390 			ret = build_authenc_sg_fd(sess, op, fd, bpid);
1391 			break;
1392 #ifdef RTE_LIB_SECURITY
1393 		case DPAA2_SEC_IPSEC:
1394 		case DPAA2_SEC_PDCP:
1395 			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
1396 			break;
1397 #endif
1398 		case DPAA2_SEC_HASH_CIPHER:
1399 		default:
1400 			DPAA2_SEC_ERR("error: Unsupported session");
1401 		}
1402 	} else {
1403 		switch (sess->ctxt_type) {
1404 		case DPAA2_SEC_CIPHER:
1405 			ret = build_cipher_fd(sess, op, fd, bpid);
1406 			break;
1407 		case DPAA2_SEC_AUTH:
1408 			ret = build_auth_fd(sess, op, fd, bpid);
1409 			break;
1410 		case DPAA2_SEC_AEAD:
1411 			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
1412 			break;
1413 		case DPAA2_SEC_CIPHER_HASH:
1414 			ret = build_authenc_fd(sess, op, fd, bpid);
1415 			break;
1416 #ifdef RTE_LIB_SECURITY
1417 		case DPAA2_SEC_IPSEC:
1418 			ret = build_proto_fd(sess, op, fd, bpid);
1419 			break;
1420 		case DPAA2_SEC_PDCP:
1421 			ret = build_proto_compound_fd(sess, op, fd, bpid);
1422 			break;
1423 #endif
1424 		case DPAA2_SEC_HASH_CIPHER:
1425 		default:
1426 			DPAA2_SEC_ERR("error: Unsupported session");
1427 			ret = -ENOTSUP;
1428 		}
1429 	}
1430 	return ret;
1431 }
1432 
1433 static uint16_t
1434 dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1435 			uint16_t nb_ops)
1436 {
1437 	/* Function to transmit the frames to the given device and VQ */
1438 	uint32_t loop;
1439 	int32_t ret;
1440 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1441 	uint32_t frames_to_send, retry_count;
1442 	struct qbman_eq_desc eqdesc;
1443 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1444 	struct qbman_swp *swp;
1445 	uint16_t num_tx = 0;
1446 	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1447 	/* TODO: need to support multiple buffer pools */
1448 	uint16_t bpid;
1449 	struct rte_mempool *mb_pool;
1450 
1451 	if (unlikely(nb_ops == 0))
1452 		return 0;
1453 
1454 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1455 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1456 		return 0;
1457 	}
1458 	/*Prepare enqueue descriptor*/
1459 	qbman_eq_desc_clear(&eqdesc);
1460 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1461 	qbman_eq_desc_set_response(&eqdesc, 0, 0);
1462 	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
1463 
1464 	if (!DPAA2_PER_LCORE_DPIO) {
1465 		ret = dpaa2_affine_qbman_swp();
1466 		if (ret) {
1467 			DPAA2_SEC_ERR(
1468 				"Failed to allocate IO portal, tid: %d\n",
1469 				rte_gettid());
1470 			return 0;
1471 		}
1472 	}
1473 	swp = DPAA2_PER_LCORE_PORTAL;
1474 
1475 	while (nb_ops) {
1476 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1477 			dpaa2_eqcr_size : nb_ops;
1478 
1479 		for (loop = 0; loop < frames_to_send; loop++) {
1480 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1481 				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
1482 					DPAA2_PER_LCORE_DQRR_SIZE--;
1483 					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
1484 					*dpaa2_seqn((*ops)->sym->m_src) &
1485 					QBMAN_EQCR_DCA_IDXMASK);
1486 				}
1487 				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
1488 				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
1489 			}
1490 
1491 			/*Clear the unused FD fields before sending*/
1492 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1493 			mb_pool = (*ops)->sym->m_src->pool;
1494 			bpid = mempool_to_bpid(mb_pool);
1495 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1496 			if (ret) {
1497 				DPAA2_SEC_ERR("error: Improper packet contents"
1498 					      " for crypto operation");
1499 				goto skip_tx;
1500 			}
1501 			ops++;
1502 		}
1503 
1504 		loop = 0;
1505 		retry_count = 0;
1506 		while (loop < frames_to_send) {
1507 			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1508 							 &fd_arr[loop],
1509 							 &flags[loop],
1510 							 frames_to_send - loop);
1511 			if (unlikely(ret < 0)) {
1512 				retry_count++;
1513 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1514 					num_tx += loop;
1515 					nb_ops -= loop;
1516 					goto skip_tx;
1517 				}
1518 			} else {
1519 				loop += ret;
1520 				retry_count = 0;
1521 			}
1522 		}
1523 
1524 		num_tx += loop;
1525 		nb_ops -= loop;
1526 	}
1527 skip_tx:
1528 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1529 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1530 	return num_tx;
1531 }
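/* Caller-side sketch (assumption: the generic cryptodev burst API is the
 * entry point; dev_id/qp_id and the burst size are illustrative, and
 * error handling is elided):
 *
 *   struct rte_crypto_op *ops[32];
 *   // ... populate ops with symmetric ops attached to a session ...
 *   uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, 32);
 *   uint16_t done = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
 *
 * The burst above lands in dpaa2_sec_enqueue_burst() (or the ordered
 * variant below, depending on configuration).
 */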
1532 
1533 #ifdef RTE_LIB_SECURITY
1534 static inline struct rte_crypto_op *
1535 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
1536 {
1537 	struct rte_crypto_op *op;
1538 	uint16_t len = DPAA2_GET_FD_LEN(fd);
1539 	int16_t diff = 0;
1540 	dpaa2_sec_session *sess_priv __rte_unused;
1541 
1542 	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
1543 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
1544 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1545 
1546 	diff = len - mbuf->pkt_len;
1547 	mbuf->pkt_len += diff;
1548 	mbuf->data_len += diff;
1549 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
1550 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
1551 	op->sym->aead.digest.phys_addr = 0L;
1552 
1553 	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
1554 				op->sym->sec_session);
1555 	if (sess_priv->dir == DIR_ENC)
1556 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
1557 	else
1558 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
1559 
1560 	return op;
1561 }
1562 #endif
1563 
1564 static inline struct rte_crypto_op *
1565 sec_fd_to_mbuf(const struct qbman_fd *fd)
1566 {
1567 	struct qbman_fle *fle;
1568 	struct rte_crypto_op *op;
1569 	struct ctxt_priv *priv;
1570 	struct rte_mbuf *dst, *src;
1571 
1572 #ifdef RTE_LIB_SECURITY
1573 	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
1574 		return sec_simple_fd_to_mbuf(fd);
1575 #endif
1576 	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1577 
1578 	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
1579 			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
1580 
1581 	/* we are using the first FLE entry to store Mbuf.
1582 	 * Currently we do not know which FLE has the mbuf stored.
1583 	 * So while retrieving we can go back 1 FLE from the FD ADDR
1584 	 * to get the mbuf address from the previous FLE.
1585 	 * A better approach would be to use the inline mbuf.
1586 	 */
1587 
1588 	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
1589 		/* TODO complete it. */
1590 		DPAA2_SEC_ERR("error: non inline buffer");
1591 		return NULL;
1592 	}
1593 	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
1594 
1595 	/* Prefetch op */
1596 	src = op->sym->m_src;
1597 	rte_prefetch0(src);
1598 
1599 	if (op->sym->m_dst) {
1600 		dst = op->sym->m_dst;
1601 		rte_prefetch0(dst);
1602 	} else
1603 		dst = src;
1604 
1605 #ifdef RTE_LIB_SECURITY
1606 	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
1607 		uint16_t len = DPAA2_GET_FD_LEN(fd);
1608 		dst->pkt_len = len;
1609 		while (dst->next != NULL) {
1610 			len -= dst->data_len;
1611 			dst = dst->next;
1612 		}
1613 		dst->data_len = len;
1614 	}
1615 #endif
1616 	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
1617 		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
1618 		(void *)dst,
1619 		dst->buf_addr,
1620 		DPAA2_GET_FD_ADDR(fd),
1621 		DPAA2_GET_FD_BPID(fd),
1622 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
1623 		DPAA2_GET_FD_OFFSET(fd),
1624 		DPAA2_GET_FD_LEN(fd));
1625 
1626 	/* free the fle memory */
1627 	if (likely(rte_pktmbuf_is_contiguous(src))) {
1628 		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
1629 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
1630 	} else
1631 		rte_free((void *)(fle-1));
1632 
1633 	return op;
1634 }
1635 
1636 static void
1637 dpaa2_sec_dump(struct rte_crypto_op *op)
1638 {
1639 	int i;
1640 	dpaa2_sec_session *sess = NULL;
1641 	struct ctxt_priv *priv;
1642 	uint8_t bufsize;
1643 	struct rte_crypto_sym_op *sym_op;
1644 
1645 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
1646 		sess = (dpaa2_sec_session *)get_sym_session_private_data(
1647 			op->sym->session, cryptodev_driver_id);
1648 #ifdef RTE_LIBRTE_SECURITY
1649 	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
1650 		sess = (dpaa2_sec_session *)get_sec_session_private_data(
1651 			op->sym->sec_session);
1652 #endif
1653 
1654 	if (sess == NULL)
1655 		goto mbuf_dump;
1656 
1657 	priv = (struct ctxt_priv *)sess->ctxt;
1658 	printf("\n****************************************\n"
1659 		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
1660 		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
1661 		"\tCipher key len:\t%zd\n", sess->ctxt_type,
1662 		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
1663 		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
1664 		sess->cipher_key.length);
1665 		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
1666 				sess->cipher_key.length);
1667 		rte_hexdump(stdout, "auth key", sess->auth_key.data,
1668 				sess->auth_key.length);
1669 	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
1670 		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
1671 		" len:\t%d\n\taead cipher text:\t%d\n",
1672 		sess->auth_key.length, sess->iv.length, sess->iv.offset,
1673 		sess->digest_length, sess->status,
1674 		sess->ext_params.aead_ctxt.auth_only_len,
1675 		sess->ext_params.aead_ctxt.auth_cipher_text);
1676 #ifdef RTE_LIBRTE_SECURITY
1677 	printf("PDCP session params:\n"
1678 		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
1679 		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
1680 		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
1681 		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
1682 		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
1683 		sess->pdcp.hfn_threshold);
1684 
1685 #endif
1686 	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
1687 	printf("Descriptor Dump:\n");
1688 	for (i = 0; i < bufsize; i++)
1689 		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
1690 
1691 	printf("\n");
1692 mbuf_dump:
1693 	sym_op = op->sym;
1694 	if (sym_op->m_src) {
1695 		printf("Source mbuf:\n");
1696 		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
1697 	}
1698 	if (sym_op->m_dst) {
1699 		printf("Destination mbuf:\n");
1700 		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
1701 	}
1702 
1703 	printf("Session address = %p\ncipher offset: %d, length: %d\n"
1704 		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
1705 		, sym_op->session,
1706 		sym_op->cipher.data.offset, sym_op->cipher.data.length,
1707 		sym_op->auth.data.offset, sym_op->auth.data.length,
1708 		sym_op->aead.data.offset, sym_op->aead.data.length);
1709 	printf("\n");
1710 
1711 }
1712 
1713 static void
1714 dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
1715 {
1716 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1717 	struct rte_crypto_op *op;
1718 	struct qbman_fd *fd;
1719 
1720 	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1721 	op = sec_fd_to_mbuf(fd);
1722 	/* Instead of freeing, enqueue it to the sec tx queue (sec->core)
1723 	 * after setting an error in the FD, but this would have a performance impact.
1724 	 */
1725 	rte_pktmbuf_free(op->sym->m_src);
1726 }
1727 
1728 static void
1729 dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1730 			     struct rte_mbuf *m,
1731 			     struct qbman_eq_desc *eqdesc)
1732 {
1733 	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1734 	struct eqresp_metadata *eqresp_meta;
1735 	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
1736 	uint16_t orpid, seqnum;
1737 	uint8_t dq_idx;
1738 
1739 	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1740 		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1741 			DPAA2_EQCR_OPRID_SHIFT;
1742 		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1743 			DPAA2_EQCR_SEQNUM_SHIFT;
1744 
1745 
1746 		if (!priv->en_loose_ordered) {
1747 			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1748 			qbman_eq_desc_set_response(eqdesc, (uint64_t)
1749 				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1750 				dpio_dev->eqresp_pi]), 1);
1751 			qbman_eq_desc_set_token(eqdesc, 1);
1752 
1753 			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
1754 			eqresp_meta->dpaa2_q = dpaa2_q;
1755 			eqresp_meta->mp = m->pool;
1756 
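			/* Advance the enqueue-response producer index,
			 * wrapping at the end of the ring.
			 */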
1757 			if (++dpio_dev->eqresp_pi == MAX_EQ_RESP_ENTRIES)
1758 				dpio_dev->eqresp_pi = 0;
1759 		} else {
1760 			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1761 		}
1762 	} else {
1763 		dq_idx = *dpaa2_seqn(m) - 1;
1764 		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1765 		DPAA2_PER_LCORE_DQRR_SIZE--;
1766 		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1767 	}
1768 	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1769 }
1770 
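/* Ordered variant of the enqueue burst: each frame gets its own enqueue
 * descriptor so that order restoration and enqueue-response bookkeeping
 * can be programmed per packet. Applications reach this path through the
 * generic API, e.g. (a minimal sketch, dev/queue ids assumed):
 *
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 */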
1772 static uint16_t
1773 dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
1774 			uint16_t nb_ops)
1775 {
1776 	/* Function to transmit the frames to a given device and VQ */
1777 	uint32_t loop;
1778 	int32_t ret;
1779 	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1780 	uint32_t frames_to_send, num_free_eq_desc, retry_count;
1781 	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1782 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1783 	struct qbman_swp *swp;
1784 	uint16_t num_tx = 0;
1785 	uint16_t bpid;
1786 	struct rte_mempool *mb_pool;
1787 	struct dpaa2_sec_dev_private *priv =
1788 				dpaa2_qp->tx_vq.crypto_data->dev_private;
1789 
1790 	if (unlikely(nb_ops == 0))
1791 		return 0;
1792 
1793 	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
1794 		DPAA2_SEC_ERR("sessionless crypto op not supported");
1795 		return 0;
1796 	}
1797 
1798 	if (!DPAA2_PER_LCORE_DPIO) {
1799 		ret = dpaa2_affine_qbman_swp();
1800 		if (ret) {
1801 			DPAA2_SEC_ERR("Failure in affining portal");
1802 			return 0;
1803 		}
1804 	}
1805 	swp = DPAA2_PER_LCORE_PORTAL;
1806 
1807 	while (nb_ops) {
1808 		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
1809 			dpaa2_eqcr_size : nb_ops;
1810 
1811 		if (!priv->en_loose_ordered) {
1812 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
1813 				num_free_eq_desc = dpaa2_free_eq_descriptors();
1814 				if (num_free_eq_desc < frames_to_send)
1815 					frames_to_send = num_free_eq_desc;
1816 			}
1817 		}
1818 
1819 		for (loop = 0; loop < frames_to_send; loop++) {
1820 			/*Prepare enqueue descriptor*/
1821 			qbman_eq_desc_clear(&eqdesc[loop]);
1822 			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
1823 
1824 			if (*dpaa2_seqn((*ops)->sym->m_src))
1825 				dpaa2_sec_set_enqueue_descriptor(
1826 						&dpaa2_qp->tx_vq,
1827 						(*ops)->sym->m_src,
1828 						&eqdesc[loop]);
1829 			else
1830 				qbman_eq_desc_set_no_orp(&eqdesc[loop],
1831 							 DPAA2_EQ_RESP_ERR_FQ);
1832 
1833 			/*Clear the unused FD fields before sending*/
1834 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
1835 			mb_pool = (*ops)->sym->m_src->pool;
1836 			bpid = mempool_to_bpid(mb_pool);
1837 			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
1838 			if (ret) {
1839 				DPAA2_SEC_ERR("error: Improper packet contents"
1840 					      " for crypto operation");
1841 				goto skip_tx;
1842 			}
1843 			ops++;
1844 		}
1845 
1846 		loop = 0;
1847 		retry_count = 0;
1848 		while (loop < frames_to_send) {
1849 			ret = qbman_swp_enqueue_multiple_desc(swp,
1850 					&eqdesc[loop], &fd_arr[loop],
1851 					frames_to_send - loop);
1852 			if (unlikely(ret < 0)) {
1853 				retry_count++;
1854 				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1855 					num_tx += loop;
1856 					nb_ops -= loop;
1857 					goto skip_tx;
1858 				}
1859 			} else {
1860 				loop += ret;
1861 				retry_count = 0;
1862 			}
1863 		}
1864 
1865 		num_tx += loop;
1866 		nb_ops -= loop;
1867 	}
1868 
1869 skip_tx:
1870 	dpaa2_qp->tx_vq.tx_pkts += num_tx;
1871 	dpaa2_qp->tx_vq.err_pkts += nb_ops;
1872 	return num_tx;
1873 }
1874 
1875 static uint16_t
1876 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1877 			uint16_t nb_ops)
1878 {
1879 	/* Function to receive frames for a given device and VQ */
1880 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
1881 	struct qbman_result *dq_storage;
1882 	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
1883 	int ret, num_rx = 0;
1884 	uint8_t is_last = 0, status;
1885 	struct qbman_swp *swp;
1886 	const struct qbman_fd *fd;
1887 	struct qbman_pull_desc pulldesc;
1888 
1889 	if (!DPAA2_PER_LCORE_DPIO) {
1890 		ret = dpaa2_affine_qbman_swp();
1891 		if (ret) {
1892 			DPAA2_SEC_ERR(
1893 				"Failed to allocate IO portal, tid: %d\n",
1894 				rte_gettid());
1895 			return 0;
1896 		}
1897 	}
1898 	swp = DPAA2_PER_LCORE_PORTAL;
1899 	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
1900 
1901 	qbman_pull_desc_clear(&pulldesc);
1902 	qbman_pull_desc_set_numframes(&pulldesc,
1903 				      (nb_ops > dpaa2_dqrr_size) ?
1904 				      dpaa2_dqrr_size : nb_ops);
1905 	qbman_pull_desc_set_fq(&pulldesc, fqid);
1906 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1907 				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
1908 				    1);
1909 
1910 	/* Issue a volatile dequeue command. */
1911 	while (1) {
1912 		if (qbman_swp_pull(swp, &pulldesc)) {
1913 			DPAA2_SEC_WARN(
1914 				"SEC VDQ command could not be issued: QBMAN busy");
1915 			/* Portal was busy, try again */
1916 			continue;
1917 		}
1918 		break;
1919 	}
1920 
1921 	/* Receive the packets till the Last Dequeue entry is found with
1922 	 * respect to the above issued PULL command.
1923 	 */
1924 	while (!is_last) {
1925 		/* Check if the previously issued command is completed.
1926 		 * Also seems like the SWP is shared between the Ethernet
1927 		 * driver and the SEC driver.
1928 		 */
1929 		while (!qbman_check_command_complete(dq_storage))
1930 			;
1931 
1932 		/* Loop until the dq_storage is updated with
1933 		 * new token by QBMAN
1934 		 */
1935 		while (!qbman_check_new_result(dq_storage))
1936 			;
1937 		/* Check whether the last pull command has expired and
1938 		 * set the condition for loop termination.
1939 		 */
1940 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1941 			is_last = 1;
1942 			/* Check for valid frame. */
1943 			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
1944 			if (unlikely(
1945 				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
1946 				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
1947 				continue;
1948 			}
1949 		}
1950 
1951 		fd = qbman_result_DQ_fd(dq_storage);
1952 		ops[num_rx] = sec_fd_to_mbuf(fd);
1953 
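		/* A non-zero frame result (FRC) in the FD means SEC reported
		 * an error for this op; report it and optionally dump the op
		 * according to the configured dump level.
		 */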
1954 		if (unlikely(fd->simple.frc)) {
1955 			/* TODO Parse SEC errors */
1956 			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
1957 				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
1958 						 fd->simple.frc);
1959 				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
1960 					dpaa2_sec_dump(ops[num_rx]);
1961 			}
1962 
1963 			dpaa2_qp->rx_vq.err_pkts += 1;
1964 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
1965 		} else {
1966 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1967 		}
1968 
1969 		num_rx++;
1970 		dq_storage++;
1971 	} /* End of Packet Rx loop */
1972 
1973 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
1974 
1975 	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
1976 				dpaa2_qp->rx_vq.err_pkts);
1977 	/* Return the total number of packets received to the DPAA2 app */
1978 	return num_rx;
1979 }
1980 
1981 /** Release queue pair */
1982 static int
1983 dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
1984 {
1985 	struct dpaa2_sec_qp *qp =
1986 		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
1987 
1988 	PMD_INIT_FUNC_TRACE();
1989 
1990 	if (qp->rx_vq.q_storage) {
1991 		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
1992 		rte_free(qp->rx_vq.q_storage);
1993 	}
1994 	rte_free(qp);
1995 
1996 	dev->data->queue_pairs[queue_pair_id] = NULL;
1997 
1998 	return 0;
1999 }
2000 
2001 /** Setup a queue pair */
2002 static int
2003 dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
2004 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
2005 		__rte_unused int socket_id)
2006 {
2007 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2008 	struct dpaa2_sec_qp *qp;
2009 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2010 	struct dpseci_rx_queue_cfg cfg;
2011 	int32_t retcode;
2012 
2013 	PMD_INIT_FUNC_TRACE();
2014 
2015 	/* If the qp is already set up, nothing to be done. */
2016 	if (dev->data->queue_pairs[qp_id] != NULL) {
2017 		DPAA2_SEC_INFO("QP already setup");
2018 		return 0;
2019 	}
2020 
2021 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
2022 		    dev, qp_id, qp_conf);
2023 
2024 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
2025 
2026 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
2027 			RTE_CACHE_LINE_SIZE);
2028 	if (!qp) {
2029 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
2030 		return -ENOMEM;
2031 	}
2032 
2033 	qp->rx_vq.crypto_data = dev->data;
2034 	qp->tx_vq.crypto_data = dev->data;
2035 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
2036 		sizeof(struct queue_storage_info_t),
2037 		RTE_CACHE_LINE_SIZE);
2038 	if (!qp->rx_vq.q_storage) {
2039 		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
2040 		return -ENOMEM;
2041 	}
2042 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
2043 
2044 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
2045 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
2046 		return -ENOMEM;
2047 	}
2048 
2049 	dev->data->queue_pairs[qp_id] = qp;
2050 
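	/* Register the rx virtual queue as the user context so dequeued
	 * frames can be mapped back to this queue pair.
	 */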
2051 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
2052 	cfg.user_ctx = (size_t)(&qp->rx_vq);
2053 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
2054 				      qp_id, &cfg);
2055 	return retcode;
2056 }
2057 
2058 /** Returns the size of the DPAA2 SEC session structure */
2059 static unsigned int
2060 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2061 {
2062 	PMD_INIT_FUNC_TRACE();
2063 
2064 	return sizeof(dpaa2_sec_session);
2065 }
2066 
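/* Build a cipher-only session: copy the key, record the IV parameters and
 * construct the shared descriptor for the selected cipher through the RTA
 * cnstr_shdsc_* helpers.
 */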
2067 static int
2068 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
2069 		      struct rte_crypto_sym_xform *xform,
2070 		      dpaa2_sec_session *session)
2071 {
2072 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2073 	struct alginfo cipherdata;
2074 	int bufsize, ret = 0;
2075 	struct ctxt_priv *priv;
2076 	struct sec_flow_context *flc;
2077 
2078 	PMD_INIT_FUNC_TRACE();
2079 
2080 	/* For SEC CIPHER only one descriptor is required. */
2081 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2082 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2083 			RTE_CACHE_LINE_SIZE);
2084 	if (priv == NULL) {
2085 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2086 		return -ENOMEM;
2087 	}
2088 
2089 	priv->fle_pool = dev_priv->fle_pool;
2090 
2091 	flc = &priv->flc_desc[0].flc;
2092 
2093 	session->ctxt_type = DPAA2_SEC_CIPHER;
2094 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2095 			RTE_CACHE_LINE_SIZE);
2096 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2097 		DPAA2_SEC_ERR("No Memory for cipher key");
2098 		rte_free(priv);
2099 		return -ENOMEM;
2100 	}
2101 	session->cipher_key.length = xform->cipher.key.length;
2102 
2103 	memcpy(session->cipher_key.data, xform->cipher.key.data,
2104 	       xform->cipher.key.length);
2105 	cipherdata.key = (size_t)session->cipher_key.data;
2106 	cipherdata.keylen = session->cipher_key.length;
2107 	cipherdata.key_enc_flags = 0;
2108 	cipherdata.key_type = RTA_DATA_IMM;
2109 
2110 	/* Set IV parameters */
2111 	session->iv.offset = xform->cipher.iv.offset;
2112 	session->iv.length = xform->cipher.iv.length;
2113 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2114 				DIR_ENC : DIR_DEC;
2115 
2116 	switch (xform->cipher.algo) {
2117 	case RTE_CRYPTO_CIPHER_AES_CBC:
2118 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2119 		cipherdata.algmode = OP_ALG_AAI_CBC;
2120 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2121 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2122 						SHR_NEVER, &cipherdata,
2123 						session->iv.length,
2124 						session->dir);
2125 		break;
2126 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2127 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2128 		cipherdata.algmode = OP_ALG_AAI_CBC;
2129 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2130 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2131 						SHR_NEVER, &cipherdata,
2132 						session->iv.length,
2133 						session->dir);
2134 		break;
2135 	case RTE_CRYPTO_CIPHER_DES_CBC:
2136 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2137 		cipherdata.algmode = OP_ALG_AAI_CBC;
2138 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2139 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2140 						SHR_NEVER, &cipherdata,
2141 						session->iv.length,
2142 						session->dir);
2143 		break;
2144 	case RTE_CRYPTO_CIPHER_AES_CTR:
2145 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2146 		cipherdata.algmode = OP_ALG_AAI_CTR;
2147 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2148 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
2149 						SHR_NEVER, &cipherdata,
2150 						session->iv.length,
2151 						session->dir);
2152 		break;
2153 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2154 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
2155 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
2156 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
2157 					      &cipherdata,
2158 					      session->dir);
2159 		break;
2160 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2161 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
2162 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
2163 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
2164 					      &cipherdata,
2165 					      session->dir);
2166 		break;
2167 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2168 	case RTE_CRYPTO_CIPHER_AES_F8:
2169 	case RTE_CRYPTO_CIPHER_AES_ECB:
2170 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2171 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2172 	case RTE_CRYPTO_CIPHER_AES_XTS:
2173 	case RTE_CRYPTO_CIPHER_ARC4:
2174 	case RTE_CRYPTO_CIPHER_NULL:
2175 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2176 			xform->cipher.algo);
2177 		ret = -ENOTSUP;
2178 		goto error_out;
2179 	default:
2180 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2181 			xform->cipher.algo);
2182 		ret = -ENOTSUP;
2183 		goto error_out;
2184 	}
2185 
2186 	if (bufsize < 0) {
2187 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2188 		ret = -EINVAL;
2189 		goto error_out;
2190 	}
2191 
2192 	flc->word1_sdl = (uint8_t)bufsize;
2193 	session->ctxt = priv;
2194 
2195 #ifdef CAAM_DESC_DEBUG
2196 	int i;
2197 	for (i = 0; i < bufsize; i++)
2198 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
2199 #endif
2200 	return ret;
2201 
2202 error_out:
2203 	rte_free(session->cipher_key.data);
2204 	rte_free(priv);
2205 	return ret;
2206 }
2207 
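/* Build an auth-only session: copy the key (if any), record the digest
 * length and direction, and construct the shared descriptor for the HMAC,
 * plain hash or AES-MAC algorithm.
 */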
2208 static int
2209 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
2210 		    struct rte_crypto_sym_xform *xform,
2211 		    dpaa2_sec_session *session)
2212 {
2213 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2214 	struct alginfo authdata;
2215 	int bufsize, ret = 0;
2216 	struct ctxt_priv *priv;
2217 	struct sec_flow_context *flc;
2218 
2219 	PMD_INIT_FUNC_TRACE();
2220 
2221 	/* For SEC AUTH three descriptors are required for various stages */
2222 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2223 			sizeof(struct ctxt_priv) + 3 *
2224 			sizeof(struct sec_flc_desc),
2225 			RTE_CACHE_LINE_SIZE);
2226 	if (priv == NULL) {
2227 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2228 		return -ENOMEM;
2229 	}
2230 
2231 	priv->fle_pool = dev_priv->fle_pool;
2232 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
2233 
2234 	session->ctxt_type = DPAA2_SEC_AUTH;
2235 	session->auth_key.length = xform->auth.key.length;
2236 	if (xform->auth.key.length) {
2237 		session->auth_key.data = rte_zmalloc(NULL,
2238 			xform->auth.key.length,
2239 			RTE_CACHE_LINE_SIZE);
2240 		if (session->auth_key.data == NULL) {
2241 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
2242 			rte_free(priv);
2243 			return -ENOMEM;
2244 		}
2245 		memcpy(session->auth_key.data, xform->auth.key.data,
2246 		       xform->auth.key.length);
2247 		authdata.key = (size_t)session->auth_key.data;
2248 		authdata.key_enc_flags = 0;
2249 		authdata.key_type = RTA_DATA_IMM;
2250 	}
2251 	authdata.keylen = session->auth_key.length;
2252 
2253 	session->digest_length = xform->auth.digest_length;
2254 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2255 				DIR_ENC : DIR_DEC;
2256 
2257 	switch (xform->auth.algo) {
2258 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2259 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2260 		authdata.algmode = OP_ALG_AAI_HMAC;
2261 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2262 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2263 					   1, 0, SHR_NEVER, &authdata,
2264 					   !session->dir,
2265 					   session->digest_length);
2266 		break;
2267 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2268 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2269 		authdata.algmode = OP_ALG_AAI_HMAC;
2270 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2271 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2272 					   1, 0, SHR_NEVER, &authdata,
2273 					   !session->dir,
2274 					   session->digest_length);
2275 		break;
2276 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2277 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2278 		authdata.algmode = OP_ALG_AAI_HMAC;
2279 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2280 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2281 					   1, 0, SHR_NEVER, &authdata,
2282 					   !session->dir,
2283 					   session->digest_length);
2284 		break;
2285 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2286 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2287 		authdata.algmode = OP_ALG_AAI_HMAC;
2288 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2289 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2290 					   1, 0, SHR_NEVER, &authdata,
2291 					   !session->dir,
2292 					   session->digest_length);
2293 		break;
2294 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2295 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2296 		authdata.algmode = OP_ALG_AAI_HMAC;
2297 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2298 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2299 					   1, 0, SHR_NEVER, &authdata,
2300 					   !session->dir,
2301 					   session->digest_length);
2302 		break;
2303 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2304 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2305 		authdata.algmode = OP_ALG_AAI_HMAC;
2306 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2307 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2308 					   1, 0, SHR_NEVER, &authdata,
2309 					   !session->dir,
2310 					   session->digest_length);
2311 		break;
2312 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2313 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2314 		authdata.algmode = OP_ALG_AAI_F9;
2315 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2316 		session->iv.offset = xform->auth.iv.offset;
2317 		session->iv.length = xform->auth.iv.length;
2318 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2319 					      1, 0, &authdata,
2320 					      !session->dir,
2321 					      session->digest_length);
2322 		break;
2323 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2324 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2325 		authdata.algmode = OP_ALG_AAI_F9;
2326 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2327 		session->iv.offset = xform->auth.iv.offset;
2328 		session->iv.length = xform->auth.iv.length;
2329 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2330 					   1, 0, &authdata,
2331 					   !session->dir,
2332 					   session->digest_length);
2333 		break;
2334 	case RTE_CRYPTO_AUTH_SHA1:
2335 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2336 		authdata.algmode = OP_ALG_AAI_HASH;
2337 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2338 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2339 					   1, 0, SHR_NEVER, &authdata,
2340 					   !session->dir,
2341 					   session->digest_length);
2342 		break;
2343 	case RTE_CRYPTO_AUTH_MD5:
2344 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2345 		authdata.algmode = OP_ALG_AAI_HASH;
2346 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2347 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2348 					   1, 0, SHR_NEVER, &authdata,
2349 					   !session->dir,
2350 					   session->digest_length);
2351 		break;
2352 	case RTE_CRYPTO_AUTH_SHA256:
2353 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2354 		authdata.algmode = OP_ALG_AAI_HASH;
2355 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2356 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2357 					   1, 0, SHR_NEVER, &authdata,
2358 					   !session->dir,
2359 					   session->digest_length);
2360 		break;
2361 	case RTE_CRYPTO_AUTH_SHA384:
2362 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2363 		authdata.algmode = OP_ALG_AAI_HASH;
2364 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2365 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2366 					   1, 0, SHR_NEVER, &authdata,
2367 					   !session->dir,
2368 					   session->digest_length);
2369 		break;
2370 	case RTE_CRYPTO_AUTH_SHA512:
2371 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2372 		authdata.algmode = OP_ALG_AAI_HASH;
2373 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2374 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2375 					   1, 0, SHR_NEVER, &authdata,
2376 					   !session->dir,
2377 					   session->digest_length);
2378 		break;
2379 	case RTE_CRYPTO_AUTH_SHA224:
2380 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2381 		authdata.algmode = OP_ALG_AAI_HASH;
2382 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2383 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2384 					   1, 0, SHR_NEVER, &authdata,
2385 					   !session->dir,
2386 					   session->digest_length);
2387 		break;
2388 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2389 		authdata.algtype = OP_ALG_ALGSEL_AES;
2390 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2391 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2392 		bufsize = cnstr_shdsc_aes_mac(
2393 					priv->flc_desc[DESC_INITFINAL].desc,
2394 					1, 0, SHR_NEVER, &authdata,
2395 					!session->dir,
2396 					session->digest_length);
2397 		break;
2398 	case RTE_CRYPTO_AUTH_AES_CMAC:
2399 		authdata.algtype = OP_ALG_ALGSEL_AES;
2400 		authdata.algmode = OP_ALG_AAI_CMAC;
2401 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2402 		bufsize = cnstr_shdsc_aes_mac(
2403 					   priv->flc_desc[DESC_INITFINAL].desc,
2404 					   1, 0, SHR_NEVER, &authdata,
2405 					   !session->dir,
2406 					   session->digest_length);
2407 		break;
2408 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2409 	case RTE_CRYPTO_AUTH_AES_GMAC:
2410 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2411 	case RTE_CRYPTO_AUTH_NULL:
2412 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2413 			      xform->auth.algo);
2414 		ret = -ENOTSUP;
2415 		goto error_out;
2416 	default:
2417 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2418 			      xform->auth.algo);
2419 		ret = -ENOTSUP;
2420 		goto error_out;
2421 	}
2422 
2423 	if (bufsize < 0) {
2424 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2425 		ret = -EINVAL;
2426 		goto error_out;
2427 	}
2428 
2429 	flc->word1_sdl = (uint8_t)bufsize;
2430 	session->ctxt = priv;
2431 #ifdef CAAM_DESC_DEBUG
2432 	int i;
2433 	for (i = 0; i < bufsize; i++)
2434 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2435 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2436 #endif
2437 
2438 	return ret;
2439 
2440 error_out:
2441 	rte_free(session->auth_key.data);
2442 	rte_free(priv);
2443 	return ret;
2444 }
2445 
2446 static int
2447 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2448 		    struct rte_crypto_sym_xform *xform,
2449 		    dpaa2_sec_session *session)
2450 {
2451 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2452 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2453 	struct alginfo aeaddata;
2454 	int bufsize;
2455 	struct ctxt_priv *priv;
2456 	struct sec_flow_context *flc;
2457 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2458 	int err, ret = 0;
2459 
2460 	PMD_INIT_FUNC_TRACE();
2461 
2462 	/* Set IV parameters */
2463 	session->iv.offset = aead_xform->iv.offset;
2464 	session->iv.length = aead_xform->iv.length;
2465 	session->ctxt_type = DPAA2_SEC_AEAD;
2466 
2467 	/* For SEC AEAD only one descriptor is required */
2468 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2469 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2470 			RTE_CACHE_LINE_SIZE);
2471 	if (priv == NULL) {
2472 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2473 		return -ENOMEM;
2474 	}
2475 
2476 	priv->fle_pool = dev_priv->fle_pool;
2477 	flc = &priv->flc_desc[0].flc;
2478 
2479 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2480 					       RTE_CACHE_LINE_SIZE);
2481 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2482 		DPAA2_SEC_ERR("No Memory for aead key");
2483 		rte_free(priv);
2484 		return -ENOMEM;
2485 	}
2486 	memcpy(session->aead_key.data, aead_xform->key.data,
2487 	       aead_xform->key.length);
2488 
2489 	session->digest_length = aead_xform->digest_length;
2490 	session->aead_key.length = aead_xform->key.length;
2491 	ctxt->auth_only_len = aead_xform->aad_length;
2492 
2493 	aeaddata.key = (size_t)session->aead_key.data;
2494 	aeaddata.keylen = session->aead_key.length;
2495 	aeaddata.key_enc_flags = 0;
2496 	aeaddata.key_type = RTA_DATA_IMM;
2497 
2498 	switch (aead_xform->algo) {
2499 	case RTE_CRYPTO_AEAD_AES_GCM:
2500 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2501 		aeaddata.algmode = OP_ALG_AAI_GCM;
2502 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2503 		break;
2504 	case RTE_CRYPTO_AEAD_AES_CCM:
2505 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2506 			      aead_xform->algo);
2507 		ret = -ENOTSUP;
2508 		goto error_out;
2509 	default:
2510 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2511 			      aead_xform->algo);
2512 		ret = -ENOTSUP;
2513 		goto error_out;
2514 	}
2515 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2516 				DIR_ENC : DIR_DEC;
2517 
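	/* Query whether the key fits inline in the shared descriptor; keys
	 * that do not fit are referenced by pointer instead. desc[0] and
	 * desc[1] are used as scratch for the query and cleared afterwards.
	 */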
2518 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2519 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2520 			       DESC_JOB_IO_LEN,
2521 			       (unsigned int *)priv->flc_desc[0].desc,
2522 			       &priv->flc_desc[0].desc[1], 1);
2523 
2524 	if (err < 0) {
2525 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2526 		ret = -EINVAL;
2527 		goto error_out;
2528 	}
2529 	if (priv->flc_desc[0].desc[1] & 1) {
2530 		aeaddata.key_type = RTA_DATA_IMM;
2531 	} else {
2532 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2533 		aeaddata.key_type = RTA_DATA_PTR;
2534 	}
2535 	priv->flc_desc[0].desc[0] = 0;
2536 	priv->flc_desc[0].desc[1] = 0;
2537 
2538 	if (session->dir == DIR_ENC)
2539 		bufsize = cnstr_shdsc_gcm_encap(
2540 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2541 				&aeaddata, session->iv.length,
2542 				session->digest_length);
2543 	else
2544 		bufsize = cnstr_shdsc_gcm_decap(
2545 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2546 				&aeaddata, session->iv.length,
2547 				session->digest_length);
2548 	if (bufsize < 0) {
2549 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2550 		ret = -EINVAL;
2551 		goto error_out;
2552 	}
2553 
2554 	flc->word1_sdl = (uint8_t)bufsize;
2555 	session->ctxt = priv;
2556 #ifdef CAAM_DESC_DEBUG
2557 	int i;
2558 	for (i = 0; i < bufsize; i++)
2559 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2560 			    i, priv->flc_desc[0].desc[i]);
2561 #endif
2562 	return ret;
2563 
2564 error_out:
2565 	rte_free(session->aead_key.data);
2566 	rte_free(priv);
2567 	return ret;
2568 }
2569 
2571 static int
2572 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2573 		    struct rte_crypto_sym_xform *xform,
2574 		    dpaa2_sec_session *session)
2575 {
2576 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2577 	struct alginfo authdata, cipherdata;
2578 	int bufsize;
2579 	struct ctxt_priv *priv;
2580 	struct sec_flow_context *flc;
2581 	struct rte_crypto_cipher_xform *cipher_xform;
2582 	struct rte_crypto_auth_xform *auth_xform;
2583 	int err, ret = 0;
2584 
2585 	PMD_INIT_FUNC_TRACE();
2586 
2587 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2588 		cipher_xform = &xform->cipher;
2589 		auth_xform = &xform->next->auth;
2590 		session->ctxt_type =
2591 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2592 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2593 	} else {
2594 		cipher_xform = &xform->next->cipher;
2595 		auth_xform = &xform->auth;
2596 		session->ctxt_type =
2597 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2598 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2599 	}
2600 
2601 	/* Set IV parameters */
2602 	session->iv.offset = cipher_xform->iv.offset;
2603 	session->iv.length = cipher_xform->iv.length;
2604 
2605 	/* For SEC chained (cipher + auth) only one descriptor is required */
2606 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2607 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2608 			RTE_CACHE_LINE_SIZE);
2609 	if (priv == NULL) {
2610 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2611 		return -ENOMEM;
2612 	}
2613 
2614 	priv->fle_pool = dev_priv->fle_pool;
2615 	flc = &priv->flc_desc[0].flc;
2616 
2617 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2618 					       RTE_CACHE_LINE_SIZE);
2619 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2620 		DPAA2_SEC_ERR("No Memory for cipher key");
2621 		rte_free(priv);
2622 		return -ENOMEM;
2623 	}
2624 	session->cipher_key.length = cipher_xform->key.length;
2625 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2626 					     RTE_CACHE_LINE_SIZE);
2627 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2628 		DPAA2_SEC_ERR("No Memory for auth key");
2629 		rte_free(session->cipher_key.data);
2630 		rte_free(priv);
2631 		return -ENOMEM;
2632 	}
2633 	session->auth_key.length = auth_xform->key.length;
2634 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2635 	       cipher_xform->key.length);
2636 	memcpy(session->auth_key.data, auth_xform->key.data,
2637 	       auth_xform->key.length);
2638 
2639 	authdata.key = (size_t)session->auth_key.data;
2640 	authdata.keylen = session->auth_key.length;
2641 	authdata.key_enc_flags = 0;
2642 	authdata.key_type = RTA_DATA_IMM;
2643 
2644 	session->digest_length = auth_xform->digest_length;
2645 
2646 	switch (auth_xform->algo) {
2647 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2648 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2649 		authdata.algmode = OP_ALG_AAI_HMAC;
2650 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2651 		break;
2652 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2653 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2654 		authdata.algmode = OP_ALG_AAI_HMAC;
2655 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2656 		break;
2657 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2658 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2659 		authdata.algmode = OP_ALG_AAI_HMAC;
2660 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2661 		break;
2662 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2663 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2664 		authdata.algmode = OP_ALG_AAI_HMAC;
2665 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2666 		break;
2667 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2668 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2669 		authdata.algmode = OP_ALG_AAI_HMAC;
2670 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2671 		break;
2672 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2673 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2674 		authdata.algmode = OP_ALG_AAI_HMAC;
2675 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2676 		break;
2677 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2678 		authdata.algtype = OP_ALG_ALGSEL_AES;
2679 		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
2680 		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
2681 		break;
2682 	case RTE_CRYPTO_AUTH_AES_CMAC:
2683 		authdata.algtype = OP_ALG_ALGSEL_AES;
2684 		authdata.algmode = OP_ALG_AAI_CMAC;
2685 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2686 		break;
2687 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2688 	case RTE_CRYPTO_AUTH_AES_GMAC:
2689 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2690 	case RTE_CRYPTO_AUTH_NULL:
2691 	case RTE_CRYPTO_AUTH_SHA1:
2692 	case RTE_CRYPTO_AUTH_SHA256:
2693 	case RTE_CRYPTO_AUTH_SHA512:
2694 	case RTE_CRYPTO_AUTH_SHA224:
2695 	case RTE_CRYPTO_AUTH_SHA384:
2696 	case RTE_CRYPTO_AUTH_MD5:
2697 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2698 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2699 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2700 			      auth_xform->algo);
2701 		ret = -ENOTSUP;
2702 		goto error_out;
2703 	default:
2704 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2705 			      auth_xform->algo);
2706 		ret = -ENOTSUP;
2707 		goto error_out;
2708 	}
2709 	cipherdata.key = (size_t)session->cipher_key.data;
2710 	cipherdata.keylen = session->cipher_key.length;
2711 	cipherdata.key_enc_flags = 0;
2712 	cipherdata.key_type = RTA_DATA_IMM;
2713 
2714 	switch (cipher_xform->algo) {
2715 	case RTE_CRYPTO_CIPHER_AES_CBC:
2716 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2717 		cipherdata.algmode = OP_ALG_AAI_CBC;
2718 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2719 		break;
2720 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2721 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2722 		cipherdata.algmode = OP_ALG_AAI_CBC;
2723 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2724 		break;
2725 	case RTE_CRYPTO_CIPHER_DES_CBC:
2726 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2727 		cipherdata.algmode = OP_ALG_AAI_CBC;
2728 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2729 		break;
2730 	case RTE_CRYPTO_CIPHER_AES_CTR:
2731 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2732 		cipherdata.algmode = OP_ALG_AAI_CTR;
2733 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2734 		break;
2735 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2736 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2737 	case RTE_CRYPTO_CIPHER_NULL:
2738 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2739 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2740 	case RTE_CRYPTO_CIPHER_AES_ECB:
2741 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2742 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2743 			      cipher_xform->algo);
2744 		ret = -ENOTSUP;
2745 		goto error_out;
2746 	default:
2747 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2748 			      cipher_xform->algo);
2749 		ret = -ENOTSUP;
2750 		goto error_out;
2751 	}
2752 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2753 				DIR_ENC : DIR_DEC;
2754 
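	/* Query which of the two keys fit inline in the shared descriptor:
	 * bit 0 of the result word covers the cipher key and bit 1 the auth
	 * key; keys that do not fit are referenced by pointer instead.
	 */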
2755 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2756 	priv->flc_desc[0].desc[1] = authdata.keylen;
2757 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2758 			       DESC_JOB_IO_LEN,
2759 			       (unsigned int *)priv->flc_desc[0].desc,
2760 			       &priv->flc_desc[0].desc[2], 2);
2761 
2762 	if (err < 0) {
2763 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2764 		ret = -EINVAL;
2765 		goto error_out;
2766 	}
2767 	if (priv->flc_desc[0].desc[2] & 1) {
2768 		cipherdata.key_type = RTA_DATA_IMM;
2769 	} else {
2770 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2771 		cipherdata.key_type = RTA_DATA_PTR;
2772 	}
2773 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2774 		authdata.key_type = RTA_DATA_IMM;
2775 	} else {
2776 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2777 		authdata.key_type = RTA_DATA_PTR;
2778 	}
2779 	priv->flc_desc[0].desc[0] = 0;
2780 	priv->flc_desc[0].desc[1] = 0;
2781 	priv->flc_desc[0].desc[2] = 0;
2782 
2783 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2784 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2785 					      0, SHR_SERIAL,
2786 					      &cipherdata, &authdata,
2787 					      session->iv.length,
2788 					      session->digest_length,
2789 					      session->dir);
2790 		if (bufsize < 0) {
2791 			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2792 			ret = -EINVAL;
2793 			goto error_out;
2794 		}
2795 	} else {
2796 		DPAA2_SEC_ERR("Hash before cipher not supported");
2797 		ret = -ENOTSUP;
2798 		goto error_out;
2799 	}
2800 
2801 	flc->word1_sdl = (uint8_t)bufsize;
2802 	session->ctxt = priv;
2803 #ifdef CAAM_DESC_DEBUG
2804 	int i;
2805 	for (i = 0; i < bufsize; i++)
2806 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2807 			    i, priv->flc_desc[0].desc[i]);
2808 #endif
2809 
2810 	return ret;
2811 
2812 error_out:
2813 	rte_free(session->cipher_key.data);
2814 	rte_free(session->auth_key.data);
2815 	rte_free(priv);
2816 	return ret;
2817 }
2818 
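/* Dispatch session setup based on the xform chain: cipher only, auth only,
 * cipher+auth in either order (a NULL cipher or NULL auth degenerates to
 * the single-algorithm case), or AEAD.
 */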
2819 static int
2820 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2821 			    struct rte_crypto_sym_xform *xform, void *sess)
2822 {
2823 	dpaa2_sec_session *session = sess;
2824 	int ret;
2825 
2826 	PMD_INIT_FUNC_TRACE();
2827 
2828 	if (unlikely(sess == NULL)) {
2829 		DPAA2_SEC_ERR("Invalid session struct");
2830 		return -EINVAL;
2831 	}
2832 
2833 	memset(session, 0, sizeof(dpaa2_sec_session));
2834 	/* Default IV length = 0 */
2835 	session->iv.length = 0;
2836 
2837 	/* Cipher Only */
2838 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2839 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2840 
2841 	/* Authentication Only */
2842 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2843 		   xform->next == NULL) {
2844 		ret = dpaa2_sec_auth_init(dev, xform, session);
2845 
2846 	/* Cipher then Authenticate */
2847 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2848 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2849 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2850 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2851 			ret = dpaa2_sec_auth_init(dev, xform, session);
2852 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2853 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2854 		else
2855 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2856 	/* Authenticate then Cipher */
2857 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2858 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2859 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2860 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2861 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2862 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2863 			ret = dpaa2_sec_auth_init(dev, xform, session);
2864 		else
2865 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2866 	/* AEAD operation for AES-GCM kind of Algorithms */
2867 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2868 		   xform->next == NULL) {
2869 		ret = dpaa2_sec_aead_init(dev, xform, session);
2870 
2871 	} else {
2872 		DPAA2_SEC_ERR("Invalid crypto type");
2873 		return -EINVAL;
2874 	}
2875 
2876 	return ret;
2877 }
2878 
2879 #ifdef RTE_LIB_SECURITY
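/* Map an AEAD xform onto the SEC IPsec protocol algorithm, selecting the
 * GCM/CCM variant according to the ICV (digest) length.
 */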
2880 static int
2881 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2882 			dpaa2_sec_session *session,
2883 			struct alginfo *aeaddata)
2884 {
2885 	PMD_INIT_FUNC_TRACE();
2886 
2887 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2888 					       RTE_CACHE_LINE_SIZE);
2889 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2890 		DPAA2_SEC_ERR("No Memory for aead key");
2891 		return -ENOMEM;
2892 	}
2893 	memcpy(session->aead_key.data, aead_xform->key.data,
2894 	       aead_xform->key.length);
2895 
2896 	session->digest_length = aead_xform->digest_length;
2897 	session->aead_key.length = aead_xform->key.length;
2898 
2899 	aeaddata->key = (size_t)session->aead_key.data;
2900 	aeaddata->keylen = session->aead_key.length;
2901 	aeaddata->key_enc_flags = 0;
2902 	aeaddata->key_type = RTA_DATA_IMM;
2903 
2904 	switch (aead_xform->algo) {
2905 	case RTE_CRYPTO_AEAD_AES_GCM:
2906 		switch (session->digest_length) {
2907 		case 8:
2908 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2909 			break;
2910 		case 12:
2911 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2912 			break;
2913 		case 16:
2914 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2915 			break;
2916 		default:
2917 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2918 				      session->digest_length);
2919 			return -EINVAL;
2920 		}
2921 		aeaddata->algmode = OP_ALG_AAI_GCM;
2922 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2923 		break;
2924 	case RTE_CRYPTO_AEAD_AES_CCM:
2925 		switch (session->digest_length) {
2926 		case 8:
2927 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2928 			break;
2929 		case 12:
2930 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2931 			break;
2932 		case 16:
2933 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2934 			break;
2935 		default:
2936 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2937 				      session->digest_length);
2938 			return -EINVAL;
2939 		}
2940 		aeaddata->algmode = OP_ALG_AAI_CCM;
2941 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2942 		break;
2943 	default:
2944 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2945 			      aead_xform->algo);
2946 		return -ENOTSUP;
2947 	}
2948 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2949 				DIR_ENC : DIR_DEC;
2950 
2951 	return 0;
2952 }
2953 
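/* Translate the cipher/auth xforms of an IPsec session into SEC protocol
 * (OP_PCL_IPSEC_*) algorithm selections; a missing xform maps to the
 * corresponding NULL algorithm.
 */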
2954 static int
2955 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2956 	struct rte_crypto_auth_xform *auth_xform,
2957 	dpaa2_sec_session *session,
2958 	struct alginfo *cipherdata,
2959 	struct alginfo *authdata)
2960 {
2961 	if (cipher_xform) {
2962 		session->cipher_key.data = rte_zmalloc(NULL,
2963 						       cipher_xform->key.length,
2964 						       RTE_CACHE_LINE_SIZE);
2965 		if (session->cipher_key.data == NULL &&
2966 				cipher_xform->key.length > 0) {
2967 			DPAA2_SEC_ERR("No Memory for cipher key");
2968 			return -ENOMEM;
2969 		}
2970 
2971 		session->cipher_key.length = cipher_xform->key.length;
2972 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2973 				cipher_xform->key.length);
2974 		session->cipher_alg = cipher_xform->algo;
2975 	} else {
2976 		session->cipher_key.data = NULL;
2977 		session->cipher_key.length = 0;
2978 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2979 	}
2980 
2981 	if (auth_xform) {
2982 		session->auth_key.data = rte_zmalloc(NULL,
2983 						auth_xform->key.length,
2984 						RTE_CACHE_LINE_SIZE);
2985 		if (session->auth_key.data == NULL &&
2986 				auth_xform->key.length > 0) {
2987 			DPAA2_SEC_ERR("No Memory for auth key");
2988 			return -ENOMEM;
2989 		}
2990 		session->auth_key.length = auth_xform->key.length;
2991 		memcpy(session->auth_key.data, auth_xform->key.data,
2992 				auth_xform->key.length);
2993 		session->auth_alg = auth_xform->algo;
2994 		session->digest_length = auth_xform->digest_length;
2995 	} else {
2996 		session->auth_key.data = NULL;
2997 		session->auth_key.length = 0;
2998 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2999 	}
3000 
3001 	authdata->key = (size_t)session->auth_key.data;
3002 	authdata->keylen = session->auth_key.length;
3003 	authdata->key_enc_flags = 0;
3004 	authdata->key_type = RTA_DATA_IMM;
3005 	switch (session->auth_alg) {
3006 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
3007 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
3008 		authdata->algmode = OP_ALG_AAI_HMAC;
3009 		break;
3010 	case RTE_CRYPTO_AUTH_MD5_HMAC:
3011 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
3012 		authdata->algmode = OP_ALG_AAI_HMAC;
3013 		break;
3014 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
3015 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
3016 		authdata->algmode = OP_ALG_AAI_HMAC;
3017 		if (session->digest_length != 16)
3018 			DPAA2_SEC_WARN(
3019 			"Using truncated sha256-hmac digest length is "
3020 			"non-standard; it will not work with lookaside proto");
3021 		break;
3022 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
3023 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
3024 		authdata->algmode = OP_ALG_AAI_HMAC;
3025 		break;
3026 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
3027 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
3028 		authdata->algmode = OP_ALG_AAI_HMAC;
3029 		break;
3030 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
3031 		authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
3032 		authdata->algmode = OP_ALG_AAI_XCBC_MAC;
3033 		break;
3034 	case RTE_CRYPTO_AUTH_AES_CMAC:
3035 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
3036 		authdata->algmode = OP_ALG_AAI_CMAC;
3037 		break;
3038 	case RTE_CRYPTO_AUTH_NULL:
3039 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
3040 		break;
3041 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
3042 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3043 	case RTE_CRYPTO_AUTH_SHA1:
3044 	case RTE_CRYPTO_AUTH_SHA256:
3045 	case RTE_CRYPTO_AUTH_SHA512:
3046 	case RTE_CRYPTO_AUTH_SHA224:
3047 	case RTE_CRYPTO_AUTH_SHA384:
3048 	case RTE_CRYPTO_AUTH_MD5:
3049 	case RTE_CRYPTO_AUTH_AES_GMAC:
3050 	case RTE_CRYPTO_AUTH_KASUMI_F9:
3051 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
3052 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
3053 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3054 			      session->auth_alg);
3055 		return -ENOTSUP;
3056 	default:
3057 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
3058 			      session->auth_alg);
3059 		return -ENOTSUP;
3060 	}
3061 	cipherdata->key = (size_t)session->cipher_key.data;
3062 	cipherdata->keylen = session->cipher_key.length;
3063 	cipherdata->key_enc_flags = 0;
3064 	cipherdata->key_type = RTA_DATA_IMM;
3065 
3066 	switch (session->cipher_alg) {
3067 	case RTE_CRYPTO_CIPHER_AES_CBC:
3068 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
3069 		cipherdata->algmode = OP_ALG_AAI_CBC;
3070 		break;
3071 	case RTE_CRYPTO_CIPHER_3DES_CBC:
3072 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
3073 		cipherdata->algmode = OP_ALG_AAI_CBC;
3074 		break;
3075 	case RTE_CRYPTO_CIPHER_DES_CBC:
3076 		cipherdata->algtype = OP_PCL_IPSEC_DES;
3077 		cipherdata->algmode = OP_ALG_AAI_CBC;
3078 		break;
3079 	case RTE_CRYPTO_CIPHER_AES_CTR:
3080 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
3081 		cipherdata->algmode = OP_ALG_AAI_CTR;
3082 		break;
3083 	case RTE_CRYPTO_CIPHER_NULL:
3084 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
3085 		break;
3086 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3087 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3088 	case RTE_CRYPTO_CIPHER_3DES_ECB:
3089 	case RTE_CRYPTO_CIPHER_3DES_CTR:
3090 	case RTE_CRYPTO_CIPHER_AES_ECB:
3091 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
3092 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
3093 			      session->cipher_alg);
3094 		return -ENOTSUP;
3095 	default:
3096 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3097 			      session->cipher_alg);
3098 		return -ENOTSUP;
3099 	}
3100 
3101 	return 0;
3102 }
3103 
3104 static int
3105 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
3106 			    struct rte_security_session_conf *conf,
3107 			    void *sess)
3108 {
3109 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
3110 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3111 	struct rte_crypto_auth_xform *auth_xform = NULL;
3112 	struct rte_crypto_aead_xform *aead_xform = NULL;
3113 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3114 	struct ctxt_priv *priv;
3115 	struct alginfo authdata, cipherdata;
3116 	int bufsize;
3117 	struct sec_flow_context *flc;
3118 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3119 	int ret = -1;
3120 
3121 	PMD_INIT_FUNC_TRACE();
3122 
3123 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3124 				sizeof(struct ctxt_priv) +
3125 				sizeof(struct sec_flc_desc),
3126 				RTE_CACHE_LINE_SIZE);
3127 
3128 	if (priv == NULL) {
3129 		DPAA2_SEC_ERR("No memory for priv CTXT");
3130 		return -ENOMEM;
3131 	}
3132 
3133 	priv->fle_pool = dev_priv->fle_pool;
3134 	flc = &priv->flc_desc[0].flc;
3135 
3136 	if (ipsec_xform->life.bytes_hard_limit != 0 ||
3137 	    ipsec_xform->life.bytes_soft_limit != 0 ||
3138 	    ipsec_xform->life.packets_hard_limit != 0 ||
3139 	    ipsec_xform->life.packets_soft_limit != 0) {
		rte_free(priv);
3140 		return -ENOTSUP;
	}
3141 
3142 	memset(session, 0, sizeof(dpaa2_sec_session));
3143 
3144 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3145 		cipher_xform = &conf->crypto_xform->cipher;
3146 		if (conf->crypto_xform->next)
3147 			auth_xform = &conf->crypto_xform->next->auth;
3148 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3149 					session, &cipherdata, &authdata);
3150 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3151 		auth_xform = &conf->crypto_xform->auth;
3152 		if (conf->crypto_xform->next)
3153 			cipher_xform = &conf->crypto_xform->next->cipher;
3154 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
3155 					session, &cipherdata, &authdata);
3156 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
3157 		aead_xform = &conf->crypto_xform->aead;
3158 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
3159 					session, &cipherdata);
3160 		authdata.keylen = 0;
3161 		authdata.algtype = 0;
3162 	} else {
3163 		DPAA2_SEC_ERR("XFORM not specified");
3164 		ret = -EINVAL;
3165 		goto out;
3166 	}
3167 	if (ret) {
3168 		DPAA2_SEC_ERR("Failed to process xform");
3169 		goto out;
3170 	}
3171 
3172 	session->ctxt_type = DPAA2_SEC_IPSEC;
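	/* For egress, prebuild the outer IP header and the encapsulation PDB
	 * which SEC prepends to each packet; for ingress, build the decap PDB
	 * including the anti-replay window configuration.
	 */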
3173 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
3174 		uint8_t *hdr = NULL;
3175 		struct ip ip4_hdr;
3176 		struct rte_ipv6_hdr ip6_hdr;
3177 		struct ipsec_encap_pdb encap_pdb;
3178 
3179 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
3180 		/* For Sec Proto only one descriptor is required. */
3181 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
3182 
3183 		/* copy algo specific data to PDB */
3184 		switch (cipherdata.algtype) {
3185 		case OP_PCL_IPSEC_AES_CTR:
3186 			encap_pdb.ctr.ctr_initial = 0x00000001;
3187 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3188 			break;
3189 		case OP_PCL_IPSEC_AES_GCM8:
3190 		case OP_PCL_IPSEC_AES_GCM12:
3191 		case OP_PCL_IPSEC_AES_GCM16:
3192 			memcpy(encap_pdb.gcm.salt,
3193 				(uint8_t *)&(ipsec_xform->salt), 4);
3194 			break;
3195 		}
3196 
3197 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
3198 			PDBOPTS_ESP_OIHI_PDB_INL |
3199 			PDBOPTS_ESP_IVSRC |
3200 			PDBHMO_ESP_SNR;
3201 		if (ipsec_xform->options.dec_ttl)
3202 			encap_pdb.options |= PDBHMO_ESP_ENCAP_DTTL;
3203 		if (ipsec_xform->options.esn)
3204 			encap_pdb.options |= PDBOPTS_ESP_ESN;
3205 		encap_pdb.spi = ipsec_xform->spi;
3206 		session->dir = DIR_ENC;
3207 		if (ipsec_xform->tunnel.type ==
3208 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
3209 			encap_pdb.ip_hdr_len = sizeof(struct ip);
3210 			ip4_hdr.ip_v = IPVERSION;
3211 			ip4_hdr.ip_hl = 5;
3212 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
3213 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
3214 			ip4_hdr.ip_id = 0;
3215 			ip4_hdr.ip_off = 0;
3216 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
3217 			ip4_hdr.ip_p = IPPROTO_ESP;
3218 			ip4_hdr.ip_sum = 0;
3219 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
3220 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
3221 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
3222 					&ip4_hdr, sizeof(struct ip));
3223 			hdr = (uint8_t *)&ip4_hdr;
3224 		} else if (ipsec_xform->tunnel.type ==
3225 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
3226 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
3227 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
3228 				((ipsec_xform->tunnel.ipv6.dscp <<
3229 					RTE_IPV6_HDR_TC_SHIFT) &
3230 					RTE_IPV6_HDR_TC_MASK) |
3231 				((ipsec_xform->tunnel.ipv6.flabel <<
3232 					RTE_IPV6_HDR_FL_SHIFT) &
3233 					RTE_IPV6_HDR_FL_MASK));
3234 			/* Payload length will be updated by HW */
3235 			ip6_hdr.payload_len = 0;
3236 			ip6_hdr.hop_limits =
3237 					ipsec_xform->tunnel.ipv6.hlimit;
3238 			ip6_hdr.proto = (ipsec_xform->proto ==
3239 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
3240 					IPPROTO_ESP : IPPROTO_AH;
3241 			memcpy(&ip6_hdr.src_addr,
3242 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
3243 			memcpy(&ip6_hdr.dst_addr,
3244 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
3245 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
3246 			hdr = (uint8_t *)&ip6_hdr;
3247 		}
3248 
3249 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
3250 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3251 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
3252 				hdr, &cipherdata, &authdata);
3253 	} else if (ipsec_xform->direction ==
3254 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
3255 		struct ipsec_decap_pdb decap_pdb;
3256 
3257 		flc->dhr = SEC_FLC_DHR_INBOUND;
3258 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
3259 		/* copy algo specific data to PDB */
3260 		switch (cipherdata.algtype) {
3261 		case OP_PCL_IPSEC_AES_CTR:
3262 			decap_pdb.ctr.ctr_initial = 0x00000001;
3263 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
3264 			break;
3265 		case OP_PCL_IPSEC_AES_GCM8:
3266 		case OP_PCL_IPSEC_AES_GCM12:
3267 		case OP_PCL_IPSEC_AES_GCM16:
3268 			memcpy(decap_pdb.gcm.salt,
3269 				(uint8_t *)&(ipsec_xform->salt), 4);
3270 			break;
3271 		}
3272 
3273 		decap_pdb.options = (ipsec_xform->tunnel.type ==
3274 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
3275 				sizeof(struct ip) << 16 :
3276 				sizeof(struct rte_ipv6_hdr) << 16;
3277 		if (ipsec_xform->options.esn)
3278 			decap_pdb.options |= PDBOPTS_ESP_ESN;
3279 
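		/* Round the requested anti-replay window up to a power of two
		 * and map it to the nearest supported ARS option; SEC ERAs
		 * below 10 support a window of at most 128 entries.
		 */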
3280 		if (ipsec_xform->replay_win_sz) {
3281 			uint32_t win_sz;
3282 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
3283 
3284 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
3285 				DPAA2_SEC_INFO("Max anti-replay window size = 128");
3286 				win_sz = 128;
3287 			}
3288 			switch (win_sz) {
3289 			case 1:
3290 			case 2:
3291 			case 4:
3292 			case 8:
3293 			case 16:
3294 			case 32:
3295 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3296 				break;
3297 			case 64:
3298 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3299 				break;
3300 			case 256:
3301 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3302 				break;
3303 			case 512:
3304 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3305 				break;
3306 			case 1024:
3307 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3308 				break;
3309 			case 128:
3310 			default:
3311 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3312 			}
3313 		}
3314 		session->dir = DIR_DEC;
3315 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3316 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3317 				SHR_WAIT : SHR_SERIAL,
3318 				&decap_pdb, &cipherdata, &authdata);
3319 	} else
3320 		goto out;
3321 
3322 	if (bufsize < 0) {
3323 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3324 		goto out;
3325 	}
3326 
3327 	flc->word1_sdl = (uint8_t)bufsize;
3328 
3329 	/* Enable the stashing control bit */
3330 	DPAA2_SET_FLC_RSC(flc);
3331 	flc->word2_rflc_31_0 = lower_32_bits(
3332 			(size_t)&(((struct dpaa2_sec_qp *)
3333 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3334 	flc->word3_rflc_63_32 = upper_32_bits(
3335 			(size_t)&(((struct dpaa2_sec_qp *)
3336 			dev->data->queue_pairs[0])->rx_vq));
3337 
3338 	/* Set EWS bit i.e. enable write-safe */
3339 	DPAA2_SET_FLC_EWS(flc);
3340 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3341 	DPAA2_SET_FLC_REUSE_BS(flc);
3342 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3343 	DPAA2_SET_FLC_REUSE_FF(flc);
3344 
3345 	session->ctxt = priv;
3346 
3347 	return 0;
3348 out:
3349 	rte_free(session->auth_key.data);
3350 	rte_free(session->cipher_key.data);
3351 	rte_free(priv);
3352 	return ret;
3353 }
3354 
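/* Build a PDCP session: record the PDCP parameters (domain, bearer,
 * direction, SN size, HFN and its override offset) and the cipher/auth
 * keys used to construct the protocol descriptor.
 */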
3355 static int
3356 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3357 			   struct rte_security_session_conf *conf,
3358 			   void *sess)
3359 {
3360 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3361 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3362 	struct rte_crypto_auth_xform *auth_xform = NULL;
3363 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
3364 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3365 	struct ctxt_priv *priv;
3366 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3367 	struct alginfo authdata, cipherdata;
3368 	struct alginfo *p_authdata = NULL;
3369 	int bufsize = -1;
3370 	struct sec_flow_context *flc;
3371 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3372 	int swap = true;
3373 #else
3374 	int swap = false;
3375 #endif
3376 
3377 	PMD_INIT_FUNC_TRACE();
3378 
3379 	memset(session, 0, sizeof(dpaa2_sec_session));
3380 
3381 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3382 				sizeof(struct ctxt_priv) +
3383 				sizeof(struct sec_flc_desc),
3384 				RTE_CACHE_LINE_SIZE);
3385 
3386 	if (priv == NULL) {
3387 		DPAA2_SEC_ERR("No memory for priv CTXT");
3388 		return -ENOMEM;
3389 	}
3390 
3391 	priv->fle_pool = dev_priv->fle_pool;
3392 	flc = &priv->flc_desc[0].flc;
3393 
3394 	/* find xfrm types */
3395 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3396 		cipher_xform = &xform->cipher;
3397 		if (xform->next != NULL &&
3398 			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3399 			session->ext_params.aead_ctxt.auth_cipher_text = true;
3400 			auth_xform = &xform->next->auth;
3401 		}
3402 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3403 		auth_xform = &xform->auth;
3404 		if (xform->next != NULL &&
3405 			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3406 			session->ext_params.aead_ctxt.auth_cipher_text = false;
3407 			cipher_xform = &xform->next->cipher;
3408 		}
3409 	} else {
3410 		DPAA2_SEC_ERR("Invalid crypto type");
3411 		return -EINVAL;
3412 	}
3413 
3414 	session->ctxt_type = DPAA2_SEC_PDCP;
3415 	if (cipher_xform) {
3416 		session->cipher_key.data = rte_zmalloc(NULL,
3417 					       cipher_xform->key.length,
3418 					       RTE_CACHE_LINE_SIZE);
3419 		if (session->cipher_key.data == NULL &&
3420 				cipher_xform->key.length > 0) {
3421 			DPAA2_SEC_ERR("No Memory for cipher key");
3422 			rte_free(priv);
3423 			return -ENOMEM;
3424 		}
3425 		session->cipher_key.length = cipher_xform->key.length;
3426 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3427 			cipher_xform->key.length);
3428 		session->dir =
3429 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3430 					DIR_ENC : DIR_DEC;
3431 		session->cipher_alg = cipher_xform->algo;
3432 	} else {
3433 		session->cipher_key.data = NULL;
3434 		session->cipher_key.length = 0;
3435 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3436 		session->dir = DIR_ENC;
3437 	}
3438 
3439 	session->pdcp.domain = pdcp_xform->domain;
3440 	session->pdcp.bearer = pdcp_xform->bearer;
3441 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3442 	session->pdcp.sn_size = pdcp_xform->sn_size;
3443 	session->pdcp.hfn = pdcp_xform->hfn;
3444 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3445 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3446 	/* HFN override offset location is stored in the iv.offset value */
3447 	if (cipher_xform)
3448 		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3449 
3450 	cipherdata.key = (size_t)session->cipher_key.data;
3451 	cipherdata.keylen = session->cipher_key.length;
3452 	cipherdata.key_enc_flags = 0;
3453 	cipherdata.key_type = RTA_DATA_IMM;
3454 
3455 	switch (session->cipher_alg) {
3456 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3457 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3458 		break;
3459 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3460 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3461 		break;
3462 	case RTE_CRYPTO_CIPHER_AES_CTR:
3463 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3464 		break;
3465 	case RTE_CRYPTO_CIPHER_NULL:
3466 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3467 		break;
3468 	default:
3469 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3470 			      session->cipher_alg);
3471 		goto out;
3472 	}
3473 
3474 	if (auth_xform) {
3475 		session->auth_key.data = rte_zmalloc(NULL,
3476 						     auth_xform->key.length,
3477 						     RTE_CACHE_LINE_SIZE);
3478 		if (!session->auth_key.data &&
3479 		    auth_xform->key.length > 0) {
3480 			DPAA2_SEC_ERR("No Memory for auth key");
3481 			rte_free(session->cipher_key.data);
3482 			rte_free(priv);
3483 			return -ENOMEM;
3484 		}
3485 		session->auth_key.length = auth_xform->key.length;
3486 		memcpy(session->auth_key.data, auth_xform->key.data,
3487 		       auth_xform->key.length);
3488 		session->auth_alg = auth_xform->algo;
3489 	} else {
3490 		session->auth_key.data = NULL;
3491 		session->auth_key.length = 0;
3492 		session->auth_alg = 0;
3493 	}
3494 	authdata.key = (size_t)session->auth_key.data;
3495 	authdata.keylen = session->auth_key.length;
3496 	authdata.key_enc_flags = 0;
3497 	authdata.key_type = RTA_DATA_IMM;
3498 
3499 	if (session->auth_alg) {
3500 		switch (session->auth_alg) {
3501 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3502 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3503 			break;
3504 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3505 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3506 			break;
3507 		case RTE_CRYPTO_AUTH_AES_CMAC:
3508 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3509 			break;
3510 		case RTE_CRYPTO_AUTH_NULL:
3511 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3512 			break;
3513 		default:
3514 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3515 				      session->auth_alg);
3516 			goto out;
3517 		}
3518 
3519 		p_authdata = &authdata;
3520 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3521 		DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3522 		goto out;
3523 	}
3524 
3525 	if (pdcp_xform->sdap_enabled) {
3526 		int nb_keys_to_inline =
3527 			rta_inline_pdcp_sdap_query(authdata.algtype,
3528 					cipherdata.algtype,
3529 					session->pdcp.sn_size,
3530 					session->pdcp.hfn_ovd);
3531 		if (nb_keys_to_inline >= 1) {
3532 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3533 			cipherdata.key_type = RTA_DATA_PTR;
3534 		}
3535 		if (nb_keys_to_inline >= 2) {
3536 			authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
3537 			authdata.key_type = RTA_DATA_PTR;
3538 		}
3539 	} else {
3540 		if (rta_inline_pdcp_query(authdata.algtype,
3541 					cipherdata.algtype,
3542 					session->pdcp.sn_size,
3543 					session->pdcp.hfn_ovd)) {
3544 			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3545 			cipherdata.key_type = RTA_DATA_PTR;
3546 		}
3547 	}
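	/*
	 * The queries above report how many keys must be referenced from
	 * external memory (RTA_DATA_PTR) rather than embedded immediately in
	 * the shared descriptor, since descriptor space is limited.
	 */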
3548 
3549 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3550 		if (session->dir == DIR_ENC)
3551 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3552 					priv->flc_desc[0].desc, 1, swap,
3553 					pdcp_xform->hfn,
3554 					session->pdcp.sn_size,
3555 					pdcp_xform->bearer,
3556 					pdcp_xform->pkt_dir,
3557 					pdcp_xform->hfn_threshold,
3558 					&cipherdata, &authdata);
3559 		else if (session->dir == DIR_DEC)
3560 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3561 					priv->flc_desc[0].desc, 1, swap,
3562 					pdcp_xform->hfn,
3563 					session->pdcp.sn_size,
3564 					pdcp_xform->bearer,
3565 					pdcp_xform->pkt_dir,
3566 					pdcp_xform->hfn_threshold,
3567 					&cipherdata, &authdata);
3568 
3569 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
3570 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
3571 						     1, swap, &authdata);
3572 	} else {
3573 		if (session->dir == DIR_ENC) {
3574 			if (pdcp_xform->sdap_enabled)
3575 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3576 					priv->flc_desc[0].desc, 1, swap,
3577 					session->pdcp.sn_size,
3578 					pdcp_xform->hfn,
3579 					pdcp_xform->bearer,
3580 					pdcp_xform->pkt_dir,
3581 					pdcp_xform->hfn_threshold,
3582 					&cipherdata, p_authdata);
3583 			else
3584 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3585 					priv->flc_desc[0].desc, 1, swap,
3586 					session->pdcp.sn_size,
3587 					pdcp_xform->hfn,
3588 					pdcp_xform->bearer,
3589 					pdcp_xform->pkt_dir,
3590 					pdcp_xform->hfn_threshold,
3591 					&cipherdata, p_authdata);
3592 		} else if (session->dir == DIR_DEC) {
3593 			if (pdcp_xform->sdap_enabled)
3594 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3595 					priv->flc_desc[0].desc, 1, swap,
3596 					session->pdcp.sn_size,
3597 					pdcp_xform->hfn,
3598 					pdcp_xform->bearer,
3599 					pdcp_xform->pkt_dir,
3600 					pdcp_xform->hfn_threshold,
3601 					&cipherdata, p_authdata);
3602 			else
3603 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3604 					priv->flc_desc[0].desc, 1, swap,
3605 					session->pdcp.sn_size,
3606 					pdcp_xform->hfn,
3607 					pdcp_xform->bearer,
3608 					pdcp_xform->pkt_dir,
3609 					pdcp_xform->hfn_threshold,
3610 					&cipherdata, p_authdata);
3611 		}
3612 	}
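	/*
	 * On success, the cnstr_shdsc_pdcp_* constructors return the shared
	 * descriptor length in 32-bit words (programmed into word1_sdl
	 * below); a negative value indicates a construction failure.
	 */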
3613 
3614 	if (bufsize < 0) {
3615 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
3616 		goto out;
3617 	}
3618 
3619 	/* Enable the stashing control bit */
3620 	DPAA2_SET_FLC_RSC(flc);
3621 	flc->word2_rflc_31_0 = lower_32_bits(
3622 			(size_t)&(((struct dpaa2_sec_qp *)
3623 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3624 	flc->word3_rflc_63_32 = upper_32_bits(
3625 			(size_t)&(((struct dpaa2_sec_qp *)
3626 			dev->data->queue_pairs[0])->rx_vq));
3627 
3628 	flc->word1_sdl = (uint8_t)bufsize;
3629 
3630 	/* TODO - check the perf impact or
3631 	 * align as per descriptor type
3632 	 * Set EWS bit i.e. enable write-safe
3633 	 * DPAA2_SET_FLC_EWS(flc);
3634 	 */
3635 
3636 	/* Set BS = 1, i.e. reuse input buffers as output buffers */
3637 	DPAA2_SET_FLC_REUSE_BS(flc);
3638 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3639 	DPAA2_SET_FLC_REUSE_FF(flc);
3640 
3641 	session->ctxt = priv;
3642 
3643 	return 0;
3644 out:
3645 	rte_free(session->auth_key.data);
3646 	rte_free(session->cipher_key.data);
3647 	rte_free(priv);
3648 	return -EINVAL;
3649 }
3650 
3651 static int
3652 dpaa2_sec_security_session_create(void *dev,
3653 				  struct rte_security_session_conf *conf,
3654 				  struct rte_security_session *sess,
3655 				  struct rte_mempool *mempool)
3656 {
3657 	void *sess_private_data;
3658 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3659 	int ret;
3660 
3661 	if (rte_mempool_get(mempool, &sess_private_data)) {
3662 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3663 		return -ENOMEM;
3664 	}
3665 
3666 	switch (conf->protocol) {
3667 	case RTE_SECURITY_PROTOCOL_IPSEC:
3668 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3669 				sess_private_data);
3670 		break;
3671 	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* Return the object to the mempool before bailing out */
		rte_mempool_put(mempool, sess_private_data);
3672 		return -ENOTSUP;
3673 	case RTE_SECURITY_PROTOCOL_PDCP:
3674 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3675 				sess_private_data);
3676 		break;
3677 	default:
		rte_mempool_put(mempool, sess_private_data);
3678 		return -EINVAL;
3679 	}
3680 	if (ret != 0) {
3681 		DPAA2_SEC_ERR("Failed to configure session parameters");
3682 		/* Return session to mempool */
3683 		rte_mempool_put(mempool, sess_private_data);
3684 		return ret;
3685 	}
3686 
3687 	set_sec_session_private_data(sess, sess_private_data);
3688 
3689 	return ret;
3690 }
3691 
3692 /** Clear the memory of session so it doesn't leave key material behind */
3693 static int
3694 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3695 		struct rte_security_session *sess)
3696 {
3697 	PMD_INIT_FUNC_TRACE();
3698 	void *sess_priv = get_sec_session_private_data(sess);
3699 
3700 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3701 
3702 	if (sess_priv) {
3703 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3704 
3705 		rte_free(s->ctxt);
3706 		rte_free(s->cipher_key.data);
3707 		rte_free(s->auth_key.data);
3708 		memset(s, 0, sizeof(dpaa2_sec_session));
3709 		set_sec_session_private_data(sess, NULL);
3710 		rte_mempool_put(sess_mp, sess_priv);
3711 	}
3712 	return 0;
3713 }
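
/*
 * Illustrative application-side usage of the session handlers above via the
 * generic rte_security API (names such as "sess_mp", "priv_mp" and
 * "cipher_xform" are hypothetical):
 *
 *	struct rte_security_ctx *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
 *		.pdcp = {
 *			.domain = RTE_SECURITY_PDCP_MODE_DATA,
 *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 *	struct rte_security_session *sess =
 *		rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
 *	...
 *	rte_security_session_destroy(ctx, sess);
 */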
3714 #endif
3715 static int
3716 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3717 		struct rte_crypto_sym_xform *xform,
3718 		struct rte_cryptodev_sym_session *sess,
3719 		struct rte_mempool *mempool)
3720 {
3721 	void *sess_private_data;
3722 	int ret;
3723 
3724 	if (rte_mempool_get(mempool, &sess_private_data)) {
3725 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3726 		return -ENOMEM;
3727 	}
3728 
3729 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3730 	if (ret != 0) {
3731 		DPAA2_SEC_ERR("Failed to configure session parameters");
3732 		/* Return session to mempool */
3733 		rte_mempool_put(mempool, sess_private_data);
3734 		return ret;
3735 	}
3736 
3737 	set_sym_session_private_data(sess, dev->driver_id,
3738 		sess_private_data);
3739 
3740 	return 0;
3741 }
3742 
3743 /** Clear the memory of session so it doesn't leave key material behind */
3744 static void
3745 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3746 		struct rte_cryptodev_sym_session *sess)
3747 {
3748 	PMD_INIT_FUNC_TRACE();
3749 	uint8_t index = dev->driver_id;
3750 	void *sess_priv = get_sym_session_private_data(sess, index);
3751 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3752 
3753 	if (sess_priv) {
3754 		rte_free(s->ctxt);
3755 		rte_free(s->cipher_key.data);
3756 		rte_free(s->auth_key.data);
3757 		memset(s, 0, sizeof(dpaa2_sec_session));
3758 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3759 		set_sym_session_private_data(sess, index, NULL);
3760 		rte_mempool_put(sess_mp, sess_priv);
3761 	}
3762 }
3763 
3764 static int
3765 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3766 			struct rte_cryptodev_config *config __rte_unused)
3767 {
3768 	PMD_INIT_FUNC_TRACE();
3769 
3770 	return 0;
3771 }
3772 
3773 static int
3774 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3775 {
3776 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3777 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3778 	struct dpseci_attr attr;
3779 	struct dpaa2_queue *dpaa2_q;
3780 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3781 					dev->data->queue_pairs;
3782 	struct dpseci_rx_queue_attr rx_attr;
3783 	struct dpseci_tx_queue_attr tx_attr;
3784 	int ret, i;
3785 
3786 	PMD_INIT_FUNC_TRACE();
3787 
3788 	/* Change the tx burst function if ordered queues are used */
3789 	if (priv->en_ordered)
3790 		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
3791 
3792 	memset(&attr, 0, sizeof(struct dpseci_attr));
3793 
3794 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3795 	if (ret) {
3796 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3797 			      priv->hw_id);
3798 		goto get_attr_failure;
3799 	}
3800 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3801 	if (ret) {
3802 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3803 		goto get_attr_failure;
3804 	}
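	/*
	 * Retrieve and cache the Rx/Tx frame queue IDs (FQIDs) for each
	 * configured queue pair; the data path uses these to enqueue and
	 * dequeue frames.
	 */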
3805 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3806 		dpaa2_q = &qp[i]->rx_vq;
3807 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3808 				    &rx_attr);
3809 		dpaa2_q->fqid = rx_attr.fqid;
3810 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3811 	}
3812 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3813 		dpaa2_q = &qp[i]->tx_vq;
3814 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3815 				    &tx_attr);
3816 		dpaa2_q->fqid = tx_attr.fqid;
3817 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3818 	}
3819 
3820 	return 0;
3821 get_attr_failure:
3822 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3823 	return -1;
3824 }
3825 
3826 static void
3827 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3828 {
3829 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3830 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3831 	int ret;
3832 
3833 	PMD_INIT_FUNC_TRACE();
3834 
3835 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3836 	if (ret) {
3837 		DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
3838 			     priv->hw_id);
3839 		return;
3840 	}
3841 
3842 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3843 	if (ret < 0) {
3844 		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3845 		return;
3846 	}
3847 }
3848 
3849 static int
3850 dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
3851 {
3852 	PMD_INIT_FUNC_TRACE();
3853 
3854 	return 0;
3855 }
3856 
3857 static void
3858 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3859 			struct rte_cryptodev_info *info)
3860 {
3861 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3862 
3863 	PMD_INIT_FUNC_TRACE();
3864 	if (info != NULL) {
3865 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3866 		info->feature_flags = dev->feature_flags;
3867 		info->capabilities = dpaa2_sec_capabilities;
3868 		/* No limit on the number of sessions */
3869 		info->sym.max_nb_sessions = 0;
3870 		info->driver_id = cryptodev_driver_id;
3871 	}
3872 }
3873 
3874 static
3875 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3876 			 struct rte_cryptodev_stats *stats)
3877 {
3878 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3879 	struct fsl_mc_io dpseci;
3880 	struct dpseci_sec_counters counters = {0};
3881 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3882 					dev->data->queue_pairs;
3883 	int ret, i;
3884 
3885 	PMD_INIT_FUNC_TRACE();
3886 	if (stats == NULL) {
3887 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3888 		return;
3889 	}
3890 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3891 		if (qp == NULL || qp[i] == NULL) {
3892 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3893 			continue;
3894 		}
3895 
3896 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3897 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3898 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3899 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3900 	}
3901 
3902 	/* If a secondary process accesses the stats, the MCP portal in
3903 	 * priv->hw may hold the primary process address; use the secondary
3904 	 * process based MCP portal address for this object instead.
3905 	 */
3906 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3907 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3908 				      &counters);
3909 	if (ret) {
3910 		DPAA2_SEC_ERR("Reading SEC counters failed");
3911 	} else {
3912 		DPAA2_SEC_INFO("dpseci hardware stats:"
3913 			    "\n\tNum of Requests Dequeued = %" PRIu64
3914 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3915 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3916 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3917 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3918 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3919 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3920 			    counters.dequeued_requests,
3921 			    counters.ob_enc_requests,
3922 			    counters.ib_dec_requests,
3923 			    counters.ob_enc_bytes,
3924 			    counters.ob_prot_bytes,
3925 			    counters.ib_dec_bytes,
3926 			    counters.ib_valid_bytes);
3927 	}
3928 }
3929 
3930 static
3931 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3932 {
3933 	int i;
3934 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3935 				   (dev->data->queue_pairs);
3936 
3937 	PMD_INIT_FUNC_TRACE();
3938 
3939 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3940 		if (qp[i] == NULL) {
3941 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3942 			continue;
3943 		}
3944 		qp[i]->tx_vq.rx_pkts = 0;
3945 		qp[i]->tx_vq.tx_pkts = 0;
3946 		qp[i]->tx_vq.err_pkts = 0;
3947 		qp[i]->rx_vq.rx_pkts = 0;
3948 		qp[i]->rx_vq.tx_pkts = 0;
3949 		qp[i]->rx_vq.err_pkts = 0;
3950 	}
3951 }
3952 
3953 static void __rte_hot
3954 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3955 				 const struct qbman_fd *fd,
3956 				 const struct qbman_result *dq,
3957 				 struct dpaa2_queue *rxq,
3958 				 struct rte_event *ev)
3959 {
3960 	/* Prefetching mbuf */
3961 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3962 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3963 
3964 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3965 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3966 
3967 	ev->flow_id = rxq->ev.flow_id;
3968 	ev->sub_event_type = rxq->ev.sub_event_type;
3969 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3970 	ev->op = RTE_EVENT_OP_NEW;
3971 	ev->sched_type = rxq->ev.sched_type;
3972 	ev->queue_id = rxq->ev.queue_id;
3973 	ev->priority = rxq->ev.priority;
3974 	ev->event_ptr = sec_fd_to_mbuf(fd);
3975 
3976 	qbman_swp_dqrr_consume(swp, dq);
3977 }

3978 static void __rte_hot
3979 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3980 				 const struct qbman_fd *fd,
3981 				 const struct qbman_result *dq,
3982 				 struct dpaa2_queue *rxq,
3983 				 struct rte_event *ev)
3984 {
3985 	uint8_t dqrr_index;
3986 	struct rte_crypto_op *crypto_op;
3987 	/* Prefetching mbuf */
3988 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3989 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3990 
3991 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3992 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3993 
3994 	ev->flow_id = rxq->ev.flow_id;
3995 	ev->sub_event_type = rxq->ev.sub_event_type;
3996 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3997 	ev->op = RTE_EVENT_OP_NEW;
3998 	ev->sched_type = rxq->ev.sched_type;
3999 	ev->queue_id = rxq->ev.queue_id;
4000 	ev->priority = rxq->ev.priority;
4001 
4002 	ev->event_ptr = sec_fd_to_mbuf(fd);
	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
4003 	dqrr_index = qbman_get_dqrr_idx(dq);
4004 	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
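	/*
	 * Do not consume the DQRR entry here; record it as held so that it
	 * is consumed via DCA when the response is enqueued, preserving
	 * atomic scheduling for the flow.
	 */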
4005 	DPAA2_PER_LCORE_DQRR_SIZE++;
4006 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
4007 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
4008 }
4009 
4010 static void __rte_hot
4011 dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
4012 				const struct qbman_fd *fd,
4013 				const struct qbman_result *dq,
4014 				struct dpaa2_queue *rxq,
4015 				struct rte_event *ev)
4016 {
4017 	struct rte_crypto_op *crypto_op;
4018 
4019 	/* Prefetching mbuf */
4020 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
4021 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
4022 
4023 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
4024 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
4025 
4026 	ev->flow_id = rxq->ev.flow_id;
4027 	ev->sub_event_type = rxq->ev.sub_event_type;
4028 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
4029 	ev->op = RTE_EVENT_OP_NEW;
4030 	ev->sched_type = rxq->ev.sched_type;
4031 	ev->queue_id = rxq->ev.queue_id;
4032 	ev->priority = rxq->ev.priority;
4033 	ev->event_ptr = sec_fd_to_mbuf(fd);
	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
4034 
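	/*
	 * Stash the order restoration point id and the sequence number from
	 * the dequeue response in the mbuf, so that the ordered enqueue path
	 * can restore the original frame order.
	 */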
4035 	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
4036 	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
4037 		DPAA2_EQCR_OPRID_SHIFT;
4038 	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
4039 		DPAA2_EQCR_SEQNUM_SHIFT;
4040 
4041 	qbman_swp_dqrr_consume(swp, dq);
4042 }
4043 
4044 int
4045 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
4046 		int qp_id,
4047 		struct dpaa2_dpcon_dev *dpcon,
4048 		const struct rte_event *event)
4049 {
4050 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4051 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4052 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
4053 	struct dpseci_rx_queue_cfg cfg;
4054 	uint8_t priority;
4055 	int ret;
4056 
4057 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
4058 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
4059 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
4060 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
4061 	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
4062 		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
4063 	else
4064 		return -EINVAL;
4065 
4066 	priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
4067 		   (dpcon->num_priorities - 1);
4068 
4069 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4070 	cfg.options = DPSECI_QUEUE_OPT_DEST;
4071 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
4072 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
4073 	cfg.dest_cfg.priority = priority;
4074 
4075 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
4076 	cfg.user_ctx = (size_t)(qp);
4077 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
4078 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
4079 		cfg.order_preservation_en = 1;
4080 	}
4081 
4082 	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
4083 		struct opr_cfg ocfg;
4084 
4085 		/* Restoration window size = 256 frames */
4086 		ocfg.oprrws = 3;
4087 		/* Restoration window size = 512 frames for LX2 */
4088 		if (dpaa2_svr_family == SVR_LX2160A)
4089 			ocfg.oprrws = 4;
4090 		/* Auto advance NESN window enabled */
4091 		ocfg.oa = 1;
4092 		/* Late arrival window size disabled */
4093 		ocfg.olws = 0;
4094 		/* ORL resource exhaustion advance NESN disabled */
4095 		ocfg.oeane = 0;
4096 
4097 		if (priv->en_loose_ordered)
4098 			ocfg.oloe = 1;
4099 		else
4100 			ocfg.oloe = 0;
4101 
4102 		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
4103 				   qp_id, OPR_OPT_CREATE, &ocfg);
4104 		if (ret) {
4105 			RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
4106 			return ret;
4107 		}
4108 		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
4109 		priv->en_ordered = 1;
4110 	}
4111 
4112 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4113 				  qp_id, &cfg);
4114 	if (ret) {
4115 		RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4116 		return ret;
4117 	}
4118 
4119 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
4120 
4121 	return 0;
4122 }
4123 
4124 int
4125 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
4126 			int qp_id)
4127 {
4128 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4129 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4130 	struct dpseci_rx_queue_cfg cfg;
4131 	int ret;
4132 
4133 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
4134 	cfg.options = DPSECI_QUEUE_OPT_DEST;
4135 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
4136 
4137 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
4138 				  qp_id, &cfg);
4139 	if (ret)
4140 		RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
4141 
4142 	return ret;
4143 }
4144 
4145 static struct rte_cryptodev_ops crypto_ops = {
4146 	.dev_configure	      = dpaa2_sec_dev_configure,
4147 	.dev_start	      = dpaa2_sec_dev_start,
4148 	.dev_stop	      = dpaa2_sec_dev_stop,
4149 	.dev_close	      = dpaa2_sec_dev_close,
4150 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
4151 	.stats_get	      = dpaa2_sec_stats_get,
4152 	.stats_reset	      = dpaa2_sec_stats_reset,
4153 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
4154 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
4155 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
4156 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
4157 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
4158 	/* Raw data-path API related operations */
4159 	.sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
4160 	.sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
4161 };
4162 
4163 #ifdef RTE_LIB_SECURITY
4164 static const struct rte_security_capability *
4165 dpaa2_sec_capabilities_get(void *device __rte_unused)
4166 {
4167 	return dpaa2_sec_security_cap;
4168 }
4169 
4170 static const struct rte_security_ops dpaa2_sec_security_ops = {
4171 	.session_create = dpaa2_sec_security_session_create,
4172 	.session_update = NULL,
4173 	.session_stats_get = NULL,
4174 	.session_destroy = dpaa2_sec_security_session_destroy,
4175 	.set_pkt_metadata = NULL,
4176 	.capabilities_get = dpaa2_sec_capabilities_get
4177 };
4178 #endif
4179 
4180 static int
4181 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
4182 {
4183 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4184 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
4185 	int ret;
4186 
4187 	PMD_INIT_FUNC_TRACE();
4188 
4189 	/* This function is the reverse of dpaa2_sec_dev_init.
4190 	 * It does the following:
4191 	 * 1. Detaches the DPSECI from attached resources (buffer pools, dpbp_id).
4192 	 * 2. Closes the DPSECI device.
4193 	 * 3. Frees the allocated resources.
4194 	 */
4195 
4196 	/* Close the device at the underlying layer */
4197 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
4198 	if (ret) {
4199 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
4200 		return -1;
4201 	}
4202 
4203 	/* Free the memory allocated for the DPSECI object and driver resources */
4204 	priv->hw = NULL;
4205 	rte_free(dpseci);
4206 	rte_free(dev->security_ctx);
4207 	rte_mempool_free(priv->fle_pool);
4208 
4209 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
4210 		       dev->data->name, rte_socket_id());
4211 
4212 	return 0;
4213 }
4214 
4215 static int
4216 check_devargs_handler(const char *key, const char *value,
4217 		      void *opaque)
4218 {
4219 	struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
4220 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
4221 
4222 	if (!strcmp(key, "drv_strict_order")) {
4223 		priv->en_loose_ordered = false;
4224 	} else if (!strcmp(key, "drv_dump_mode")) {
4225 		dpaa2_sec_dp_dump = atoi(value);
4226 		if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
4227 			DPAA2_SEC_WARN("Unsupported drv_dump_mode value, "
4228 				      "falling back to full dump");
4230 			dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
4231 		}
4232 	} else
4233 		return -1;
4234 
4235 	return 0;
4236 }
4237 
4238 static void
4239 dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
4240 {
4241 	struct rte_kvargs *kvlist;
4242 	struct rte_devargs *devargs;
4243 
4244 	devargs = cryptodev->device->devargs;
4245 	if (!devargs)
4246 		return;
4247 
4248 	kvlist = rte_kvargs_parse(devargs->args, NULL);
4249 	if (!kvlist)
4250 		return;
4251 
4252 	if (!rte_kvargs_count(kvlist, key)) {
4253 		rte_kvargs_free(kvlist);
4254 		return;
4255 	}
4256 
4257 	rte_kvargs_process(kvlist, key,
4258 			check_devargs_handler, (void *)cryptodev);
4259 	rte_kvargs_free(kvlist);
4260 }
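
/*
 * Illustrative usage (the device name below is hypothetical): the devargs
 * parsed above can be supplied on the EAL command line, e.g.
 *
 *	-a fslmc:dpseci.1,drv_dump_mode=2,drv_strict_order=1
 *
 * where drv_dump_mode selects the DPAA2_SEC_DP_DUMP level and
 * drv_strict_order disables loose ordering.
 */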
4261 
4262 static int
4263 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
4264 {
4265 	struct dpaa2_sec_dev_private *internals;
4266 	struct rte_device *dev = cryptodev->device;
4267 	struct rte_dpaa2_device *dpaa2_dev;
4268 #ifdef RTE_LIB_SECURITY
4269 	struct rte_security_ctx *security_instance;
4270 #endif
4271 	struct fsl_mc_io *dpseci;
4272 	uint16_t token;
4273 	struct dpseci_attr attr;
4274 	int retcode, hw_id;
4275 	char str[30];
4276 
4277 	PMD_INIT_FUNC_TRACE();
4278 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
4279 	hw_id = dpaa2_dev->object_id;
4280 
4281 	cryptodev->driver_id = cryptodev_driver_id;
4282 	cryptodev->dev_ops = &crypto_ops;
4283 
4284 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
4285 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
4286 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
4287 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
4288 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
4289 			RTE_CRYPTODEV_FF_SECURITY |
4290 			RTE_CRYPTODEV_FF_SYM_RAW_DP |
4291 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
4292 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
4293 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
4294 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
4295 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
4296 
4297 	internals = cryptodev->data->dev_private;
4298 
4299 	/*
4300 	 * For secondary processes, we don't initialise any further, as the
4301 	 * primary has already done this work.
4302 	 */
4304 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
4305 		DPAA2_SEC_DEBUG("Device already init by primary process");
4306 		return 0;
4307 	}
4308 #ifdef RTE_LIB_SECURITY
4309 	/* Initialize security_ctx only for the primary process */
4310 	security_instance = rte_malloc("rte_security_instances_ops",
4311 				sizeof(struct rte_security_ctx), 0);
4312 	if (security_instance == NULL)
4313 		return -ENOMEM;
4314 	security_instance->device = (void *)cryptodev;
4315 	security_instance->ops = &dpaa2_sec_security_ops;
4316 	security_instance->sess_cnt = 0;
4317 	cryptodev->security_ctx = security_instance;
4318 #endif
4319 	/* Open the rte device via MC and save the handle for further use */
4320 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
4321 				sizeof(struct fsl_mc_io), 0);
4322 	if (!dpseci) {
4323 		DPAA2_SEC_ERR(
4324 			"Error in allocating the memory for dpsec object");
4325 		return -ENOMEM;
4326 	}
4327 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
4328 
4329 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
4330 	if (retcode != 0) {
4331 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
4332 			      retcode);
4333 		goto init_error;
4334 	}
4335 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
4336 	if (retcode != 0) {
4337 		DPAA2_SEC_ERR(
4338 			     "Cannot get dpsec device attributes: Error = %x",
4339 			     retcode);
4340 		goto init_error;
4341 	}
4342 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
4343 			"dpsec-%u", hw_id);
4344 
4345 	internals->max_nb_queue_pairs = attr.num_tx_queues;
4346 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
4347 	internals->hw = dpseci;
4348 	internals->token = token;
4349 	internals->en_loose_ordered = true;
4350 
4351 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
4352 			getpid(), cryptodev->data->dev_id);
4353 	internals->fle_pool = rte_mempool_create((const char *)str,
4354 			FLE_POOL_NUM_BUFS,
4355 			FLE_POOL_BUF_SIZE,
4356 			FLE_POOL_CACHE_SIZE, 0,
4357 			NULL, NULL, NULL, NULL,
4358 			SOCKET_ID_ANY, 0);
4359 	if (!internals->fle_pool) {
4360 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
4361 		goto init_error;
4362 	}
4363 
4364 	dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
4365 	dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
4366 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
4367 	return 0;
4368 
4369 init_error:
4370 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
4371 
4372 	/* dpaa2_sec_uninit(crypto_dev_name); */
4373 	return -EFAULT;
4374 }
4375 
4376 static int
4377 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
4378 			  struct rte_dpaa2_device *dpaa2_dev)
4379 {
4380 	struct rte_cryptodev *cryptodev;
4381 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
4382 
4383 	int retval;
4384 
4385 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
4386 			dpaa2_dev->object_id);
4387 
4388 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
4389 	if (cryptodev == NULL)
4390 		return -ENOMEM;
4391 
4392 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4393 		cryptodev->data->dev_private = rte_zmalloc_socket(
4394 					"cryptodev private structure",
4395 					sizeof(struct dpaa2_sec_dev_private),
4396 					RTE_CACHE_LINE_SIZE,
4397 					rte_socket_id());
4398 
4399 		if (cryptodev->data->dev_private == NULL)
4400 			rte_panic("Cannot allocate memzone for private "
4401 				  "device data");
4402 	}
4403 
4404 	dpaa2_dev->cryptodev = cryptodev;
4405 	cryptodev->device = &dpaa2_dev->device;
4406 
4407 	/* init user callbacks */
4408 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
4409 
4410 	if (dpaa2_svr_family == SVR_LX2160A)
4411 		rta_set_sec_era(RTA_SEC_ERA_10);
4412 	else
4413 		rta_set_sec_era(RTA_SEC_ERA_8);
4414 
4415 	DPAA2_SEC_INFO("DPAA2-SEC ERA is %d", rta_get_sec_era());
4416 
4417 	/* Invoke PMD device initialization function */
4418 	retval = dpaa2_sec_dev_init(cryptodev);
4419 	if (retval == 0) {
4420 		rte_cryptodev_pmd_probing_finish(cryptodev);
4421 		return 0;
4422 	}
4423 
4424 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
4425 		rte_free(cryptodev->data->dev_private);
4426 
4427 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
4428 
4429 	return -ENXIO;
4430 }
4431 
4432 static int
4433 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4434 {
4435 	struct rte_cryptodev *cryptodev;
4436 	int ret;
4437 
4438 	cryptodev = dpaa2_dev->cryptodev;
4439 	if (cryptodev == NULL)
4440 		return -ENODEV;
4441 
4442 	ret = dpaa2_sec_uninit(cryptodev);
4443 	if (ret)
4444 		return ret;
4445 
4446 	return rte_cryptodev_pmd_destroy(cryptodev);
4447 }
4448 
4449 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4450 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4451 	.drv_type = DPAA2_CRYPTO,
4452 	.driver = {
4453 		.name = "DPAA2 SEC PMD"
4454 	},
4455 	.probe = cryptodev_dpaa2_sec_probe,
4456 	.remove = cryptodev_dpaa2_sec_remove,
4457 };
4458 
4459 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4460 
4461 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4462 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4463 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4464 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
4465 		DRIVER_STRICT_ORDER "=<int>"
4466 		DRIVER_DUMP_MODE "=<int>");
4467 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4468