/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2020 NXP
 *
 */

#include <time.h>
#include <net/if.h>
#include <unistd.h>

#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>
#include <desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE(num)	(FLE_POOL_BUF_SIZE + ((num) * 32))
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

static uint8_t cryptodev_driver_id;

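/*
 * Layout note (derived from the FD builders below): for compound FDs the
 * first FLE of each allocation is bookkeeping only - it stores the crypto
 * op pointer and the session context - while fle + 1 is the output FLE,
 * fle + 2 is the input FLE, and fle + 3 onwards hold the scatter-gather
 * entries. FLE_SG_MEM_SIZE() accordingly reserves the base buffer plus
 * 32 bytes (one struct qbman_fle) per segment.
 */
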
#ifdef RTE_LIB_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
			   struct rte_crypto_op *op,
			   struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf;
	uint32_t in_len = 0, out_len = 0;

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	/* o/p segs */
	while (mbuf->next) {
		sge->length = mbuf->data_len;
		out_len += sge->length;
		sge++;
		mbuf = mbuf->next;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	}
	/* using buf_len for last buf - so that extra data can be added */
	sge->length = mbuf->buf_len - mbuf->data_off;
	out_len += sge->length;

	DPAA2_SET_FLE_FIN(sge);
	op_fle->length = out_len;

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* Configure input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
	sge->length = mbuf->data_len;
	in_len += sge->length;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		in_len += sge->length;
		mbuf = mbuf->next;
	}
	ip_fle->length = in_len;
	DPAA2_SET_FLE_FIN(sge);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct rte_mbuf *src_mbuf = sym_op->m_src;
	struct rte_mbuf *dst_mbuf = sym_op->m_dst;
	int retval;

	if (!dst_mbuf)
		dst_mbuf = src_mbuf;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* we are using the first FLE entry to store Mbuf */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_DP_ERR("Memory alloc failed");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(op_fle, bpid);
		DPAA2_SET_FLE_BPID(ip_fle, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(op_fle);
		DPAA2_SET_FLE_IVP(ip_fle);
	}

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	/* Configure Output FLE with dst mbuf data */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
	DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
	DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

	/* Configure Input FLE with src mbuf data */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
	DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
	DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* In case of PDCP, per packet HFN is stored in
	 * mbuf priv after sym_op.
	 */
	if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
		uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
					sess->pdcp.hfn_ovd_offset);
		/* enable HFN override */
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
		DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
	}

	return 0;
}

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	if (sym_op->m_dst)
		return build_proto_compound_fd(sess, op, fd, bpid);

	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

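	/*
	 * Simple (non-compound) FD: stash the crypto op pointer in the mbuf's
	 * buf_iova so it can be recovered on dequeue; the original IOVA is
	 * parked in aead.digest.phys_addr and restored in
	 * sec_simple_fd_to_mbuf().
	 */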
	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}
#endif

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so while retrieving we go back one FLE from the FD ADDR
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len) :
			sym_op->aead.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, dst->data_off + sym_op->aead.data.offset);
	sge->length = sym_op->aead.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	DPAA2_SET_FD_LEN(fd, fle->length);
	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
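	/*
	 * Authenticate-only bytes may precede (header) and follow (tail) the
	 * cipher region. Both lengths are packed into one 32-bit word below:
	 * tail length in the upper 16 bits, header length in the lower 16.
	 */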
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint16_t auth_hdr_len = sym_op->cipher.data.offset -
				sym_op->auth.data.offset;
	uint16_t auth_tail_len = sym_op->auth.data.length -
				sym_op->cipher.data.length - auth_hdr_len;
	uint32_t auth_only_len = (auth_tail_len << 16) | auth_hdr_len;

	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so
	 * while retrieving we go back one FLE from the FD ADDR to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

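	/*
	 * For SNOW 3G (UIA2) and ZUC (EIA3) the auth length and offset are
	 * given in bits; SEC works on bytes, so reject inputs that are not
	 * byte-aligned and convert the rest to bytes.
	 */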
	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		ip_fle->length += sge->length;
		sge++;
	}
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);

	if (data_len <= (mbuf->data_len - data_offset)) {
		sge->length = data_len;
		data_len = 0;
	} else {
		sge->length = mbuf->data_len - data_offset;

		/* remaining i/p segs */
		while ((data_len = data_len - sge->length) &&
		       (mbuf = mbuf->next)) {
			sge++;
			DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
			DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
			if (data_len > mbuf->data_len)
				sge->length = mbuf->data_len;
			else
				sge->length = data_len;
		}
	}

	if (sess->dir == DIR_DEC) {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length += sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	int data_len, data_offset;
	uint8_t *old_digest;
	int retval;

	data_len = sym_op->auth.data.length;
	data_offset = sym_op->auth.data.offset;

	if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
	    sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so
	 * while retrieving we go back one FLE from the FD ADDR to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;
	fle++;

	/* Setting input FLE */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	fle->length = data_len;

	if (sess->iv.length) {
		uint8_t *iv_ptr;

		iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
						   sess->iv.offset);

		if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
			iv_ptr = conv_to_snow_f9_iv(iv_ptr);
			sge->length = 12;
		} else if (sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
			iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
			sge->length = 8;
		} else {
			sge->length = sess->iv.length;
		}

		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
		fle->length = fle->length + sge->length;
		sge++;
	}

	/* Setting data to authenticate */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);
	sge->length = data_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = fle->length + sess->digest_length;
	}

	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);
	DPAA2_SET_FD_LEN(fd, fle->length);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	int data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

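	/*
	 * As for auth above: SNOW 3G (UEA2) and ZUC (EEA3) cipher lengths and
	 * offsets are given in bits and must be byte-aligned before being
	 * converted to bytes for SEC.
	 */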
	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL,
			FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs),
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = data_len;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + data_len;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + mbuf->data_off);
	sge->length = mbuf->data_len - data_offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval, data_len, data_offset;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	data_len = sym_op->cipher.data.length;
	data_offset = sym_op->cipher.data.offset;

	if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
		sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
		if ((data_len & 7) || (data_offset & 7)) {
			DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
			return -ENOTSUP;
		}

		data_len = data_len >> 3;
		data_offset = data_offset >> 3;
	}

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -ENOMEM;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so
	 * while retrieving we go back one FLE from the FD ADDR to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, data_len + sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		data_offset,
		data_len,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, data_offset + dst->data_off);

	fle->length = data_len + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = data_len + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, data_offset + sym_op->m_src->data_off);

	sge->length = data_len;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
#ifdef RTE_LIB_SECURITY
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
#endif
	else
		return -ENOTSUP;

	if (!sess)
		return -EINVAL;

	/* If any buffer is segmented, use the scatter-gather builders */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
		  ((op->sym->m_dst != NULL) &&
		   !rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIB_SECURITY
		case DPAA2_SEC_IPSEC:
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
#ifdef RTE_LIB_SECURITY
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_PDCP:
			ret = build_proto_compound_fd(sess, op, fd, bpid);
			break;
#endif
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
			ret = -ENOTSUP;
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send, retry_count;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
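			/*
			 * If the mbuf carries a DQRR index (set when ops are
			 * delivered through the event dev in atomic context),
			 * piggyback the DQRR entry consume on this enqueue
			 * via the DCA flag.
			 */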
			if (*dpaa2_seqn((*ops)->sym->m_src)) {
				uint8_t dqrr_index =
					*dpaa2_seqn((*ops)->sym->m_src) - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				*dpaa2_seqn((*ops)->sym->m_src) =
					DPAA2_INVALID_MBUF_SEQN;
			}

			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}

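		/*
		 * Enqueue the burst; the portal may accept fewer frames than
		 * requested, so retry the remainder and give up after
		 * DPAA2_MAX_TX_RETRY_COUNT consecutive busy responses.
		 */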
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[loop],
							 &flags[loop],
							 frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_ops -= loop;
					goto skip_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_ops -= loop;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}
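
/*
 * Usage sketch (illustration only, not part of this driver): the enqueue
 * and dequeue hooks in this file are reached through the generic cryptodev
 * API, e.g.
 *
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 */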

#ifdef RTE_LIB_SECURITY
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	int16_t diff = 0;
	dpaa2_sec_session *sess_priv __rte_unused;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;

	return op;
}
#endif

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

#ifdef RTE_LIB_SECURITY
	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd);
#endif
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored, so
	 * while retrieving we go back one FLE from the FD ADDR to get the
	 * mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

#ifdef RTE_LIB_SECURITY
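	/*
	 * Protocol offload (e.g. IPSEC) may change the frame length, so
	 * propagate the FD length back into the mbuf, charging the delta to
	 * the last segment.
	 */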
	if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		uint16_t len = DPAA2_GET_FD_LEN(fd);
		dst->pkt_len = len;
		while (dst->next != NULL) {
			len -= dst->data_len;
			dst = dst->next;
		}
		dst->data_len = len;
	}
#endif
	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Receive frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > dpaa2_dqrr_size) ?
				      dpaa2_dqrr_size : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * Also note that the SWP appears to be shared between the
		 * Ethernet driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and,
		 * if so, set the condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to the DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, there is nothing more to do. */
1765 	if (dev->data->queue_pairs[qp_id] != NULL) {
1766 		DPAA2_SEC_INFO("QP already setup");
1767 		return 0;
1768 	}
1769 
1770 	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
1771 		    dev, qp_id, qp_conf);
1772 
1773 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
1774 
1775 	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
1776 			RTE_CACHE_LINE_SIZE);
1777 	if (!qp) {
1778 		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
1779 		return -ENOMEM;
1780 	}
1781 
1782 	qp->rx_vq.crypto_data = dev->data;
1783 	qp->tx_vq.crypto_data = dev->data;
1784 	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
1785 		sizeof(struct queue_storage_info_t),
1786 		RTE_CACHE_LINE_SIZE);
1787 	if (!qp->rx_vq.q_storage) {
1788 		DPAA2_SEC_ERR("malloc failed for q_storage");
		rte_free(qp);
1789 		return -ENOMEM;
1790 	}
1791 	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
1792 
1793 	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
1794 		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		rte_free(qp->rx_vq.q_storage);
		rte_free(qp);
1795 		return -ENOMEM;
1796 	}
1797 
1798 	dev->data->queue_pairs[qp_id] = qp;
1799 
1800 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
1801 	cfg.user_ctx = (size_t)(&qp->rx_vq);
1802 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
1803 				      qp_id, &cfg);
1804 	return retcode;
1805 }
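
/*
 * Illustrative sketch only (guarded out of the build): the application-
 * side calls that end up in dpaa2_sec_queue_pair_setup() above. This PMD
 * marks qp_conf and socket_id as unused, so only the ids matter; the
 * mempool arguments and descriptor count are hypothetical.
 */
#if 0
static int
example_setup_one_qp(uint8_t dev_id, struct rte_mempool *sess_mp,
		     struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_config dev_conf = {
		.socket_id = 0,
		.nb_queue_pairs = 1,
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,
		.mp_session = sess_mp,
		.mp_session_private = sess_priv_mp,
	};
	int ret;

	ret = rte_cryptodev_configure(dev_id, &dev_conf);
	if (ret < 0)
		return ret;
	return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, 0);
}
#endif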
1806 
1807 /** Returns the size of the dpaa2_sec session structure */
1808 static unsigned int
1809 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1810 {
1811 	PMD_INIT_FUNC_TRACE();
1812 
1813 	return sizeof(dpaa2_sec_session);
1814 }
1815 
1816 static int
1817 dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
1818 		      struct rte_crypto_sym_xform *xform,
1819 		      dpaa2_sec_session *session)
1820 {
1821 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1822 	struct alginfo cipherdata;
1823 	int bufsize, ret = 0;
1824 	struct ctxt_priv *priv;
1825 	struct sec_flow_context *flc;
1826 
1827 	PMD_INIT_FUNC_TRACE();
1828 
1829 	/* For SEC CIPHER only one descriptor is required. */
1830 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1831 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1832 			RTE_CACHE_LINE_SIZE);
1833 	if (priv == NULL) {
1834 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1835 		return -ENOMEM;
1836 	}
1837 
1838 	priv->fle_pool = dev_priv->fle_pool;
1839 
1840 	flc = &priv->flc_desc[0].flc;
1841 
1842 	session->ctxt_type = DPAA2_SEC_CIPHER;
1843 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1844 			RTE_CACHE_LINE_SIZE);
1845 	if (session->cipher_key.data == NULL) {
1846 		DPAA2_SEC_ERR("No Memory for cipher key");
1847 		rte_free(priv);
1848 		return -ENOMEM;
1849 	}
1850 	session->cipher_key.length = xform->cipher.key.length;
1851 
1852 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1853 	       xform->cipher.key.length);
1854 	cipherdata.key = (size_t)session->cipher_key.data;
1855 	cipherdata.keylen = session->cipher_key.length;
1856 	cipherdata.key_enc_flags = 0;
1857 	cipherdata.key_type = RTA_DATA_IMM;
1858 
1859 	/* Set IV parameters */
1860 	session->iv.offset = xform->cipher.iv.offset;
1861 	session->iv.length = xform->cipher.iv.length;
1862 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1863 				DIR_ENC : DIR_DEC;
1864 
1865 	switch (xform->cipher.algo) {
1866 	case RTE_CRYPTO_CIPHER_AES_CBC:
1867 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1868 		cipherdata.algmode = OP_ALG_AAI_CBC;
1869 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1870 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1871 						SHR_NEVER, &cipherdata,
1872 						session->iv.length,
1873 						session->dir);
1874 		break;
1875 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1876 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
1877 		cipherdata.algmode = OP_ALG_AAI_CBC;
1878 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1879 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1880 						SHR_NEVER, &cipherdata,
1881 						session->iv.length,
1882 						session->dir);
1883 		break;
1884 	case RTE_CRYPTO_CIPHER_DES_CBC:
1885 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
1886 		cipherdata.algmode = OP_ALG_AAI_CBC;
1887 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
1888 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1889 						SHR_NEVER, &cipherdata,
1890 						session->iv.length,
1891 						session->dir);
1892 		break;
1893 	case RTE_CRYPTO_CIPHER_AES_CTR:
1894 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1895 		cipherdata.algmode = OP_ALG_AAI_CTR;
1896 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1897 		bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
1898 						SHR_NEVER, &cipherdata,
1899 						session->iv.length,
1900 						session->dir);
1901 		break;
1902 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1903 		cipherdata.algtype = OP_ALG_ALGSEL_SNOW_F8;
1904 		session->cipher_alg = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
1905 		bufsize = cnstr_shdsc_snow_f8(priv->flc_desc[0].desc, 1, 0,
1906 					      &cipherdata,
1907 					      session->dir);
1908 		break;
1909 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
1910 		cipherdata.algtype = OP_ALG_ALGSEL_ZUCE;
1911 		session->cipher_alg = RTE_CRYPTO_CIPHER_ZUC_EEA3;
1912 		bufsize = cnstr_shdsc_zuce(priv->flc_desc[0].desc, 1, 0,
1913 					      &cipherdata,
1914 					      session->dir);
1915 		break;
1916 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1917 	case RTE_CRYPTO_CIPHER_AES_F8:
1918 	case RTE_CRYPTO_CIPHER_AES_ECB:
1919 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1920 	case RTE_CRYPTO_CIPHER_3DES_CTR:
1921 	case RTE_CRYPTO_CIPHER_AES_XTS:
1922 	case RTE_CRYPTO_CIPHER_ARC4:
1923 	case RTE_CRYPTO_CIPHER_NULL:
1924 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1925 			xform->cipher.algo);
1926 		ret = -ENOTSUP;
1927 		goto error_out;
1928 	default:
1929 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
1930 			xform->cipher.algo);
1931 		ret = -ENOTSUP;
1932 		goto error_out;
1933 	}
1934 
1935 	if (bufsize < 0) {
1936 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
1937 		ret = -EINVAL;
1938 		goto error_out;
1939 	}
1940 
1941 	flc->word1_sdl = (uint8_t)bufsize;
1942 	session->ctxt = priv;
1943 
1944 #ifdef CAAM_DESC_DEBUG
1945 	int i;
1946 	for (i = 0; i < bufsize; i++)
1947 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
1948 #endif
1949 	return ret;
1950 
1951 error_out:
1952 	rte_free(session->cipher_key.data);
1953 	rte_free(priv);
1954 	return ret;
1955 }
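
/*
 * Illustrative sketch only (guarded out of the build): the shape of a
 * cipher-only transform that takes the AES-CBC branch above. The key
 * pointer, the 16-byte key length and the IV-after-sym-op layout are
 * hypothetical example choices, not requirements of this PMD.
 */
#if 0
static struct rte_crypto_sym_xform
example_aes_cbc_xform(uint8_t *key)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = 16 },
			/* IV placed right after the sym op in the crypto op */
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 16,
			},
		},
	};
	return xform;
}
#endif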
1956 
1957 static int
1958 dpaa2_sec_auth_init(struct rte_cryptodev *dev,
1959 		    struct rte_crypto_sym_xform *xform,
1960 		    dpaa2_sec_session *session)
1961 {
1962 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1963 	struct alginfo authdata;
1964 	int bufsize, ret = 0;
1965 	struct ctxt_priv *priv;
1966 	struct sec_flow_context *flc;
1967 
1968 	PMD_INIT_FUNC_TRACE();
1969 
1970 	/* For SEC AUTH three descriptors are required for various stages */
1971 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1972 			sizeof(struct ctxt_priv) + 3 *
1973 			sizeof(struct sec_flc_desc),
1974 			RTE_CACHE_LINE_SIZE);
1975 	if (priv == NULL) {
1976 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1977 		return -ENOMEM;
1978 	}
1979 
1980 	priv->fle_pool = dev_priv->fle_pool;
1981 	flc = &priv->flc_desc[DESC_INITFINAL].flc;
1982 
1983 	session->ctxt_type = DPAA2_SEC_AUTH;
1984 	session->auth_key.length = xform->auth.key.length;
1985 	if (xform->auth.key.length) {
1986 		session->auth_key.data = rte_zmalloc(NULL,
1987 			xform->auth.key.length,
1988 			RTE_CACHE_LINE_SIZE);
1989 		if (session->auth_key.data == NULL) {
1990 			DPAA2_SEC_ERR("Unable to allocate memory for auth key");
1991 			rte_free(priv);
1992 			return -ENOMEM;
1993 		}
1994 		memcpy(session->auth_key.data, xform->auth.key.data,
1995 		       xform->auth.key.length);
1996 		authdata.key = (size_t)session->auth_key.data;
1997 		authdata.key_enc_flags = 0;
1998 		authdata.key_type = RTA_DATA_IMM;
1999 	}
2000 	authdata.keylen = session->auth_key.length;
2001 
2002 	session->digest_length = xform->auth.digest_length;
2003 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2004 				DIR_ENC : DIR_DEC;
2005 
2006 	switch (xform->auth.algo) {
2007 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2008 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2009 		authdata.algmode = OP_ALG_AAI_HMAC;
2010 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2011 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2012 					   1, 0, SHR_NEVER, &authdata,
2013 					   !session->dir,
2014 					   session->digest_length);
2015 		break;
2016 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2017 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2018 		authdata.algmode = OP_ALG_AAI_HMAC;
2019 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2020 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2021 					   1, 0, SHR_NEVER, &authdata,
2022 					   !session->dir,
2023 					   session->digest_length);
2024 		break;
2025 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2026 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2027 		authdata.algmode = OP_ALG_AAI_HMAC;
2028 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2029 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2030 					   1, 0, SHR_NEVER, &authdata,
2031 					   !session->dir,
2032 					   session->digest_length);
2033 		break;
2034 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2035 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2036 		authdata.algmode = OP_ALG_AAI_HMAC;
2037 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2038 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2039 					   1, 0, SHR_NEVER, &authdata,
2040 					   !session->dir,
2041 					   session->digest_length);
2042 		break;
2043 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2044 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2045 		authdata.algmode = OP_ALG_AAI_HMAC;
2046 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2047 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2048 					   1, 0, SHR_NEVER, &authdata,
2049 					   !session->dir,
2050 					   session->digest_length);
2051 		break;
2052 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2053 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2054 		authdata.algmode = OP_ALG_AAI_HMAC;
2055 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2056 		bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
2057 					   1, 0, SHR_NEVER, &authdata,
2058 					   !session->dir,
2059 					   session->digest_length);
2060 		break;
2061 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2062 		authdata.algtype = OP_ALG_ALGSEL_SNOW_F9;
2063 		authdata.algmode = OP_ALG_AAI_F9;
2064 		session->auth_alg = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
2065 		session->iv.offset = xform->auth.iv.offset;
2066 		session->iv.length = xform->auth.iv.length;
2067 		bufsize = cnstr_shdsc_snow_f9(priv->flc_desc[DESC_INITFINAL].desc,
2068 					      1, 0, &authdata,
2069 					      !session->dir,
2070 					      session->digest_length);
2071 		break;
2072 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2073 		authdata.algtype = OP_ALG_ALGSEL_ZUCA;
2074 		authdata.algmode = OP_ALG_AAI_F9;
2075 		session->auth_alg = RTE_CRYPTO_AUTH_ZUC_EIA3;
2076 		session->iv.offset = xform->auth.iv.offset;
2077 		session->iv.length = xform->auth.iv.length;
2078 		bufsize = cnstr_shdsc_zuca(priv->flc_desc[DESC_INITFINAL].desc,
2079 					   1, 0, &authdata,
2080 					   !session->dir,
2081 					   session->digest_length);
2082 		break;
2083 	case RTE_CRYPTO_AUTH_SHA1:
2084 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2085 		authdata.algmode = OP_ALG_AAI_HASH;
2086 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
2087 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2088 					   1, 0, SHR_NEVER, &authdata,
2089 					   !session->dir,
2090 					   session->digest_length);
2091 		break;
2092 	case RTE_CRYPTO_AUTH_MD5:
2093 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2094 		authdata.algmode = OP_ALG_AAI_HASH;
2095 		session->auth_alg = RTE_CRYPTO_AUTH_MD5;
2096 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2097 					   1, 0, SHR_NEVER, &authdata,
2098 					   !session->dir,
2099 					   session->digest_length);
2100 		break;
2101 	case RTE_CRYPTO_AUTH_SHA256:
2102 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2103 		authdata.algmode = OP_ALG_AAI_HASH;
2104 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
2105 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2106 					   1, 0, SHR_NEVER, &authdata,
2107 					   !session->dir,
2108 					   session->digest_length);
2109 		break;
2110 	case RTE_CRYPTO_AUTH_SHA384:
2111 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2112 		authdata.algmode = OP_ALG_AAI_HASH;
2113 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
2114 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2115 					   1, 0, SHR_NEVER, &authdata,
2116 					   !session->dir,
2117 					   session->digest_length);
2118 		break;
2119 	case RTE_CRYPTO_AUTH_SHA512:
2120 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2121 		authdata.algmode = OP_ALG_AAI_HASH;
2122 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
2123 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2124 					   1, 0, SHR_NEVER, &authdata,
2125 					   !session->dir,
2126 					   session->digest_length);
2127 		break;
2128 	case RTE_CRYPTO_AUTH_SHA224:
2129 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2130 		authdata.algmode = OP_ALG_AAI_HASH;
2131 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
2132 		bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
2133 					   1, 0, SHR_NEVER, &authdata,
2134 					   !session->dir,
2135 					   session->digest_length);
2136 		break;
2137 	case RTE_CRYPTO_AUTH_AES_GMAC:
2138 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2139 	case RTE_CRYPTO_AUTH_AES_CMAC:
2140 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2141 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2142 	case RTE_CRYPTO_AUTH_NULL:
2143 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2144 			      xform->auth.algo);
2145 		ret = -ENOTSUP;
2146 		goto error_out;
2147 	default:
2148 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2149 			      xform->auth.algo);
2150 		ret = -ENOTSUP;
2151 		goto error_out;
2152 	}
2153 
2154 	if (bufsize < 0) {
2155 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2156 		ret = -EINVAL;
2157 		goto error_out;
2158 	}
2159 
2160 	flc->word1_sdl = (uint8_t)bufsize;
2161 	session->ctxt = priv;
2162 #ifdef CAAM_DESC_DEBUG
2163 	int i;
2164 	for (i = 0; i < bufsize; i++)
2165 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2166 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
2167 #endif
2168 
2169 	return ret;
2170 
2171 error_out:
2172 	rte_free(session->auth_key.data);
2173 	rte_free(priv);
2174 	return ret;
2175 }
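
/*
 * Illustrative sketch only (guarded out of the build): an auth-only
 * transform taking the SHA1-HMAC branch above. The key length and the
 * 20-byte (full SHA-1) digest length are example values.
 */
#if 0
static struct rte_crypto_sym_xform
example_sha1_hmac_xform(uint8_t *key, uint16_t keylen)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = key, .length = keylen },
			.digest_length = 20,
		},
	};
	return xform;
}
#endif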
2176 
2177 static int
2178 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
2179 		    struct rte_crypto_sym_xform *xform,
2180 		    dpaa2_sec_session *session)
2181 {
2182 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
2183 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2184 	struct alginfo aeaddata;
2185 	int bufsize;
2186 	struct ctxt_priv *priv;
2187 	struct sec_flow_context *flc;
2188 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
2189 	int err, ret = 0;
2190 
2191 	PMD_INIT_FUNC_TRACE();
2192 
2193 	/* Set IV parameters */
2194 	session->iv.offset = aead_xform->iv.offset;
2195 	session->iv.length = aead_xform->iv.length;
2196 	session->ctxt_type = DPAA2_SEC_AEAD;
2197 
2198 	/* For SEC AEAD only one descriptor is required */
2199 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2200 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2201 			RTE_CACHE_LINE_SIZE);
2202 	if (priv == NULL) {
2203 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2204 		return -ENOMEM;
2205 	}
2206 
2207 	priv->fle_pool = dev_priv->fle_pool;
2208 	flc = &priv->flc_desc[0].flc;
2209 
2210 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2211 					       RTE_CACHE_LINE_SIZE);
2212 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2213 		DPAA2_SEC_ERR("No Memory for aead key");
2214 		rte_free(priv);
2215 		return -ENOMEM;
2216 	}
2217 	memcpy(session->aead_key.data, aead_xform->key.data,
2218 	       aead_xform->key.length);
2219 
2220 	session->digest_length = aead_xform->digest_length;
2221 	session->aead_key.length = aead_xform->key.length;
2222 	ctxt->auth_only_len = aead_xform->aad_length;
2223 
2224 	aeaddata.key = (size_t)session->aead_key.data;
2225 	aeaddata.keylen = session->aead_key.length;
2226 	aeaddata.key_enc_flags = 0;
2227 	aeaddata.key_type = RTA_DATA_IMM;
2228 
2229 	switch (aead_xform->algo) {
2230 	case RTE_CRYPTO_AEAD_AES_GCM:
2231 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
2232 		aeaddata.algmode = OP_ALG_AAI_GCM;
2233 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2234 		break;
2235 	case RTE_CRYPTO_AEAD_AES_CCM:
2236 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
2237 			      aead_xform->algo);
2238 		ret = -ENOTSUP;
2239 		goto error_out;
2240 	default:
2241 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2242 			      aead_xform->algo);
2243 		ret = -ENOTSUP;
2244 		goto error_out;
2245 	}
2246 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2247 				DIR_ENC : DIR_DEC;
2248 
2249 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
2250 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2251 			       DESC_JOB_IO_LEN,
2252 			       (unsigned int *)priv->flc_desc[0].desc,
2253 			       &priv->flc_desc[0].desc[1], 1);
2254 
2255 	if (err < 0) {
2256 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2257 		ret = -EINVAL;
2258 		goto error_out;
2259 	}
2260 	if (priv->flc_desc[0].desc[1] & 1) {
2261 		aeaddata.key_type = RTA_DATA_IMM;
2262 	} else {
2263 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
2264 		aeaddata.key_type = RTA_DATA_PTR;
2265 	}
2266 	priv->flc_desc[0].desc[0] = 0;
2267 	priv->flc_desc[0].desc[1] = 0;
2268 
2269 	if (session->dir == DIR_ENC)
2270 		bufsize = cnstr_shdsc_gcm_encap(
2271 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2272 				&aeaddata, session->iv.length,
2273 				session->digest_length);
2274 	else
2275 		bufsize = cnstr_shdsc_gcm_decap(
2276 				priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
2277 				&aeaddata, session->iv.length,
2278 				session->digest_length);
2279 	if (bufsize < 0) {
2280 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2281 		ret = -EINVAL;
2282 		goto error_out;
2283 	}
2284 
2285 	flc->word1_sdl = (uint8_t)bufsize;
2286 	session->ctxt = priv;
2287 #ifdef CAAM_DESC_DEBUG
2288 	int i;
2289 	for (i = 0; i < bufsize; i++)
2290 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
2291 			    i, priv->flc_desc[0].desc[i]);
2292 #endif
2293 	return ret;
2294 
2295 error_out:
2296 	rte_free(session->aead_key.data);
2297 	rte_free(priv);
2298 	return ret;
2299 }
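
/*
 * Illustrative sketch only (guarded out of the build): an AES-GCM AEAD
 * transform handled by dpaa2_sec_aead_init() above. The 12-byte IV,
 * 16-byte tag and AAD length are conventional example values.
 */
#if 0
static struct rte_crypto_sym_xform
example_aes_gcm_xform(uint8_t *key)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 16 },
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 12,
			},
			.digest_length = 16,
			.aad_length = 16,
		},
	};
	return xform;
}
#endif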
2300 
2301 
2302 static int
2303 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
2304 		    struct rte_crypto_sym_xform *xform,
2305 		    dpaa2_sec_session *session)
2306 {
2307 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2308 	struct alginfo authdata, cipherdata;
2309 	int bufsize;
2310 	struct ctxt_priv *priv;
2311 	struct sec_flow_context *flc;
2312 	struct rte_crypto_cipher_xform *cipher_xform;
2313 	struct rte_crypto_auth_xform *auth_xform;
2314 	int err, ret = 0;
2315 
2316 	PMD_INIT_FUNC_TRACE();
2317 
2318 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
2319 		cipher_xform = &xform->cipher;
2320 		auth_xform = &xform->next->auth;
2321 		session->ctxt_type =
2322 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2323 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
2324 	} else {
2325 		cipher_xform = &xform->next->cipher;
2326 		auth_xform = &xform->auth;
2327 		session->ctxt_type =
2328 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2329 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
2330 	}
2331 
2332 	/* Set IV parameters */
2333 	session->iv.offset = cipher_xform->iv.offset;
2334 	session->iv.length = cipher_xform->iv.length;
2335 
2336 	/* For SEC cipher-auth chaining, only one descriptor is required */
2337 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2338 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
2339 			RTE_CACHE_LINE_SIZE);
2340 	if (priv == NULL) {
2341 		DPAA2_SEC_ERR("No Memory for priv CTXT");
2342 		return -ENOMEM;
2343 	}
2344 
2345 	priv->fle_pool = dev_priv->fle_pool;
2346 	flc = &priv->flc_desc[0].flc;
2347 
2348 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2349 					       RTE_CACHE_LINE_SIZE);
2350 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2351 		DPAA2_SEC_ERR("No Memory for cipher key");
2352 		rte_free(priv);
2353 		return -ENOMEM;
2354 	}
2355 	session->cipher_key.length = cipher_xform->key.length;
2356 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2357 					     RTE_CACHE_LINE_SIZE);
2358 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2359 		DPAA2_SEC_ERR("No Memory for auth key");
2360 		rte_free(session->cipher_key.data);
2361 		rte_free(priv);
2362 		return -ENOMEM;
2363 	}
2364 	session->auth_key.length = auth_xform->key.length;
2365 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2366 	       cipher_xform->key.length);
2367 	memcpy(session->auth_key.data, auth_xform->key.data,
2368 	       auth_xform->key.length);
2369 
2370 	authdata.key = (size_t)session->auth_key.data;
2371 	authdata.keylen = session->auth_key.length;
2372 	authdata.key_enc_flags = 0;
2373 	authdata.key_type = RTA_DATA_IMM;
2374 
2375 	session->digest_length = auth_xform->digest_length;
2376 
2377 	switch (auth_xform->algo) {
2378 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2379 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
2380 		authdata.algmode = OP_ALG_AAI_HMAC;
2381 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2382 		break;
2383 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2384 		authdata.algtype = OP_ALG_ALGSEL_MD5;
2385 		authdata.algmode = OP_ALG_AAI_HMAC;
2386 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2387 		break;
2388 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2389 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
2390 		authdata.algmode = OP_ALG_AAI_HMAC;
2391 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
2392 		break;
2393 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2394 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
2395 		authdata.algmode = OP_ALG_AAI_HMAC;
2396 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2397 		break;
2398 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2399 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
2400 		authdata.algmode = OP_ALG_AAI_HMAC;
2401 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2402 		break;
2403 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2404 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
2405 		authdata.algmode = OP_ALG_AAI_HMAC;
2406 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2407 		break;
2408 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2409 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2410 	case RTE_CRYPTO_AUTH_NULL:
2411 	case RTE_CRYPTO_AUTH_SHA1:
2412 	case RTE_CRYPTO_AUTH_SHA256:
2413 	case RTE_CRYPTO_AUTH_SHA512:
2414 	case RTE_CRYPTO_AUTH_SHA224:
2415 	case RTE_CRYPTO_AUTH_SHA384:
2416 	case RTE_CRYPTO_AUTH_MD5:
2417 	case RTE_CRYPTO_AUTH_AES_GMAC:
2418 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2419 	case RTE_CRYPTO_AUTH_AES_CMAC:
2420 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2421 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2422 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2423 			      auth_xform->algo);
2424 		ret = -ENOTSUP;
2425 		goto error_out;
2426 	default:
2427 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2428 			      auth_xform->algo);
2429 		ret = -ENOTSUP;
2430 		goto error_out;
2431 	}
2432 	cipherdata.key = (size_t)session->cipher_key.data;
2433 	cipherdata.keylen = session->cipher_key.length;
2434 	cipherdata.key_enc_flags = 0;
2435 	cipherdata.key_type = RTA_DATA_IMM;
2436 
2437 	switch (cipher_xform->algo) {
2438 	case RTE_CRYPTO_CIPHER_AES_CBC:
2439 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2440 		cipherdata.algmode = OP_ALG_AAI_CBC;
2441 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2442 		break;
2443 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2444 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2445 		cipherdata.algmode = OP_ALG_AAI_CBC;
2446 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2447 		break;
2448 	case RTE_CRYPTO_CIPHER_DES_CBC:
2449 		cipherdata.algtype = OP_ALG_ALGSEL_DES;
2450 		cipherdata.algmode = OP_ALG_AAI_CBC;
2451 		session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
2452 		break;
2453 	case RTE_CRYPTO_CIPHER_AES_CTR:
2454 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2455 		cipherdata.algmode = OP_ALG_AAI_CTR;
2456 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2457 		break;
2458 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2459 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2460 	case RTE_CRYPTO_CIPHER_NULL:
2461 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2462 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2463 	case RTE_CRYPTO_CIPHER_AES_ECB:
2464 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2465 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2466 			      cipher_xform->algo);
2467 		ret = -ENOTSUP;
2468 		goto error_out;
2469 	default:
2470 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2471 			      cipher_xform->algo);
2472 		ret = -ENOTSUP;
2473 		goto error_out;
2474 	}
2475 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2476 				DIR_ENC : DIR_DEC;
2477 
2478 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2479 	priv->flc_desc[0].desc[1] = authdata.keylen;
2480 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2481 			       DESC_JOB_IO_LEN,
2482 			       (unsigned int *)priv->flc_desc[0].desc,
2483 			       &priv->flc_desc[0].desc[2], 2);
2484 
2485 	if (err < 0) {
2486 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2487 		ret = -EINVAL;
2488 		goto error_out;
2489 	}
2490 	if (priv->flc_desc[0].desc[2] & 1) {
2491 		cipherdata.key_type = RTA_DATA_IMM;
2492 	} else {
2493 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2494 		cipherdata.key_type = RTA_DATA_PTR;
2495 	}
2496 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2497 		authdata.key_type = RTA_DATA_IMM;
2498 	} else {
2499 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2500 		authdata.key_type = RTA_DATA_PTR;
2501 	}
2502 	priv->flc_desc[0].desc[0] = 0;
2503 	priv->flc_desc[0].desc[1] = 0;
2504 	priv->flc_desc[0].desc[2] = 0;
2505 
2506 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2507 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2508 					      0, SHR_SERIAL,
2509 					      &cipherdata, &authdata,
2510 					      session->iv.length,
2511 					      session->digest_length,
2512 					      session->dir);
2513 		if (bufsize < 0) {
2514 			DPAA2_SEC_ERR("Crypto: Descriptor build failed");
2515 			ret = -EINVAL;
2516 			goto error_out;
2517 		}
2518 	} else {
2519 		DPAA2_SEC_ERR("Hash before cipher not supported");
2520 		ret = -ENOTSUP;
2521 		goto error_out;
2522 	}
2523 
2524 	flc->word1_sdl = (uint8_t)bufsize;
2525 	session->ctxt = priv;
2526 #ifdef CAAM_DESC_DEBUG
2527 	int i;
2528 	for (i = 0; i < bufsize; i++)
2529 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2530 			    i, priv->flc_desc[0].desc[i]);
2531 #endif
2532 
2533 	return ret;
2534 
2535 error_out:
2536 	rte_free(session->cipher_key.data);
2537 	rte_free(session->auth_key.data);
2538 	rte_free(priv);
2539 	return ret;
2540 }
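
/*
 * Illustrative sketch only (guarded out of the build): how a chained
 * cipher+auth transform is linked before being handed to this driver.
 * With cipher first in the chain, dpaa2_sec_set_session_parameters()
 * below sets ext_params.aead_ctxt.auth_cipher_text = true, and the
 * encrypt direction then selects DPAA2_SEC_CIPHER_HASH above.
 */
#if 0
static void
example_link_chain(struct rte_crypto_sym_xform *cipher,
		   struct rte_crypto_sym_xform *auth)
{
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	cipher->next = auth;	/* cipher-then-authenticate order */
	auth->next = NULL;
}
#endif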
2541 
2542 static int
2543 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2544 			    struct rte_crypto_sym_xform *xform,	void *sess)
2545 {
2546 	dpaa2_sec_session *session = sess;
2547 	int ret;
2548 
2549 	PMD_INIT_FUNC_TRACE();
2550 
2551 	if (unlikely(sess == NULL)) {
2552 		DPAA2_SEC_ERR("Invalid session struct");
2553 		return -EINVAL;
2554 	}
2555 
2556 	memset(session, 0, sizeof(dpaa2_sec_session));
2557 	/* Default IV length = 0 */
2558 	session->iv.length = 0;
2559 
2560 	/* Cipher Only */
2561 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2562 		ret = dpaa2_sec_cipher_init(dev, xform, session);
2563 
2564 	/* Authentication Only */
2565 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2566 		   xform->next == NULL) {
2567 		ret = dpaa2_sec_auth_init(dev, xform, session);
2568 
2569 	/* Cipher then Authenticate */
2570 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2571 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2572 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2573 		if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2574 			ret = dpaa2_sec_auth_init(dev, xform, session);
2575 		else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2576 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2577 		else
2578 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2579 	/* Authenticate then Cipher */
2580 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2581 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2582 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2583 		if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2584 			ret = dpaa2_sec_cipher_init(dev, xform, session);
2585 		else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2586 			ret = dpaa2_sec_auth_init(dev, xform, session);
2587 		else
2588 			ret = dpaa2_sec_aead_chain_init(dev, xform, session);
2589 	/* AEAD operation for AES-GCM kind of Algorithms */
2590 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2591 		   xform->next == NULL) {
2592 		ret = dpaa2_sec_aead_init(dev, xform, session);
2593 
2594 	} else {
2595 		DPAA2_SEC_ERR("Invalid crypto type");
2596 		return -EINVAL;
2597 	}
2598 
2599 	return ret;
2600 }
2601 
2602 #ifdef RTE_LIB_SECURITY
2603 static int
2604 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2605 			dpaa2_sec_session *session,
2606 			struct alginfo *aeaddata)
2607 {
2608 	PMD_INIT_FUNC_TRACE();
2609 
2610 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2611 					       RTE_CACHE_LINE_SIZE);
2612 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2613 		DPAA2_SEC_ERR("No Memory for aead key");
2614 		return -ENOMEM;
2615 	}
2616 	memcpy(session->aead_key.data, aead_xform->key.data,
2617 	       aead_xform->key.length);
2618 
2619 	session->digest_length = aead_xform->digest_length;
2620 	session->aead_key.length = aead_xform->key.length;
2621 
2622 	aeaddata->key = (size_t)session->aead_key.data;
2623 	aeaddata->keylen = session->aead_key.length;
2624 	aeaddata->key_enc_flags = 0;
2625 	aeaddata->key_type = RTA_DATA_IMM;
2626 
2627 	switch (aead_xform->algo) {
2628 	case RTE_CRYPTO_AEAD_AES_GCM:
2629 		switch (session->digest_length) {
2630 		case 8:
2631 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
2632 			break;
2633 		case 12:
2634 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
2635 			break;
2636 		case 16:
2637 			aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
2638 			break;
2639 		default:
2640 			DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
2641 				      session->digest_length);
2642 			return -EINVAL;
2643 		}
2644 		aeaddata->algmode = OP_ALG_AAI_GCM;
2645 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2646 		break;
2647 	case RTE_CRYPTO_AEAD_AES_CCM:
2648 		switch (session->digest_length) {
2649 		case 8:
2650 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
2651 			break;
2652 		case 12:
2653 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
2654 			break;
2655 		case 16:
2656 			aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
2657 			break;
2658 		default:
2659 			DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
2660 				      session->digest_length);
2661 			return -EINVAL;
2662 		}
2663 		aeaddata->algmode = OP_ALG_AAI_CCM;
2664 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
2665 		break;
2666 	default:
2667 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
2668 			      aead_xform->algo);
2669 		return -ENOTSUP;
2670 	}
2671 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2672 				DIR_ENC : DIR_DEC;
2673 
2674 	return 0;
2675 }
2676 
2677 static int
2678 dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2679 	struct rte_crypto_auth_xform *auth_xform,
2680 	dpaa2_sec_session *session,
2681 	struct alginfo *cipherdata,
2682 	struct alginfo *authdata)
2683 {
2684 	if (cipher_xform) {
2685 		session->cipher_key.data = rte_zmalloc(NULL,
2686 						       cipher_xform->key.length,
2687 						       RTE_CACHE_LINE_SIZE);
2688 		if (session->cipher_key.data == NULL &&
2689 				cipher_xform->key.length > 0) {
2690 			DPAA2_SEC_ERR("No Memory for cipher key");
2691 			return -ENOMEM;
2692 		}
2693 
2694 		session->cipher_key.length = cipher_xform->key.length;
2695 		memcpy(session->cipher_key.data, cipher_xform->key.data,
2696 				cipher_xform->key.length);
2697 		session->cipher_alg = cipher_xform->algo;
2698 	} else {
2699 		session->cipher_key.data = NULL;
2700 		session->cipher_key.length = 0;
2701 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2702 	}
2703 
2704 	if (auth_xform) {
2705 		session->auth_key.data = rte_zmalloc(NULL,
2706 						auth_xform->key.length,
2707 						RTE_CACHE_LINE_SIZE);
2708 		if (session->auth_key.data == NULL &&
2709 				auth_xform->key.length > 0) {
2710 			DPAA2_SEC_ERR("No Memory for auth key");
2711 			return -ENOMEM;
2712 		}
2713 		session->auth_key.length = auth_xform->key.length;
2714 		memcpy(session->auth_key.data, auth_xform->key.data,
2715 				auth_xform->key.length);
2716 		session->auth_alg = auth_xform->algo;
2717 		session->digest_length = auth_xform->digest_length;
2718 	} else {
2719 		session->auth_key.data = NULL;
2720 		session->auth_key.length = 0;
2721 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2722 	}
2723 
2724 	authdata->key = (size_t)session->auth_key.data;
2725 	authdata->keylen = session->auth_key.length;
2726 	authdata->key_enc_flags = 0;
2727 	authdata->key_type = RTA_DATA_IMM;
2728 	switch (session->auth_alg) {
2729 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2730 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2731 		authdata->algmode = OP_ALG_AAI_HMAC;
2732 		break;
2733 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2734 		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2735 		authdata->algmode = OP_ALG_AAI_HMAC;
2736 		break;
2737 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2738 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2739 		authdata->algmode = OP_ALG_AAI_HMAC;
2740 		if (session->digest_length != 16)
2741 			DPAA2_SEC_WARN(
2742 			"+++Using sha256-hmac truncated len is non-standard,"
2743 			"it will not work with lookaside proto");
2744 		break;
2745 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2746 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2747 		authdata->algmode = OP_ALG_AAI_HMAC;
2748 		break;
2749 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2750 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2751 		authdata->algmode = OP_ALG_AAI_HMAC;
2752 		break;
2753 	case RTE_CRYPTO_AUTH_AES_CMAC:
2754 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
2755 		break;
2756 	case RTE_CRYPTO_AUTH_NULL:
2757 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
2758 		break;
2759 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2760 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2761 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2762 	case RTE_CRYPTO_AUTH_SHA1:
2763 	case RTE_CRYPTO_AUTH_SHA256:
2764 	case RTE_CRYPTO_AUTH_SHA512:
2765 	case RTE_CRYPTO_AUTH_SHA224:
2766 	case RTE_CRYPTO_AUTH_SHA384:
2767 	case RTE_CRYPTO_AUTH_MD5:
2768 	case RTE_CRYPTO_AUTH_AES_GMAC:
2769 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2770 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2771 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2772 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2773 			      session->auth_alg);
2774 		return -ENOTSUP;
2775 	default:
2776 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2777 			      session->auth_alg);
2778 		return -ENOTSUP;
2779 	}
2780 	cipherdata->key = (size_t)session->cipher_key.data;
2781 	cipherdata->keylen = session->cipher_key.length;
2782 	cipherdata->key_enc_flags = 0;
2783 	cipherdata->key_type = RTA_DATA_IMM;
2784 
2785 	switch (session->cipher_alg) {
2786 	case RTE_CRYPTO_CIPHER_AES_CBC:
2787 		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
2788 		cipherdata->algmode = OP_ALG_AAI_CBC;
2789 		break;
2790 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2791 		cipherdata->algtype = OP_PCL_IPSEC_3DES;
2792 		cipherdata->algmode = OP_ALG_AAI_CBC;
2793 		break;
2794 	case RTE_CRYPTO_CIPHER_DES_CBC:
2795 		cipherdata->algtype = OP_PCL_IPSEC_DES;
2796 		cipherdata->algmode = OP_ALG_AAI_CBC;
2797 		break;
2798 	case RTE_CRYPTO_CIPHER_AES_CTR:
2799 		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
2800 		cipherdata->algmode = OP_ALG_AAI_CTR;
2801 		break;
2802 	case RTE_CRYPTO_CIPHER_NULL:
2803 		cipherdata->algtype = OP_PCL_IPSEC_NULL;
2804 		break;
2805 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2806 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2807 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2808 	case RTE_CRYPTO_CIPHER_3DES_CTR:
2809 	case RTE_CRYPTO_CIPHER_AES_ECB:
2810 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2811 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2812 			      session->cipher_alg);
2813 		return -ENOTSUP;
2814 	default:
2815 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2816 			      session->cipher_alg);
2817 		return -ENOTSUP;
2818 	}
2819 
2820 	return 0;
2821 }
2822 
2823 static int
2824 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2825 			    struct rte_security_session_conf *conf,
2826 			    void *sess)
2827 {
2828 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2829 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
2830 	struct rte_crypto_auth_xform *auth_xform = NULL;
2831 	struct rte_crypto_aead_xform *aead_xform = NULL;
2832 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2833 	struct ctxt_priv *priv;
2834 	struct alginfo authdata, cipherdata;
2835 	int bufsize;
2836 	struct sec_flow_context *flc;
2837 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
2838 	int ret = -1;
2839 
2840 	PMD_INIT_FUNC_TRACE();
2841 
2842 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2843 				sizeof(struct ctxt_priv) +
2844 				sizeof(struct sec_flc_desc),
2845 				RTE_CACHE_LINE_SIZE);
2846 
2847 	if (priv == NULL) {
2848 		DPAA2_SEC_ERR("No memory for priv CTXT");
2849 		return -ENOMEM;
2850 	}
2851 
2852 	priv->fle_pool = dev_priv->fle_pool;
2853 	flc = &priv->flc_desc[0].flc;
2854 
2855 	memset(session, 0, sizeof(dpaa2_sec_session));
2856 
2857 	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2858 		cipher_xform = &conf->crypto_xform->cipher;
2859 		if (conf->crypto_xform->next)
2860 			auth_xform = &conf->crypto_xform->next->auth;
2861 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2862 					session, &cipherdata, &authdata);
2863 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2864 		auth_xform = &conf->crypto_xform->auth;
2865 		if (conf->crypto_xform->next)
2866 			cipher_xform = &conf->crypto_xform->next->cipher;
2867 		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
2868 					session, &cipherdata, &authdata);
2869 	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2870 		aead_xform = &conf->crypto_xform->aead;
2871 		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
2872 					session, &cipherdata);
2873 		authdata.keylen = 0;
2874 		authdata.algtype = 0;
2875 	} else {
2876 		DPAA2_SEC_ERR("XFORM not specified");
2877 		ret = -EINVAL;
2878 		goto out;
2879 	}
2880 	if (ret) {
2881 		DPAA2_SEC_ERR("Failed to process xform");
2882 		goto out;
2883 	}
2884 
2885 	session->ctxt_type = DPAA2_SEC_IPSEC;
2886 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2887 		uint8_t *hdr = NULL;
2888 		struct ip ip4_hdr;
2889 		struct rte_ipv6_hdr ip6_hdr;
2890 		struct ipsec_encap_pdb encap_pdb;
2891 
2892 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2893 		/* For Sec Proto only one descriptor is required. */
2894 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2895 
2896 		/* copy algo specific data to PDB */
2897 		switch (cipherdata.algtype) {
2898 		case OP_PCL_IPSEC_AES_CTR:
2899 			encap_pdb.ctr.ctr_initial = 0x00000001;
2900 			encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2901 			break;
2902 		case OP_PCL_IPSEC_AES_GCM8:
2903 		case OP_PCL_IPSEC_AES_GCM12:
2904 		case OP_PCL_IPSEC_AES_GCM16:
2905 			memcpy(encap_pdb.gcm.salt,
2906 				(uint8_t *)&(ipsec_xform->salt), 4);
2907 			break;
2908 		}
2909 
2910 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2911 			PDBOPTS_ESP_OIHI_PDB_INL |
2912 			PDBOPTS_ESP_IVSRC |
2913 			PDBHMO_ESP_ENCAP_DTTL |
2914 			PDBHMO_ESP_SNR;
2915 		if (ipsec_xform->options.esn)
2916 			encap_pdb.options |= PDBOPTS_ESP_ESN;
2917 		encap_pdb.spi = ipsec_xform->spi;
2918 		session->dir = DIR_ENC;
2919 		if (ipsec_xform->tunnel.type ==
2920 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2921 			encap_pdb.ip_hdr_len = sizeof(struct ip);
2922 			ip4_hdr.ip_v = IPVERSION;
2923 			ip4_hdr.ip_hl = 5;
2924 			ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2925 			ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2926 			ip4_hdr.ip_id = 0;
2927 			ip4_hdr.ip_off = 0;
2928 			ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2929 			ip4_hdr.ip_p = IPPROTO_ESP;
2930 			ip4_hdr.ip_sum = 0;
2931 			ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2932 			ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2933 			ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)
2934 					&ip4_hdr, sizeof(struct ip));
2935 			hdr = (uint8_t *)&ip4_hdr;
2936 		} else if (ipsec_xform->tunnel.type ==
2937 				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2938 			ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2939 				DPAA2_IPv6_DEFAULT_VTC_FLOW |
2940 				((ipsec_xform->tunnel.ipv6.dscp <<
2941 					RTE_IPV6_HDR_TC_SHIFT) &
2942 					RTE_IPV6_HDR_TC_MASK) |
2943 				((ipsec_xform->tunnel.ipv6.flabel <<
2944 					RTE_IPV6_HDR_FL_SHIFT) &
2945 					RTE_IPV6_HDR_FL_MASK));
2946 			/* Payload length will be updated by HW */
2947 			ip6_hdr.payload_len = 0;
2948 			ip6_hdr.hop_limits =
2949 					ipsec_xform->tunnel.ipv6.hlimit;
2950 			ip6_hdr.proto = (ipsec_xform->proto ==
2951 					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2952 					IPPROTO_ESP : IPPROTO_AH;
2953 			memcpy(&ip6_hdr.src_addr,
2954 				&ipsec_xform->tunnel.ipv6.src_addr, 16);
2955 			memcpy(&ip6_hdr.dst_addr,
2956 				&ipsec_xform->tunnel.ipv6.dst_addr, 16);
2957 			encap_pdb.ip_hdr_len = sizeof(struct rte_ipv6_hdr);
2958 			hdr = (uint8_t *)&ip6_hdr;
2959 		}
2960 
2961 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2962 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
2963 				SHR_WAIT : SHR_SERIAL, &encap_pdb,
2964 				hdr, &cipherdata, &authdata);
2965 	} else if (ipsec_xform->direction ==
2966 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2967 		struct ipsec_decap_pdb decap_pdb;
2968 
2969 		flc->dhr = SEC_FLC_DHR_INBOUND;
2970 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2971 		/* copy algo specific data to PDB */
2972 		switch (cipherdata.algtype) {
2973 		case OP_PCL_IPSEC_AES_CTR:
2974 			decap_pdb.ctr.ctr_initial = 0x00000001;
2975 			decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2976 			break;
2977 		case OP_PCL_IPSEC_AES_GCM8:
2978 		case OP_PCL_IPSEC_AES_GCM12:
2979 		case OP_PCL_IPSEC_AES_GCM16:
2980 			memcpy(decap_pdb.gcm.salt,
2981 				(uint8_t *)&(ipsec_xform->salt), 4);
2982 			break;
2983 		}
2984 
2985 		decap_pdb.options = (ipsec_xform->tunnel.type ==
2986 				RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
2987 				sizeof(struct ip) << 16 :
2988 				sizeof(struct rte_ipv6_hdr) << 16;
2989 		if (ipsec_xform->options.esn)
2990 			decap_pdb.options |= PDBOPTS_ESP_ESN;
2991 
2992 		if (ipsec_xform->replay_win_sz) {
2993 			uint32_t win_sz;
2994 			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2995 
2996 			if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
2997 				DPAA2_SEC_INFO("Max anti-replay window size = 128");
2998 				win_sz = 128;
2999 			}
3000 			switch (win_sz) {
3001 			case 1:
3002 			case 2:
3003 			case 4:
3004 			case 8:
3005 			case 16:
3006 			case 32:
3007 				decap_pdb.options |= PDBOPTS_ESP_ARS32;
3008 				break;
3009 			case 64:
3010 				decap_pdb.options |= PDBOPTS_ESP_ARS64;
3011 				break;
3012 			case 256:
3013 				decap_pdb.options |= PDBOPTS_ESP_ARS256;
3014 				break;
3015 			case 512:
3016 				decap_pdb.options |= PDBOPTS_ESP_ARS512;
3017 				break;
3018 			case 1024:
3019 				decap_pdb.options |= PDBOPTS_ESP_ARS1024;
3020 				break;
3021 			case 128:
3022 			default:
3023 				decap_pdb.options |= PDBOPTS_ESP_ARS128;
3024 			}
3025 		}
3026 		session->dir = DIR_DEC;
3027 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
3028 				1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
3029 				SHR_WAIT : SHR_SERIAL,
3030 				&decap_pdb, &cipherdata, &authdata);
3031 	} else {
		ret = -EINVAL;
3032 		goto out;
	}
3033 
3034 	if (bufsize < 0) {
3035 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		ret = -EINVAL;
3036 		goto out;
3037 	}
3038 
3039 	flc->word1_sdl = (uint8_t)bufsize;
3040 
3041 	/* Enable the stashing control bit */
3042 	DPAA2_SET_FLC_RSC(flc);
3043 	flc->word2_rflc_31_0 = lower_32_bits(
3044 			(size_t)&(((struct dpaa2_sec_qp *)
3045 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3046 	flc->word3_rflc_63_32 = upper_32_bits(
3047 			(size_t)&(((struct dpaa2_sec_qp *)
3048 			dev->data->queue_pairs[0])->rx_vq));
3049 
3050 	/* Set EWS bit i.e. enable write-safe */
3051 	DPAA2_SET_FLC_EWS(flc);
3052 	/* Set BS = 1, i.e. reuse input buffers as output buffers */
3053 	DPAA2_SET_FLC_REUSE_BS(flc);
3054 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3055 	DPAA2_SET_FLC_REUSE_FF(flc);
3056 
3057 	session->ctxt = priv;
3058 
3059 	return 0;
3060 out:
3061 	rte_free(session->auth_key.data);
3062 	rte_free(session->cipher_key.data);
3063 	rte_free(priv);
3064 	return ret;
3065 }
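
/*
 * Illustrative sketch only (guarded out of the build): a minimal
 * rte_security session configuration that reaches
 * dpaa2_sec_set_ipsec_session() above. The SPI, replay window size and
 * the egress/tunnel/IPv4 choices are hypothetical example values.
 */
#if 0
static void
example_fill_ipsec_conf(struct rte_security_session_conf *conf,
			struct rte_crypto_sym_xform *crypto_xform)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->ipsec.spi = 0x100;
	conf->ipsec.replay_win_sz = 128;
	conf->crypto_xform = crypto_xform;
}
#endif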
3066 
3067 static int
3068 dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
3069 			   struct rte_security_session_conf *conf,
3070 			   void *sess)
3071 {
3072 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
3073 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
3074 	struct rte_crypto_auth_xform *auth_xform = NULL;
3075 	struct rte_crypto_cipher_xform *cipher_xform;
3076 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
3077 	struct ctxt_priv *priv;
3078 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
3079 	struct alginfo authdata, cipherdata;
3080 	struct alginfo *p_authdata = NULL;
3081 	int bufsize = -1;
3082 	struct sec_flow_context *flc;
3083 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
3084 	int swap = true;
3085 #else
3086 	int swap = false;
3087 #endif
3088 
3089 	PMD_INIT_FUNC_TRACE();
3090 
3091 	memset(session, 0, sizeof(dpaa2_sec_session));
3092 
3093 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
3094 				sizeof(struct ctxt_priv) +
3095 				sizeof(struct sec_flc_desc),
3096 				RTE_CACHE_LINE_SIZE);
3097 
3098 	if (priv == NULL) {
3099 		DPAA2_SEC_ERR("No memory for priv CTXT");
3100 		return -ENOMEM;
3101 	}
3102 
3103 	priv->fle_pool = dev_priv->fle_pool;
3104 	flc = &priv->flc_desc[0].flc;
3105 
3106 	/* find xfrm types */
3107 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
3108 		cipher_xform = &xform->cipher;
3109 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
3110 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
3111 		session->ext_params.aead_ctxt.auth_cipher_text = true;
3112 		cipher_xform = &xform->cipher;
3113 		auth_xform = &xform->next->auth;
3114 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
3115 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
3116 		session->ext_params.aead_ctxt.auth_cipher_text = false;
3117 		cipher_xform = &xform->next->cipher;
3118 		auth_xform = &xform->auth;
3119 	} else {
3120 		DPAA2_SEC_ERR("Invalid crypto type");
3121 		return -EINVAL;
3122 	}
3123 
3124 	session->ctxt_type = DPAA2_SEC_PDCP;
3125 	if (cipher_xform) {
3126 		session->cipher_key.data = rte_zmalloc(NULL,
3127 					       cipher_xform->key.length,
3128 					       RTE_CACHE_LINE_SIZE);
3129 		if (session->cipher_key.data == NULL &&
3130 				cipher_xform->key.length > 0) {
3131 			DPAA2_SEC_ERR("No Memory for cipher key");
3132 			rte_free(priv);
3133 			return -ENOMEM;
3134 		}
3135 		session->cipher_key.length = cipher_xform->key.length;
3136 		memcpy(session->cipher_key.data, cipher_xform->key.data,
3137 			cipher_xform->key.length);
3138 		session->dir =
3139 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
3140 					DIR_ENC : DIR_DEC;
3141 		session->cipher_alg = cipher_xform->algo;
3142 	} else {
3143 		session->cipher_key.data = NULL;
3144 		session->cipher_key.length = 0;
3145 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
3146 		session->dir = DIR_ENC;
3147 	}
3148 
3149 	session->pdcp.domain = pdcp_xform->domain;
3150 	session->pdcp.bearer = pdcp_xform->bearer;
3151 	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
3152 	session->pdcp.sn_size = pdcp_xform->sn_size;
3153 	session->pdcp.hfn = pdcp_xform->hfn;
3154 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
3155 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
3156 	/* hfn override offset location is stored in the iv.offset value */
3157 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
3158 
3159 	cipherdata.key = (size_t)session->cipher_key.data;
3160 	cipherdata.keylen = session->cipher_key.length;
3161 	cipherdata.key_enc_flags = 0;
3162 	cipherdata.key_type = RTA_DATA_IMM;
3163 
3164 	switch (session->cipher_alg) {
3165 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
3166 		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
3167 		break;
3168 	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
3169 		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
3170 		break;
3171 	case RTE_CRYPTO_CIPHER_AES_CTR:
3172 		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
3173 		break;
3174 	case RTE_CRYPTO_CIPHER_NULL:
3175 		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
3176 		break;
3177 	default:
3178 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
3179 			      session->cipher_alg);
3180 		goto out;
3181 	}
3182 
3183 	if (auth_xform) {
3184 		session->auth_key.data = rte_zmalloc(NULL,
3185 						     auth_xform->key.length,
3186 						     RTE_CACHE_LINE_SIZE);
3187 		if (!session->auth_key.data &&
3188 		    auth_xform->key.length > 0) {
3189 			DPAA2_SEC_ERR("No Memory for auth key");
3190 			rte_free(session->cipher_key.data);
3191 			rte_free(priv);
3192 			return -ENOMEM;
3193 		}
3194 		session->auth_key.length = auth_xform->key.length;
3195 		memcpy(session->auth_key.data, auth_xform->key.data,
3196 		       auth_xform->key.length);
3197 		session->auth_alg = auth_xform->algo;
3198 	} else {
3199 		session->auth_key.data = NULL;
3200 		session->auth_key.length = 0;
3201 		session->auth_alg = 0;
3202 	}
3203 	authdata.key = (size_t)session->auth_key.data;
3204 	authdata.keylen = session->auth_key.length;
3205 	authdata.key_enc_flags = 0;
3206 	authdata.key_type = RTA_DATA_IMM;
3207 
3208 	if (session->auth_alg) {
3209 		switch (session->auth_alg) {
3210 		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
3211 			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
3212 			break;
3213 		case RTE_CRYPTO_AUTH_ZUC_EIA3:
3214 			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
3215 			break;
3216 		case RTE_CRYPTO_AUTH_AES_CMAC:
3217 			authdata.algtype = PDCP_AUTH_TYPE_AES;
3218 			break;
3219 		case RTE_CRYPTO_AUTH_NULL:
3220 			authdata.algtype = PDCP_AUTH_TYPE_NULL;
3221 			break;
3222 		default:
3223 			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
3224 				      session->auth_alg);
3225 			goto out;
3226 		}
3227 
3228 		p_authdata = &authdata;
3229 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3230 		DPAA2_SEC_ERR("Crypto: Integrity is mandatory for c-plane");
3231 		goto out;
3232 	}
3233 
3234 	if (rta_inline_pdcp_query(authdata.algtype,
3235 				cipherdata.algtype,
3236 				session->pdcp.sn_size,
3237 				session->pdcp.hfn_ovd)) {
3238 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
3239 		cipherdata.key_type = RTA_DATA_PTR;
3240 	}
3241 
3242 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
3243 		if (session->dir == DIR_ENC)
3244 			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
3245 					priv->flc_desc[0].desc, 1, swap,
3246 					pdcp_xform->hfn,
3247 					session->pdcp.sn_size,
3248 					pdcp_xform->bearer,
3249 					pdcp_xform->pkt_dir,
3250 					pdcp_xform->hfn_threshold,
3251 					&cipherdata, &authdata,
3252 					0);
3253 		else if (session->dir == DIR_DEC)
3254 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
3255 					priv->flc_desc[0].desc, 1, swap,
3256 					pdcp_xform->hfn,
3257 					session->pdcp.sn_size,
3258 					pdcp_xform->bearer,
3259 					pdcp_xform->pkt_dir,
3260 					pdcp_xform->hfn_threshold,
3261 					&cipherdata, &authdata,
3262 					0);
3263 	} else {
3264 		if (session->dir == DIR_ENC) {
3265 			if (pdcp_xform->sdap_enabled)
3266 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
3267 					priv->flc_desc[0].desc, 1, swap,
3268 					session->pdcp.sn_size,
3269 					pdcp_xform->hfn,
3270 					pdcp_xform->bearer,
3271 					pdcp_xform->pkt_dir,
3272 					pdcp_xform->hfn_threshold,
3273 					&cipherdata, p_authdata, 0);
3274 			else
3275 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
3276 					priv->flc_desc[0].desc, 1, swap,
3277 					session->pdcp.sn_size,
3278 					pdcp_xform->hfn,
3279 					pdcp_xform->bearer,
3280 					pdcp_xform->pkt_dir,
3281 					pdcp_xform->hfn_threshold,
3282 					&cipherdata, p_authdata, 0);
3283 		} else if (session->dir == DIR_DEC) {
3284 			if (pdcp_xform->sdap_enabled)
3285 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
3286 					priv->flc_desc[0].desc, 1, swap,
3287 					session->pdcp.sn_size,
3288 					pdcp_xform->hfn,
3289 					pdcp_xform->bearer,
3290 					pdcp_xform->pkt_dir,
3291 					pdcp_xform->hfn_threshold,
3292 					&cipherdata, p_authdata, 0);
3293 			else
3294 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
3295 					priv->flc_desc[0].desc, 1, swap,
3296 					session->pdcp.sn_size,
3297 					pdcp_xform->hfn,
3298 					pdcp_xform->bearer,
3299 					pdcp_xform->pkt_dir,
3300 					pdcp_xform->hfn_threshold,
3301 					&cipherdata, p_authdata, 0);
3302 		}
3303 	}
3304 
3305 	if (bufsize < 0) {
3306 		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
3307 		goto out;
3308 	}
3309 
3310 	/* Enable the stashing control bit */
3311 	DPAA2_SET_FLC_RSC(flc);
3312 	flc->word2_rflc_31_0 = lower_32_bits(
3313 			(size_t)&(((struct dpaa2_sec_qp *)
3314 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
3315 	flc->word3_rflc_63_32 = upper_32_bits(
3316 			(size_t)&(((struct dpaa2_sec_qp *)
3317 			dev->data->queue_pairs[0])->rx_vq));
3318 
3319 	flc->word1_sdl = (uint8_t)bufsize;
3320 
3321 	/* TODO - check the performance impact, or align as per the
3322 	 * descriptor type, before setting the EWS bit (enable write-safe):
3323 	 * DPAA2_SET_FLC_EWS(flc);
3324 	 */
3326 
3327 	/* Set BS = 1 i.e reuse input buffers as output buffers */
3328 	DPAA2_SET_FLC_REUSE_BS(flc);
3329 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
3330 	DPAA2_SET_FLC_REUSE_FF(flc);
3331 
3332 	session->ctxt = priv;
3333 
3334 	return 0;
3335 out:
3336 	rte_free(session->auth_key.data);
3337 	rte_free(session->cipher_key.data);
3338 	rte_free(priv);
3339 	return -EINVAL;
3340 }
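
/*
 * Illustrative sketch only (guarded out of the build): a minimal PDCP
 * u-plane session configuration that reaches
 * dpaa2_sec_set_pdcp_session() above. Bearer, HFN and SN-size values
 * are hypothetical examples.
 */
#if 0
static void
example_fill_pdcp_conf(struct rte_security_session_conf *conf,
		       struct rte_crypto_sym_xform *crypto_xform)
{
	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
	conf->protocol = RTE_SECURITY_PROTOCOL_PDCP;
	conf->pdcp.domain = RTE_SECURITY_PDCP_MODE_DATA;
	conf->pdcp.pkt_dir = RTE_SECURITY_PDCP_UPLINK;
	conf->pdcp.sn_size = RTE_SECURITY_PDCP_SN_SIZE_12;
	conf->pdcp.bearer = 0;
	conf->pdcp.hfn = 1;
	conf->pdcp.hfn_threshold = 0xfffff;
	conf->pdcp.sdap_enabled = 0;
	conf->crypto_xform = crypto_xform;
}
#endif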
3341 
3342 static int
3343 dpaa2_sec_security_session_create(void *dev,
3344 				  struct rte_security_session_conf *conf,
3345 				  struct rte_security_session *sess,
3346 				  struct rte_mempool *mempool)
3347 {
3348 	void *sess_private_data;
3349 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3350 	int ret;
3351 
3352 	if (rte_mempool_get(mempool, &sess_private_data)) {
3353 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3354 		return -ENOMEM;
3355 	}
3356 
3357 	switch (conf->protocol) {
3358 	case RTE_SECURITY_PROTOCOL_IPSEC:
3359 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
3360 				sess_private_data);
3361 		break;
3362 	case RTE_SECURITY_PROTOCOL_MACSEC:
3363 		return -ENOTSUP;
3364 	case RTE_SECURITY_PROTOCOL_PDCP:
3365 		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
3366 				sess_private_data);
3367 		break;
3368 	default:
3369 		return -EINVAL;
3370 	}
3371 	if (ret != 0) {
3372 		DPAA2_SEC_ERR("Failed to configure session parameters");
3373 		/* Return session to mempool */
3374 		rte_mempool_put(mempool, sess_private_data);
3375 		return ret;
3376 	}
3377 
3378 	set_sec_session_private_data(sess, sess_private_data);
3379 
3380 	return ret;
3381 }
3382 
3383 /** Clear the session memory so that it doesn't leave key material behind */
3384 static int
3385 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
3386 		struct rte_security_session *sess)
3387 {
3388 	PMD_INIT_FUNC_TRACE();
3389 	void *sess_priv = get_sec_session_private_data(sess);
3390 
3391 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3392 
3393 	if (sess_priv) {
3394 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
3395 
3396 		rte_free(s->ctxt);
3397 		rte_free(s->cipher_key.data);
3398 		rte_free(s->auth_key.data);
3399 		memset(s, 0, sizeof(dpaa2_sec_session));
3400 		set_sec_session_private_data(sess, NULL);
3401 		rte_mempool_put(sess_mp, sess_priv);
3402 	}
3403 	return 0;
3404 }
3405 #endif
3406 static int
3407 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
3408 		struct rte_crypto_sym_xform *xform,
3409 		struct rte_cryptodev_sym_session *sess,
3410 		struct rte_mempool *mempool)
3411 {
3412 	void *sess_private_data;
3413 	int ret;
3414 
3415 	if (rte_mempool_get(mempool, &sess_private_data)) {
3416 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
3417 		return -ENOMEM;
3418 	}
3419 
3420 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
3421 	if (ret != 0) {
3422 		DPAA2_SEC_ERR("Failed to configure session parameters");
3423 		/* Return session to mempool */
3424 		rte_mempool_put(mempool, sess_private_data);
3425 		return ret;
3426 	}
3427 
3428 	set_sym_session_private_data(sess, dev->driver_id,
3429 		sess_private_data);
3430 
3431 	return 0;
3432 }
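
/*
 * Illustrative only: the application-side calls that end up in
 * dpaa2_sec_sym_session_configure() above via the sym_session_configure
 * op. The device id, mempools and AES-CBC parameters are hypothetical:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = iv_offset, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(cdev_id, sess, &xform,
 *					   sess_priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */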
3433 
3434 /** Clear the session memory so that it doesn't leave key material behind */
3435 static void
3436 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
3437 		struct rte_cryptodev_sym_session *sess)
3438 {
3439 	uint8_t index = dev->driver_id;
3440 	void *sess_priv = get_sym_session_private_data(sess, index);
3441 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
3442 
	PMD_INIT_FUNC_TRACE();
3443 
3444 	if (sess_priv) {
3445 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

3446 		rte_free(s->ctxt);
3447 		rte_free(s->cipher_key.data);
3448 		rte_free(s->auth_key.data);
3449 		memset(s, 0, sizeof(dpaa2_sec_session));
3450 		set_sym_session_private_data(sess, index, NULL);
3451 		rte_mempool_put(sess_mp, sess_priv);
3452 	}
3453 }
3454 
3455 static int
3456 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3457 			struct rte_cryptodev_config *config __rte_unused)
3458 {
3459 	PMD_INIT_FUNC_TRACE();
3460 
3461 	return 0;
3462 }
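
/*
 * Illustrative only: dpaa2_sec needs no device-level configuration, but
 * an application still runs the standard cryptodev bring-up sequence,
 * which invokes the ops above and below. The device id and mempool
 * names are hypothetical:
 *
 *	struct rte_cryptodev_config dev_conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *	if (rte_cryptodev_configure(cdev_id, &dev_conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
 *					   rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(cdev_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cryptodev bring-up failed\n");
 */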
3463 
3464 static int
3465 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
3466 {
3467 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3468 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3469 	struct dpseci_attr attr;
3470 	struct dpaa2_queue *dpaa2_q;
3471 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3472 					dev->data->queue_pairs;
3473 	struct dpseci_rx_queue_attr rx_attr;
3474 	struct dpseci_tx_queue_attr tx_attr;
3475 	int ret, i;
3476 
3477 	PMD_INIT_FUNC_TRACE();
3478 
3479 	memset(&attr, 0, sizeof(struct dpseci_attr));
3480 
3481 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
3482 	if (ret) {
3483 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
3484 			      priv->hw_id);
3485 		goto get_attr_failure;
3486 	}
3487 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
3488 	if (ret) {
3489 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
3490 		goto get_attr_failure;
3491 	}
3492 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
3493 		dpaa2_q = &qp[i]->rx_vq;
3494 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3495 				    &rx_attr);
3496 		dpaa2_q->fqid = rx_attr.fqid;
3497 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
3498 	}
3499 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
3500 		dpaa2_q = &qp[i]->tx_vq;
3501 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
3502 				    &tx_attr);
3503 		dpaa2_q->fqid = tx_attr.fqid;
3504 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
3505 	}
3506 
3507 	return 0;
3508 get_attr_failure:
3509 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3510 	return -1;
3511 }
3512 
3513 static void
3514 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
3515 {
3516 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3517 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3518 	int ret;
3519 
3520 	PMD_INIT_FUNC_TRACE();
3521 
3522 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
3523 	if (ret) {
3524 		DPAA2_SEC_ERR("Failure disabling dpseci device with HW_ID = %d",
3525 			     priv->hw_id);
3526 		return;
3527 	}
3528 
3529 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
3530 	if (ret < 0) {
3531 		DPAA2_SEC_ERR("SEC device cannot be reset: Error = %x", ret);
3532 		return;
3533 	}
3534 }
3535 
3536 static int
3537 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
3538 {
3539 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3540 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3541 	int ret;
3542 
3543 	PMD_INIT_FUNC_TRACE();
3544 
3545 	/* This function is the reverse of dpaa2_sec_dev_init. It:
3546 	 * 1. Detaches the DPSECI from attached resources, i.e. buffer
3547 	 *    pools (dpbp_id).
3548 	 * 2. Closes the DPSECI device.
3549 	 * 3. Frees the allocated resources.
3550 	 */
3551 
3552 	/* Close the device at the underlying layer */
3553 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
3554 	if (ret) {
3555 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
3556 		return -1;
3557 	}
3558 
3559 	/* Free the memory allocated for the dpseci object */
3560 	priv->hw = NULL;
3561 	rte_free(dpseci);
3562 
3563 	return 0;
3564 }
3565 
3566 static void
3567 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
3568 			struct rte_cryptodev_info *info)
3569 {
3570 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3571 
3572 	PMD_INIT_FUNC_TRACE();
3573 	if (info != NULL) {
3574 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3575 		info->feature_flags = dev->feature_flags;
3576 		info->capabilities = dpaa2_sec_capabilities;
3577 		/* No limit on the number of sessions */
3578 		info->sym.max_nb_sessions = 0;
3579 		info->driver_id = cryptodev_driver_id;
3580 	}
3581 }
3582 
3583 static
3584 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
3585 			 struct rte_cryptodev_stats *stats)
3586 {
3587 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3588 	struct fsl_mc_io dpseci;
3589 	struct dpseci_sec_counters counters = {0};
3590 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3591 					dev->data->queue_pairs;
3592 	int ret, i;
3593 
3594 	PMD_INIT_FUNC_TRACE();
3595 	if (stats == NULL) {
3596 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
3597 		return;
3598 	}
3599 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3600 		if (qp == NULL || qp[i] == NULL) {
3601 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3602 			continue;
3603 		}
3604 
3605 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
3606 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
3607 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
3608 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
3609 	}
3610 
3611 	/* In case a secondary process accesses the stats, the MCP portal
3612 	 * in priv->hw may hold the primary process address. Use the
3613 	 * secondary-process-based MCP portal address for this object.
3614 	 */
3615 	dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3616 	ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
3617 				      &counters);
3618 	if (ret) {
3619 		DPAA2_SEC_ERR("Reading SEC counters failed");
3620 	} else {
3621 		DPAA2_SEC_INFO("dpseci hardware stats:"
3622 			    "\n\tNum of Requests Dequeued = %" PRIu64
3623 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
3624 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
3625 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
3626 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
3627 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
3628 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
3629 			    counters.dequeued_requests,
3630 			    counters.ob_enc_requests,
3631 			    counters.ib_dec_requests,
3632 			    counters.ob_enc_bytes,
3633 			    counters.ob_prot_bytes,
3634 			    counters.ib_dec_bytes,
3635 			    counters.ib_valid_bytes);
3636 	}
3637 }
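
/*
 * Illustrative only: retrieving the per-queue counters accumulated
 * above through the public API (cdev_id is hypothetical):
 *
 *	struct rte_cryptodev_stats stats;
 *	if (rte_cryptodev_stats_get(cdev_id, &stats) == 0)
 *		printf("enq %"PRIu64" deq %"PRIu64"\n",
 *		       stats.enqueued_count, stats.dequeued_count);
 */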
3638 
3639 static
3640 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
3641 {
3642 	int i;
3643 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
3644 				   (dev->data->queue_pairs);
3645 
3646 	PMD_INIT_FUNC_TRACE();
3647 
3648 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
3649 		if (qp == NULL || qp[i] == NULL) {
3650 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
3651 			continue;
3652 		}
3653 		qp[i]->tx_vq.rx_pkts = 0;
3654 		qp[i]->tx_vq.tx_pkts = 0;
3655 		qp[i]->tx_vq.err_pkts = 0;
3656 		qp[i]->rx_vq.rx_pkts = 0;
3657 		qp[i]->rx_vq.tx_pkts = 0;
3658 		qp[i]->rx_vq.err_pkts = 0;
3659 	}
3660 }
3661 
3662 static void __rte_hot
3663 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
3664 				 const struct qbman_fd *fd,
3665 				 const struct qbman_result *dq,
3666 				 struct dpaa2_queue *rxq,
3667 				 struct rte_event *ev)
3668 {
3669 	/* Prefetching mbuf */
3670 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3671 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3672 
3673 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3674 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3675 
3676 	ev->flow_id = rxq->ev.flow_id;
3677 	ev->sub_event_type = rxq->ev.sub_event_type;
3678 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3679 	ev->op = RTE_EVENT_OP_NEW;
3680 	ev->sched_type = rxq->ev.sched_type;
3681 	ev->queue_id = rxq->ev.queue_id;
3682 	ev->priority = rxq->ev.priority;
3683 	ev->event_ptr = sec_fd_to_mbuf(fd);
3684 
3685 	qbman_swp_dqrr_consume(swp, dq);
3686 }

3687 static void __rte_hot
3688 dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
3689 				 const struct qbman_fd *fd,
3690 				 const struct qbman_result *dq,
3691 				 struct dpaa2_queue *rxq,
3692 				 struct rte_event *ev)
3693 {
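	/*
	 * Atomic flows use the QBMan "hold active" scheme: the DQRR entry
	 * is not consumed here. Its index is stored in the mbuf (seqn)
	 * and in per-lcore state so that the entry can be consumed when
	 * the op is re-enqueued or released, preserving the flow's
	 * ordering and atomicity.
	 */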
3694 	uint8_t dqrr_index;
3695 	struct rte_crypto_op *crypto_op;
3696 	/* Prefetching mbuf */
3697 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
3698 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
3699 
3700 	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
3701 	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
3702 
3703 	ev->flow_id = rxq->ev.flow_id;
3704 	ev->sub_event_type = rxq->ev.sub_event_type;
3705 	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3706 	ev->op = RTE_EVENT_OP_NEW;
3707 	ev->sched_type = rxq->ev.sched_type;
3708 	ev->queue_id = rxq->ev.queue_id;
3709 	ev->priority = rxq->ev.priority;
3710 
3711 	ev->event_ptr = sec_fd_to_mbuf(fd);
	/* The crypto op pointer is valid only after sec_fd_to_mbuf() */
	crypto_op = (struct rte_crypto_op *)ev->event_ptr;
3712 	dqrr_index = qbman_get_dqrr_idx(dq);
3713 	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
3714 	DPAA2_PER_LCORE_DQRR_SIZE++;
3715 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
3716 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
3717 }
3718 
3719 int
3720 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
3721 		int qp_id,
3722 		struct dpaa2_dpcon_dev *dpcon,
3723 		const struct rte_event *event)
3724 {
3725 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3726 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3727 	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
3728 	struct dpseci_rx_queue_cfg cfg;
3729 	uint8_t priority;
3730 	int ret;
3731 
3732 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
3733 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
3734 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
3735 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
3736 	else
3737 		return -EINVAL;
3738 
	/* Map the 0..RTE_EVENT_DEV_PRIORITY_LOWEST event priority range
	 * onto the dpcon's 0..num_priorities-1 range (0 = highest), and
	 * avoid dividing by a zero (highest) event priority.
	 */
3739 	priority = (event->priority * (dpcon->num_priorities - 1)) /
3740 		   RTE_EVENT_DEV_PRIORITY_LOWEST;
3741 
3742 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3743 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3744 	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
3745 	cfg.dest_cfg.dest_id = dpcon->dpcon_id;
3746 	cfg.dest_cfg.priority = priority;
3747 
3748 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
3749 	cfg.user_ctx = (size_t)(qp);
3750 	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
3751 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
3752 		cfg.order_preservation_en = 1;
3753 	}
3754 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3755 				  qp_id, &cfg);
3756 	if (ret) {
3757 		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3758 		return ret;
3759 	}
3760 
3761 	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
3762 
3763 	return 0;
3764 }
3765 
3766 int
3767 dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
3768 			int qp_id)
3769 {
3770 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
3771 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
3772 	struct dpseci_rx_queue_cfg cfg;
3773 	int ret;
3774 
3775 	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
3776 	cfg.options = DPSECI_QUEUE_OPT_DEST;
3777 	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
3778 
3779 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
3780 				  qp_id, &cfg);
3781 	if (ret)
3782 		RTE_LOG(ERR, PMD, "Error in dpseci_set_rx_queue: ret: %d\n", ret);
3783 
3784 	return ret;
3785 }
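
/*
 * Illustrative only: applications normally reach the attach/detach
 * helpers above through the event crypto adapter. The adapter id,
 * cryptodev id, queue pair id and event queue id are hypothetical, and
 * the queue_pair_add() signature shown is the one used by DPDK releases
 * of this era:
 *
 *	struct rte_event ev = {
 *		.queue_id = ev_qid,
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *	};
 *	ret = rte_event_crypto_adapter_queue_pair_add(adapter_id,
 *						      cdev_id, qp_id, &ev);
 */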
3786 
3787 static struct rte_cryptodev_ops crypto_ops = {
3788 	.dev_configure	      = dpaa2_sec_dev_configure,
3789 	.dev_start	      = dpaa2_sec_dev_start,
3790 	.dev_stop	      = dpaa2_sec_dev_stop,
3791 	.dev_close	      = dpaa2_sec_dev_close,
3792 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
3793 	.stats_get	      = dpaa2_sec_stats_get,
3794 	.stats_reset	      = dpaa2_sec_stats_reset,
3795 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
3796 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
3797 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
3798 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
3799 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
3800 };
3801 
3802 #ifdef RTE_LIB_SECURITY
3803 static const struct rte_security_capability *
3804 dpaa2_sec_capabilities_get(void *device __rte_unused)
3805 {
3806 	return dpaa2_sec_security_cap;
3807 }
3808 
3809 static const struct rte_security_ops dpaa2_sec_security_ops = {
3810 	.session_create = dpaa2_sec_security_session_create,
3811 	.session_update = NULL,
3812 	.session_stats_get = NULL,
3813 	.session_destroy = dpaa2_sec_security_session_destroy,
3814 	.set_pkt_metadata = NULL,
3815 	.capabilities_get = dpaa2_sec_capabilities_get
3816 };
3817 #endif
3818 
3819 static int
3820 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
3821 {
3822 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
3823 
3824 	rte_free(dev->security_ctx);
3825 
3826 	rte_mempool_free(internals->fle_pool);
3827 
3828 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
3829 		       dev->data->name, rte_socket_id());
3830 
3831 	return 0;
3832 }
3833 
3834 static int
3835 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
3836 {
3837 	struct dpaa2_sec_dev_private *internals;
3838 	struct rte_device *dev = cryptodev->device;
3839 	struct rte_dpaa2_device *dpaa2_dev;
3840 #ifdef RTE_LIB_SECURITY
3841 	struct rte_security_ctx *security_instance;
3842 #endif
3843 	struct fsl_mc_io *dpseci;
3844 	uint16_t token;
3845 	struct dpseci_attr attr;
3846 	int retcode, hw_id;
3847 	char str[30];
3848 
3849 	PMD_INIT_FUNC_TRACE();
3850 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
3851 	hw_id = dpaa2_dev->object_id;
3852 
3853 	cryptodev->driver_id = cryptodev_driver_id;
3854 	cryptodev->dev_ops = &crypto_ops;
3855 
3856 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
3857 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
3858 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3859 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
3860 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3861 			RTE_CRYPTODEV_FF_SECURITY |
3862 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3863 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3864 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3865 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3866 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3867 
3868 	internals = cryptodev->data->dev_private;
3869 
3870 	/*
3871 	 * For secondary processes, we don't initialise any further as primary
3872 	 * has already done this work. Only check we don't need a different
3873 	 * RX function
3874 	 */
3875 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3876 		DPAA2_SEC_DEBUG("Device already init by primary process");
3877 		return 0;
3878 	}
3879 #ifdef RTE_LIB_SECURITY
3880 	/* Initialize security_ctx only for primary process*/
3881 	security_instance = rte_malloc("rte_security_instances_ops",
3882 				sizeof(struct rte_security_ctx), 0);
3883 	if (security_instance == NULL)
3884 		return -ENOMEM;
3885 	security_instance->device = (void *)cryptodev;
3886 	security_instance->ops = &dpaa2_sec_security_ops;
3887 	security_instance->sess_cnt = 0;
3888 	cryptodev->security_ctx = security_instance;
3889 #endif
3890 	/*Open the rte device via MC and save the handle for further use*/
3891 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
3892 				sizeof(struct fsl_mc_io), 0);
3893 	if (!dpseci) {
3894 		DPAA2_SEC_ERR(
3895 			"Error allocating memory for the dpseci object");
3896 		return -ENOMEM;
3897 	}
3898 	dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
3899 
3900 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
3901 	if (retcode != 0) {
3902 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
3903 			      retcode);
3904 		goto init_error;
3905 	}
3906 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
3907 	if (retcode != 0) {
3908 		DPAA2_SEC_ERR(
3909 			     "Cannot get dpsec device attributes: Error = %x",
3910 			     retcode);
3911 		goto init_error;
3912 	}
3913 	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
3914 			"dpsec-%u", hw_id);
3915 
3916 	internals->max_nb_queue_pairs = attr.num_tx_queues;
3917 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
3918 	internals->hw = dpseci;
3919 	internals->token = token;
3920 
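	/*
	 * Per-device pool of frame-list-entry (FLE) scratch buffers used
	 * on the datapath to build compound frame descriptors: one buffer
	 * is taken per enqueued op and returned on dequeue. Sizing
	 * follows the FLE_POOL_* constants used below.
	 */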
3921 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
3922 			getpid(), cryptodev->data->dev_id);
3923 	internals->fle_pool = rte_mempool_create((const char *)str,
3924 			FLE_POOL_NUM_BUFS,
3925 			FLE_POOL_BUF_SIZE,
3926 			FLE_POOL_CACHE_SIZE, 0,
3927 			NULL, NULL, NULL, NULL,
3928 			SOCKET_ID_ANY, 0);
3929 	if (!internals->fle_pool) {
3930 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
3931 		goto init_error;
3932 	}
3933 
3934 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
3935 	return 0;
3936 
3937 init_error:
3938 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3939 
3940 	/* dpaa2_sec_uninit(crypto_dev_name); */
3941 	return -EFAULT;
3942 }
3943 
3944 static int
3945 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
3946 			  struct rte_dpaa2_device *dpaa2_dev)
3947 {
3948 	struct rte_cryptodev *cryptodev;
3949 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3950 
3951 	int retval;
3952 
3953 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
3954 			dpaa2_dev->object_id);
3955 
3956 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3957 	if (cryptodev == NULL)
3958 		return -ENOMEM;
3959 
3960 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3961 		cryptodev->data->dev_private = rte_zmalloc_socket(
3962 					"cryptodev private structure",
3963 					sizeof(struct dpaa2_sec_dev_private),
3964 					RTE_CACHE_LINE_SIZE,
3965 					rte_socket_id());
3966 
3967 		if (cryptodev->data->dev_private == NULL)
3968 			rte_panic("Cannot allocate memory for private "
3969 				  "device data");
3970 	}
3971 
3972 	dpaa2_dev->cryptodev = cryptodev;
3973 	cryptodev->device = &dpaa2_dev->device;
3974 
3975 	/* init user callbacks */
3976 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
3977 
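	/* LX2160A integrates a newer SEC block (ERA 10); the other DPAA2
	 * SoCs supported here are ERA 8. RTA needs the era to emit
	 * compatible descriptors.
	 */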
3978 	if (dpaa2_svr_family == SVR_LX2160A)
3979 		rta_set_sec_era(RTA_SEC_ERA_10);
3980 	else
3981 		rta_set_sec_era(RTA_SEC_ERA_8);
3982 
3983 	DPAA2_SEC_INFO("DPAA2 SEC ERA is %d", rta_get_sec_era());
3984 
3985 	/* Invoke PMD device initialization function */
3986 	retval = dpaa2_sec_dev_init(cryptodev);
3987 	if (retval == 0)
3988 		return 0;
3989 
3990 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3991 		rte_free(cryptodev->data->dev_private);
3992 
3993 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
3994 
3995 	return -ENXIO;
3996 }
3997 
3998 static int
3999 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
4000 {
4001 	struct rte_cryptodev *cryptodev;
4002 	int ret;
4003 
4004 	cryptodev = dpaa2_dev->cryptodev;
4005 	if (cryptodev == NULL)
4006 		return -ENODEV;
4007 
4008 	ret = dpaa2_sec_uninit(cryptodev);
4009 	if (ret)
4010 		return ret;
4011 
4012 	return rte_cryptodev_pmd_destroy(cryptodev);
4013 }
4014 
4015 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
4016 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
4017 	.drv_type = DPAA2_CRYPTO,
4018 	.driver = {
4019 		.name = "DPAA2 SEC PMD"
4020 	},
4021 	.probe = cryptodev_dpaa2_sec_probe,
4022 	.remove = cryptodev_dpaa2_sec_remove,
4023 };
4024 
4025 static struct cryptodev_driver dpaa2_sec_crypto_drv;
4026 
4027 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
4028 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
4029 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
4030 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
4031