/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* Required types */
typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
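/* Flow-context data head room: adjustment applied to the mbuf data_off when
 * a protocol (simple) FD returns from SEC (see sec_simple_fd_to_mbuf()).
 */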
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

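/* SEC hardware era assumed by the RTA descriptor-construction library */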
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;

static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* Stash the op pointer in the mbuf: a simple FD has no FLE chain to
	 * carry metadata, so park the original buf_iova in digest.phys_addr
	 * and store the op in its place (undone in sec_simple_fd_to_mbuf()).
	 */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}

static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

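	/* Scratch buffer layout: fle[0] holds the op/ctxt back-pointers,
	 * fle[1] is the output frame list entry, fle[2] the input frame
	 * list entry, and fle[3] onwards the scatter/gather entries.
	 */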
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
								auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not record which FLE holds the mbuf,
	 * so on retrieval we step back one FLE from the FD address to read
	 * the mbuf pointer from the previous FLE. Using the inline mbuf
	 * would be a better approach.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
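	/* Pool buffer layout: fle[0] keeps the op/ctxt back-pointers; after
	 * the increment below, fle points at the output FLE, fle + 1 is the
	 * input FLE, and sge onwards are the scatter/gather entries.
	 */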
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf. Currently we
	 * do not record which FLE holds the mbuf, so on retrieval we step
	 * back one FLE from the FD address to read the mbuf pointer from
	 * the previous FLE. Using the inline mbuf would be a better
	 * approach.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not record which FLE holds the mbuf, so on
	 * retrieval we step back one FLE from the FD address to read the
	 * mbuf pointer from the previous FLE. Using the inline mbuf would
	 * be a better approach.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
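		/* Digest verification: the input is the auth data followed
		 * by a copy of the received digest, built as an SG list.
		 */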
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not record which FLE holds the mbuf, so on
	 * retrieval we step back one FLE from the FD address to read the
	 * mbuf pointer from the previous FLE. Using the inline mbuf would
	 * be a better approach.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

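	/* Input FLE: an SG list of the IV followed by the cipher data */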
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
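		/* Build and enqueue up to MAX_TX_RING_SLOTS frames at a
		 * time; (nb_ops >> 3) picks the full burst size whenever
		 * eight or more ops remain.
		 */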
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							NULL,
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

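	/* Recover the crypto op stashed in buf_iova by build_proto_fd() and
	 * restore the original buffer IOVA parked in digest.phys_addr.
	 */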
	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf. Currently we
	 * do not record which FLE holds the mbuf, so on retrieval we step
	 * back one FLE from the FD address to read the mbuf pointer from
	 * the previous FLE. Using the inline mbuf would be a better
	 * approach.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO: complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for the given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

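	/* Prepare a volatile pull: dequeue up to DPAA2_DQRR_RING_SIZE frames
	 * from the rx FQ into dq_storage in one command.
	 */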
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * The SWP also appears to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired and, if
		 * so, set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO: Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, keep it as is. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

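	/* Pass the rx_vq pointer as the user context so that frames dequeued
	 * from SEC can be mapped back to this queue pair.
	 */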
	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

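	/* Populate the flow context: word1 carries the shared descriptor
	 * length, and the RFLC words embed the rx_vq pointer so that SEC
	 * responses can be mapped back to their queue.
	 */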
	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	unsigned int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   session->digest_length);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[DESC_INITFINAL].desc[i]);

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

1751 static int
1752 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1753 		    struct rte_crypto_sym_xform *xform,
1754 		    dpaa2_sec_session *session)
1755 {
1756 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1757 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1758 	struct alginfo aeaddata;
1759 	unsigned int bufsize, i;
1760 	struct ctxt_priv *priv;
1761 	struct sec_flow_context *flc;
1762 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1763 	int err;
1764 
1765 	PMD_INIT_FUNC_TRACE();
1766 
1767 	/* Set IV parameters */
1768 	session->iv.offset = aead_xform->iv.offset;
1769 	session->iv.length = aead_xform->iv.length;
1770 	session->ctxt_type = DPAA2_SEC_AEAD;
1771 
1772 	/* For SEC AEAD only one descriptor is required */
1773 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1774 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1775 			RTE_CACHE_LINE_SIZE);
1776 	if (priv == NULL) {
1777 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1778 		return -1;
1779 	}
1780 
1781 	priv->fle_pool = dev_priv->fle_pool;
1782 	flc = &priv->flc_desc[0].flc;
1783 
1784 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1785 					       RTE_CACHE_LINE_SIZE);
1786 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1787 		DPAA2_SEC_ERR("No Memory for aead key");
1788 		rte_free(priv);
1789 		return -1;
1790 	}
1791 	memcpy(session->aead_key.data, aead_xform->key.data,
1792 	       aead_xform->key.length);
1793 
1794 	session->digest_length = aead_xform->digest_length;
1795 	session->aead_key.length = aead_xform->key.length;
1796 	ctxt->auth_only_len = aead_xform->aad_length;
1797 
1798 	aeaddata.key = (size_t)session->aead_key.data;
1799 	aeaddata.keylen = session->aead_key.length;
1800 	aeaddata.key_enc_flags = 0;
1801 	aeaddata.key_type = RTA_DATA_IMM;
1802 
1803 	switch (aead_xform->algo) {
1804 	case RTE_CRYPTO_AEAD_AES_GCM:
1805 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
1806 		aeaddata.algmode = OP_ALG_AAI_GCM;
1807 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1808 		break;
1809 	case RTE_CRYPTO_AEAD_AES_CCM:
1810 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1811 			      aead_xform->algo);
1812 		goto error_out;
1813 	default:
1814 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1815 			      aead_xform->algo);
1816 		goto error_out;
1817 	}
1818 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1819 				DIR_ENC : DIR_DEC;
1820 
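	/*
	 * Ask RTA whether the key can live inline in the shared descriptor:
	 * the key length is staged in desc[0] and rta_inline_query() reports
	 * back one bit per key in desc[1] (1 = embed the key immediately,
	 * 0 = reference it by its IOVA).
	 */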
1821 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
1822 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1823 			       MIN_JOB_DESC_SIZE,
1824 			       (unsigned int *)priv->flc_desc[0].desc,
1825 			       &priv->flc_desc[0].desc[1], 1);
1826 
1827 	if (err < 0) {
1828 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1829 		goto error_out;
1830 	}
1831 	if (priv->flc_desc[0].desc[1] & 1) {
1832 		aeaddata.key_type = RTA_DATA_IMM;
1833 	} else {
1834 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1835 		aeaddata.key_type = RTA_DATA_PTR;
1836 	}
1837 	priv->flc_desc[0].desc[0] = 0;
1838 	priv->flc_desc[0].desc[1] = 0;
1839 
1840 	if (session->dir == DIR_ENC)
1841 		bufsize = cnstr_shdsc_gcm_encap(
1842 				priv->flc_desc[0].desc, 1, 0,
1843 				&aeaddata, session->iv.length,
1844 				session->digest_length);
1845 	else
1846 		bufsize = cnstr_shdsc_gcm_decap(
1847 				priv->flc_desc[0].desc, 1, 0,
1848 				&aeaddata, session->iv.length,
1849 				session->digest_length);
1850 	flc->word1_sdl = (uint8_t)bufsize;
1851 	flc->word2_rflc_31_0 = lower_32_bits(
1852 			(size_t)&(((struct dpaa2_sec_qp *)
1853 			dev->data->queue_pairs[0])->rx_vq));
1854 	flc->word3_rflc_63_32 = upper_32_bits(
1855 			(size_t)&(((struct dpaa2_sec_qp *)
1856 			dev->data->queue_pairs[0])->rx_vq));
1857 	session->ctxt = priv;
1858 	for (i = 0; i < bufsize; i++)
1859 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
1860 			    i, priv->flc_desc[0].desc[i]);
1861 
1862 	return 0;
1863 
1864 error_out:
1865 	rte_free(session->aead_key.data);
1866 	rte_free(priv);
1867 	return -1;
1868 }
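
/*
 * Editorial illustration (not part of the driver): a minimal sketch of the
 * AES-GCM transform consumed by dpaa2_sec_aead_init(). Key size, IV
 * placement, tag and AAD lengths are hypothetical placeholder values.
 */
static __rte_unused void
dpaa2_sec_example_gcm_xform(uint8_t *key)
{
	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,	/* -> DIR_ENC */
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 16 },	/* AES-128 */
			/* IV is conventionally carried after the crypto op */
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 12,
			},
			.digest_length = 16,	/* GCM tag */
			.aad_length = 8,	/* becomes auth_only_len */
		},
	};

	RTE_SET_USED(aead_xform);
}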
1869 
1871 static int
1872 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1873 		    struct rte_crypto_sym_xform *xform,
1874 		    dpaa2_sec_session *session)
1875 {
1876 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1877 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1878 	struct alginfo authdata, cipherdata;
1879 	unsigned int bufsize, i;
1880 	struct ctxt_priv *priv;
1881 	struct sec_flow_context *flc;
1882 	struct rte_crypto_cipher_xform *cipher_xform;
1883 	struct rte_crypto_auth_xform *auth_xform;
1884 	int err;
1885 
1886 	PMD_INIT_FUNC_TRACE();
1887 
1888 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
1889 		cipher_xform = &xform->cipher;
1890 		auth_xform = &xform->next->auth;
1891 		session->ctxt_type =
1892 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1893 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1894 	} else {
1895 		cipher_xform = &xform->next->cipher;
1896 		auth_xform = &xform->auth;
1897 		session->ctxt_type =
1898 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1899 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1900 	}
1901 
1902 	/* Set IV parameters */
1903 	session->iv.offset = cipher_xform->iv.offset;
1904 	session->iv.length = cipher_xform->iv.length;
1905 
1906 	/* For SEC authenc (cipher + auth chain) only one descriptor is required */
1907 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1908 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1909 			RTE_CACHE_LINE_SIZE);
1910 	if (priv == NULL) {
1911 		DPAA2_SEC_ERR("No Memory for priv CTXT");
1912 		return -1;
1913 	}
1914 
1915 	priv->fle_pool = dev_priv->fle_pool;
1916 	flc = &priv->flc_desc[0].flc;
1917 
1918 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1919 					       RTE_CACHE_LINE_SIZE);
1920 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1921 		DPAA2_SEC_ERR("No Memory for cipher key");
1922 		rte_free(priv);
1923 		return -1;
1924 	}
1925 	session->cipher_key.length = cipher_xform->key.length;
1926 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1927 					     RTE_CACHE_LINE_SIZE);
1928 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1929 		DPAA2_SEC_ERR("No Memory for auth key");
1930 		rte_free(session->cipher_key.data);
1931 		rte_free(priv);
1932 		return -1;
1933 	}
1934 	session->auth_key.length = auth_xform->key.length;
1935 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1936 	       cipher_xform->key.length);
1937 	memcpy(session->auth_key.data, auth_xform->key.data,
1938 	       auth_xform->key.length);
1939 
1940 	authdata.key = (size_t)session->auth_key.data;
1941 	authdata.keylen = session->auth_key.length;
1942 	authdata.key_enc_flags = 0;
1943 	authdata.key_type = RTA_DATA_IMM;
1944 
1945 	session->digest_length = auth_xform->digest_length;
1946 
1947 	switch (auth_xform->algo) {
1948 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1949 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1950 		authdata.algmode = OP_ALG_AAI_HMAC;
1951 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1952 		break;
1953 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1954 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1955 		authdata.algmode = OP_ALG_AAI_HMAC;
1956 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1957 		break;
1958 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1959 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
1960 		authdata.algmode = OP_ALG_AAI_HMAC;
1961 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1962 		break;
1963 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1964 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
1965 		authdata.algmode = OP_ALG_AAI_HMAC;
1966 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1967 		break;
1968 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1969 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
1970 		authdata.algmode = OP_ALG_AAI_HMAC;
1971 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1972 		break;
1973 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1974 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
1975 		authdata.algmode = OP_ALG_AAI_HMAC;
1976 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1977 		break;
1978 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1979 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1980 	case RTE_CRYPTO_AUTH_NULL:
1981 	case RTE_CRYPTO_AUTH_SHA1:
1982 	case RTE_CRYPTO_AUTH_SHA256:
1983 	case RTE_CRYPTO_AUTH_SHA512:
1984 	case RTE_CRYPTO_AUTH_SHA224:
1985 	case RTE_CRYPTO_AUTH_SHA384:
1986 	case RTE_CRYPTO_AUTH_MD5:
1987 	case RTE_CRYPTO_AUTH_AES_GMAC:
1988 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1989 	case RTE_CRYPTO_AUTH_AES_CMAC:
1990 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1991 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1992 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
1993 			      auth_xform->algo);
1994 		goto error_out;
1995 	default:
1996 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1997 			      auth_xform->algo);
1998 		goto error_out;
1999 	}
2000 	cipherdata.key = (size_t)session->cipher_key.data;
2001 	cipherdata.keylen = session->cipher_key.length;
2002 	cipherdata.key_enc_flags = 0;
2003 	cipherdata.key_type = RTA_DATA_IMM;
2004 
2005 	switch (cipher_xform->algo) {
2006 	case RTE_CRYPTO_CIPHER_AES_CBC:
2007 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2008 		cipherdata.algmode = OP_ALG_AAI_CBC;
2009 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2010 		break;
2011 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2012 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2013 		cipherdata.algmode = OP_ALG_AAI_CBC;
2014 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2015 		break;
2016 	case RTE_CRYPTO_CIPHER_AES_CTR:
2017 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2018 		cipherdata.algmode = OP_ALG_AAI_CTR;
2019 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2020 		break;
2021 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2022 	case RTE_CRYPTO_CIPHER_NULL:
2023 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2024 	case RTE_CRYPTO_CIPHER_AES_ECB:
2025 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2026 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2027 			      cipher_xform->algo);
2028 		goto error_out;
2029 	default:
2030 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2031 			      cipher_xform->algo);
2032 		goto error_out;
2033 	}
2034 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2035 				DIR_ENC : DIR_DEC;
2036 
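	/*
	 * Stage both key lengths in desc[0]/desc[1] and let RTA report in
	 * desc[2] which keys fit inline in the shared descriptor (bit 0:
	 * cipher key, bit 1: auth key); keys that do not fit are referenced
	 * by their IOVA instead.
	 */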
2037 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2038 	priv->flc_desc[0].desc[1] = authdata.keylen;
2039 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2040 			       MIN_JOB_DESC_SIZE,
2041 			       (unsigned int *)priv->flc_desc[0].desc,
2042 			       &priv->flc_desc[0].desc[2], 2);
2043 
2044 	if (err < 0) {
2045 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2046 		goto error_out;
2047 	}
2048 	if (priv->flc_desc[0].desc[2] & 1) {
2049 		cipherdata.key_type = RTA_DATA_IMM;
2050 	} else {
2051 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2052 		cipherdata.key_type = RTA_DATA_PTR;
2053 	}
2054 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2055 		authdata.key_type = RTA_DATA_IMM;
2056 	} else {
2057 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2058 		authdata.key_type = RTA_DATA_PTR;
2059 	}
2060 	priv->flc_desc[0].desc[0] = 0;
2061 	priv->flc_desc[0].desc[1] = 0;
2062 	priv->flc_desc[0].desc[2] = 0;
2063 
2064 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2065 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2066 					      0, &cipherdata, &authdata,
2067 					      session->iv.length,
2068 					      ctxt->auth_only_len,
2069 					      session->digest_length,
2070 					      session->dir);
2071 	} else {
2072 		DPAA2_SEC_ERR("Hash before cipher not supported");
2073 		goto error_out;
2074 	}
2075 
2076 	flc->word1_sdl = (uint8_t)bufsize;
2077 	flc->word2_rflc_31_0 = lower_32_bits(
2078 			(size_t)&(((struct dpaa2_sec_qp *)
2079 			dev->data->queue_pairs[0])->rx_vq));
2080 	flc->word3_rflc_63_32 = upper_32_bits(
2081 			(size_t)&(((struct dpaa2_sec_qp *)
2082 			dev->data->queue_pairs[0])->rx_vq));
2083 	session->ctxt = priv;
2084 	for (i = 0; i < bufsize; i++)
2085 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2086 			    i, priv->flc_desc[0].desc[i]);
2087 
2088 	return 0;
2089 
2090 error_out:
2091 	rte_free(session->cipher_key.data);
2092 	rte_free(session->auth_key.data);
2093 	rte_free(priv);
2094 	return -1;
2095 }
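
/*
 * Editorial illustration (not part of the driver): a cipher-then-auth
 * transform chain as dpaa2_sec_aead_chain_init() sees it for the
 * DPAA2_SEC_CIPHER_HASH context (see dpaa2_sec_set_session_parameters()
 * below). Key buffers, lengths and the IV offset are hypothetical.
 */
static __rte_unused void
dpaa2_sec_example_chain_xforms(uint8_t *ckey, uint8_t *akey)
{
	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = akey, .length = 20 },
			.digest_length = 12,	/* truncated HMAC tag */
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,	/* cipher first, then auth */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = ckey, .length = 16 },
			.iv = { .offset = 16, .length = 16 },
		},
	};

	RTE_SET_USED(cipher_xform);
}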
2096 
2097 static int
2098 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2099 			    struct rte_crypto_sym_xform *xform, void *sess)
2100 {
2101 	dpaa2_sec_session *session = sess;
2102 
2103 	PMD_INIT_FUNC_TRACE();
2104 
2105 	if (unlikely(sess == NULL)) {
2106 		DPAA2_SEC_ERR("Invalid session struct");
2107 		return -EINVAL;
2108 	}
2109 
2110 	/* Default IV length = 0 */
2111 	session->iv.length = 0;
2112 
2113 	/* Cipher Only */
2114 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2115 		session->ctxt_type = DPAA2_SEC_CIPHER;
2116 		dpaa2_sec_cipher_init(dev, xform, session);
2117 
2118 	/* Authentication Only */
2119 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2120 		   xform->next == NULL) {
2121 		session->ctxt_type = DPAA2_SEC_AUTH;
2122 		return dpaa2_sec_auth_init(dev, xform, session);
2123 
2124 	/* Cipher then Authenticate */
2125 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2126 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2127 		session->ext_params.aead_ctxt.auth_cipher_text = true;
2128 		return dpaa2_sec_aead_chain_init(dev, xform, session);
2129 
2130 	/* Authenticate then Cipher */
2131 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2132 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2133 		session->ext_params.aead_ctxt.auth_cipher_text = false;
2134 		return dpaa2_sec_aead_chain_init(dev, xform, session);
2135 
2136 	/* AEAD operation for AES-GCM kind of Algorithms */
2137 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2138 		   xform->next == NULL) {
2139 		return dpaa2_sec_aead_init(dev, xform, session);
2140 
2141 	} else {
2142 		DPAA2_SEC_ERR("Invalid crypto type");
2143 		return -EINVAL;
2144 	}
2147 }
2148 
2149 static int
2150 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2151 			    struct rte_security_session_conf *conf,
2152 			    void *sess)
2153 {
2154 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2155 	struct rte_crypto_auth_xform *auth_xform;
2156 	struct rte_crypto_cipher_xform *cipher_xform;
2157 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2158 	struct ctxt_priv *priv;
2159 	struct ipsec_encap_pdb encap_pdb;
2160 	struct ipsec_decap_pdb decap_pdb;
2161 	struct alginfo authdata, cipherdata;
2162 	unsigned int bufsize;
2163 	struct sec_flow_context *flc;
2164 
2165 	PMD_INIT_FUNC_TRACE();
2166 
2167 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2168 		cipher_xform = &conf->crypto_xform->cipher;
2169 		auth_xform = &conf->crypto_xform->next->auth;
2170 	} else {
2171 		auth_xform = &conf->crypto_xform->auth;
2172 		cipher_xform = &conf->crypto_xform->next->cipher;
2173 	}
2174 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2175 				sizeof(struct ctxt_priv) +
2176 				sizeof(struct sec_flc_desc),
2177 				RTE_CACHE_LINE_SIZE);
2178 
2179 	if (priv == NULL) {
2180 		DPAA2_SEC_ERR("No memory for priv CTXT");
2181 		return -ENOMEM;
2182 	}
2183 
2184 	flc = &priv->flc_desc[0].flc;
2185 
2186 	session->ctxt_type = DPAA2_SEC_IPSEC;
2187 	session->cipher_key.data = rte_zmalloc(NULL,
2188 					       cipher_xform->key.length,
2189 					       RTE_CACHE_LINE_SIZE);
2190 	if (session->cipher_key.data == NULL &&
2191 			cipher_xform->key.length > 0) {
2192 		DPAA2_SEC_ERR("No Memory for cipher key");
2193 		rte_free(priv);
2194 		return -ENOMEM;
2195 	}
2196 
2197 	session->cipher_key.length = cipher_xform->key.length;
2198 	session->auth_key.data = rte_zmalloc(NULL,
2199 					auth_xform->key.length,
2200 					RTE_CACHE_LINE_SIZE);
2201 	if (session->auth_key.data == NULL &&
2202 			auth_xform->key.length > 0) {
2203 		DPAA2_SEC_ERR("No Memory for auth key");
2204 		rte_free(session->cipher_key.data);
2205 		rte_free(priv);
2206 		return -ENOMEM;
2207 	}
2208 	session->auth_key.length = auth_xform->key.length;
2209 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2210 			cipher_xform->key.length);
2211 	memcpy(session->auth_key.data, auth_xform->key.data,
2212 			auth_xform->key.length);
2213 
2214 	authdata.key = (size_t)session->auth_key.data;
2215 	authdata.keylen = session->auth_key.length;
2216 	authdata.key_enc_flags = 0;
2217 	authdata.key_type = RTA_DATA_IMM;
2218 	switch (auth_xform->algo) {
2219 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2220 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2221 		authdata.algmode = OP_ALG_AAI_HMAC;
2222 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2223 		break;
2224 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2225 		authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2226 		authdata.algmode = OP_ALG_AAI_HMAC;
2227 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2228 		break;
2229 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2230 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2231 		authdata.algmode = OP_ALG_AAI_HMAC;
2232 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2233 		break;
2234 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2235 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2236 		authdata.algmode = OP_ALG_AAI_HMAC;
2237 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2238 		break;
2239 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2240 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2241 		authdata.algmode = OP_ALG_AAI_HMAC;
2242 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2243 		break;
2244 	case RTE_CRYPTO_AUTH_AES_CMAC:
2245 		authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
2246 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2247 		break;
2248 	case RTE_CRYPTO_AUTH_NULL:
2249 		authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
2250 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2251 		break;
2252 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2253 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2254 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2255 	case RTE_CRYPTO_AUTH_SHA1:
2256 	case RTE_CRYPTO_AUTH_SHA256:
2257 	case RTE_CRYPTO_AUTH_SHA512:
2258 	case RTE_CRYPTO_AUTH_SHA224:
2259 	case RTE_CRYPTO_AUTH_SHA384:
2260 	case RTE_CRYPTO_AUTH_MD5:
2261 	case RTE_CRYPTO_AUTH_AES_GMAC:
2262 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2263 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2264 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2265 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2266 			      auth_xform->algo);
2267 		goto out;
2268 	default:
2269 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2270 			      auth_xform->algo);
2271 		goto out;
2272 	}
2273 	cipherdata.key = (size_t)session->cipher_key.data;
2274 	cipherdata.keylen = session->cipher_key.length;
2275 	cipherdata.key_enc_flags = 0;
2276 	cipherdata.key_type = RTA_DATA_IMM;
2277 
2278 	switch (cipher_xform->algo) {
2279 	case RTE_CRYPTO_CIPHER_AES_CBC:
2280 		cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
2281 		cipherdata.algmode = OP_ALG_AAI_CBC;
2282 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2283 		break;
2284 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2285 		cipherdata.algtype = OP_PCL_IPSEC_3DES;
2286 		cipherdata.algmode = OP_ALG_AAI_CBC;
2287 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2288 		break;
2289 	case RTE_CRYPTO_CIPHER_AES_CTR:
2290 		cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
2291 		cipherdata.algmode = OP_ALG_AAI_CTR;
2292 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2293 		break;
2294 	case RTE_CRYPTO_CIPHER_NULL:
2295 		cipherdata.algtype = OP_PCL_IPSEC_NULL;
2296 		break;
2297 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2298 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2299 	case RTE_CRYPTO_CIPHER_AES_ECB:
2300 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2301 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2302 			      cipher_xform->algo);
2303 		goto out;
2304 	default:
2305 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2306 			      cipher_xform->algo);
2307 		goto out;
2308 	}
2309 
2310 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2311 		struct ip ip4_hdr;
2312 
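		/*
		 * Template outer IPv4 header for tunnel-mode ESP: with
		 * PDBOPTS_ESP_OIHI_PDB_INL below, the SEC block keeps this
		 * header in the PDB and prepends it to every encapsulated
		 * frame, fixing up the length fields per packet.
		 */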
2313 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2314 		ip4_hdr.ip_v = IPVERSION;
2315 		ip4_hdr.ip_hl = 5;
2316 		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2317 		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2318 		ip4_hdr.ip_id = 0;
2319 		ip4_hdr.ip_off = 0;
2320 		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2321 		ip4_hdr.ip_p = 0x32;	/* IPPROTO_ESP (50) */
2322 		ip4_hdr.ip_sum = 0;
2323 		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2324 		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2325 		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2326 			sizeof(struct ip));
2327 
2328 		/* For Sec Proto only one descriptor is required. */
2329 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2330 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2331 			PDBOPTS_ESP_OIHI_PDB_INL |
2332 			PDBOPTS_ESP_IVSRC |
2333 			PDBHMO_ESP_ENCAP_DTTL;
2334 		encap_pdb.spi = ipsec_xform->spi;
2335 		encap_pdb.ip_hdr_len = sizeof(struct ip);
2336 
2337 		session->dir = DIR_ENC;
2338 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2339 				1, 0, &encap_pdb,
2340 				(uint8_t *)&ip4_hdr,
2341 				&cipherdata, &authdata);
2342 	} else if (ipsec_xform->direction ==
2343 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2344 		flc->dhr = SEC_FLC_DHR_INBOUND;
2345 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2346 		decap_pdb.options = sizeof(struct ip) << 16;
2347 		session->dir = DIR_DEC;
2348 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2349 				1, 0, &decap_pdb, &cipherdata, &authdata);
2350 	} else {
2351 		goto out;
	}
2352 	flc->word1_sdl = (uint8_t)bufsize;
2353 
2354 	/* Enable the stashing control bit */
2355 	DPAA2_SET_FLC_RSC(flc);
2356 	flc->word2_rflc_31_0 = lower_32_bits(
2357 			(size_t)&(((struct dpaa2_sec_qp *)
2358 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2359 	flc->word3_rflc_63_32 = upper_32_bits(
2360 			(size_t)&(((struct dpaa2_sec_qp *)
2361 			dev->data->queue_pairs[0])->rx_vq));
2362 
2363 	/* Set EWS bit i.e. enable write-safe */
2364 	DPAA2_SET_FLC_EWS(flc);
2365 	/* Set BS = 1 i.e reuse input buffers as output buffers */
2366 	DPAA2_SET_FLC_REUSE_BS(flc);
2367 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2368 	DPAA2_SET_FLC_REUSE_FF(flc);
2369 
2370 	session->ctxt = priv;
2371 
2372 	return 0;
2373 out:
2374 	rte_free(session->auth_key.data);
2375 	rte_free(session->cipher_key.data);
2376 	rte_free(priv);
2377 	return -1;
2378 }
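
/*
 * Editorial illustration (not part of the driver): a minimal sketch of the
 * rte_security configuration that reaches dpaa2_sec_set_ipsec_session()
 * for a tunnel-mode egress SA. The SPI, TTL and the chained transforms
 * are hypothetical placeholder values.
 */
static __rte_unused void
dpaa2_sec_example_ipsec_conf(struct rte_crypto_sym_xform *cipher_then_auth)
{
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4 = { .ttl = 64 },
			},
		},
		/* egress expects cipher first, then auth (see above) */
		.crypto_xform = cipher_then_auth,
	};

	RTE_SET_USED(conf);
}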
2379 
2380 static int
2381 dpaa2_sec_security_session_create(void *dev,
2382 				  struct rte_security_session_conf *conf,
2383 				  struct rte_security_session *sess,
2384 				  struct rte_mempool *mempool)
2385 {
2386 	void *sess_private_data;
2387 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2388 	int ret;
2389 
2390 	if (rte_mempool_get(mempool, &sess_private_data)) {
2391 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
2392 		return -ENOMEM;
2393 	}
2394 
2395 	switch (conf->protocol) {
2396 	case RTE_SECURITY_PROTOCOL_IPSEC:
2397 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2398 				sess_private_data);
2399 		break;
2400 	case RTE_SECURITY_PROTOCOL_MACSEC:
2401 		ret = -ENOTSUP;
		break;
2402 	default:
2403 		ret = -EINVAL;
2404 	}
2405 	if (ret != 0) {
2406 		DPAA2_SEC_ERR("Failed to configure session parameters");
2407 		/* Return session to mempool */
2408 		rte_mempool_put(mempool, sess_private_data);
2409 		return ret;
2410 	}
2411 
2412 	set_sec_session_private_data(sess, sess_private_data);
2413 
2414 	return ret;
2415 }
2416 
2417 /** Clear the memory of session so it doesn't leave key material behind */
2418 static int
2419 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2420 		struct rte_security_session *sess)
2421 {
2422 	PMD_INIT_FUNC_TRACE();
2423 	void *sess_priv = get_sec_session_private_data(sess);
2424 
2425 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2426 
2427 	if (sess_priv) {
2428 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2429 
2430 		rte_free(s->ctxt);
2431 		rte_free(s->cipher_key.data);
2432 		rte_free(s->auth_key.data);
2433 		memset(s, 0, sizeof(dpaa2_sec_session));
2434 		set_sec_session_private_data(sess, NULL);
2435 		rte_mempool_put(sess_mp, sess_priv);
2436 	}
2437 	return 0;
2438 }
2439 
2440 static int
2441 dpaa2_sec_session_configure(struct rte_cryptodev *dev,
2442 		struct rte_crypto_sym_xform *xform,
2443 		struct rte_cryptodev_sym_session *sess,
2444 		struct rte_mempool *mempool)
2445 {
2446 	void *sess_private_data;
2447 	int ret;
2448 
2449 	if (rte_mempool_get(mempool, &sess_private_data)) {
2450 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
2451 		return -ENOMEM;
2452 	}
2453 
2454 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2455 	if (ret != 0) {
2456 		DPAA2_SEC_ERR("Failed to configure session parameters");
2457 		/* Return session to mempool */
2458 		rte_mempool_put(mempool, sess_private_data);
2459 		return ret;
2460 	}
2461 
2462 	set_session_private_data(sess, dev->driver_id,
2463 		sess_private_data);
2464 
2465 	return 0;
2466 }
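
/*
 * Editorial illustration (not part of the driver): the application-side
 * calls that end up in dpaa2_sec_session_configure() above. dev_id, the
 * transform chain and the session mempool are assumed to exist already.
 */
static __rte_unused struct rte_cryptodev_sym_session *
dpaa2_sec_example_session_setup(uint8_t dev_id,
		struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocate the generic session header from the mempool */
	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* Dispatches to .session_configure = dpaa2_sec_session_configure */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp)) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}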
2467 
2468 /** Clear the memory of session so it doesn't leave key material behind */
2469 static void
2470 dpaa2_sec_session_clear(struct rte_cryptodev *dev,
2471 		struct rte_cryptodev_sym_session *sess)
2472 {
2473 	PMD_INIT_FUNC_TRACE();
2474 	uint8_t index = dev->driver_id;
2475 	void *sess_priv = get_session_private_data(sess, index);
2476 	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
2477 
2478 	if (sess_priv) {
2479 		rte_free(s->ctxt);
2480 		rte_free(s->cipher_key.data);
2481 		rte_free(s->auth_key.data);
2482 		memset(s, 0, sizeof(dpaa2_sec_session));
2483 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2484 		set_session_private_data(sess, index, NULL);
2485 		rte_mempool_put(sess_mp, sess_priv);
2486 	}
2487 }
2488 
2489 static int
2490 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2491 			struct rte_cryptodev_config *config __rte_unused)
2492 {
2493 	PMD_INIT_FUNC_TRACE();
2494 
2495 	return 0;
2496 }
2497 
2498 static int
2499 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2500 {
2501 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2502 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2503 	struct dpseci_attr attr;
2504 	struct dpaa2_queue *dpaa2_q;
2505 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2506 					dev->data->queue_pairs;
2507 	struct dpseci_rx_queue_attr rx_attr;
2508 	struct dpseci_tx_queue_attr tx_attr;
2509 	int ret, i;
2510 
2511 	PMD_INIT_FUNC_TRACE();
2512 
2513 	memset(&attr, 0, sizeof(struct dpseci_attr));
2514 
2515 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2516 	if (ret) {
2517 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2518 			      priv->hw_id);
2519 		goto get_attr_failure;
2520 	}
2521 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2522 	if (ret) {
2523 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2524 		goto get_attr_failure;
2525 	}
2526 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2527 		dpaa2_q = &qp[i]->rx_vq;
2528 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2529 				    &rx_attr);
2530 		dpaa2_q->fqid = rx_attr.fqid;
2531 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2532 	}
2533 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2534 		dpaa2_q = &qp[i]->tx_vq;
2535 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2536 				    &tx_attr);
2537 		dpaa2_q->fqid = tx_attr.fqid;
2538 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2539 	}
2540 
2541 	return 0;
2542 get_attr_failure:
2543 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2544 	return -1;
2545 }
2546 
2547 static void
2548 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2549 {
2550 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2551 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2552 	int ret;
2553 
2554 	PMD_INIT_FUNC_TRACE();
2555 
2556 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2557 	if (ret) {
2558 		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
2559 			     priv->hw_id);
2560 		return;
2561 	}
2562 
2563 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2564 	if (ret < 0) {
2565 		DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
2566 		return;
2567 	}
2568 }
2569 
2570 static int
2571 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2572 {
2573 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2574 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2575 	int ret;
2576 
2577 	PMD_INIT_FUNC_TRACE();
2578 
2579 	/* Function is the reverse of dpaa2_sec_dev_init.
2580 	 * It does the following:
2581 	 * 1. Close the DPSECI device at the MC level.
2582 	 * 2. Free the fsl_mc_io handle allocated for it.
2583 	 */
2585 
2586 	/* Close the device at the underlying layer */
2587 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2588 	if (ret) {
2589 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
2590 		return -1;
2591 	}
2592 
2593 	/* Free the allocated memory for the dpseci object */
2594 	priv->hw = NULL;
2595 	rte_free(dpseci);
2596 
2597 	return 0;
2598 }
2599 
2600 static void
2601 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
2602 			struct rte_cryptodev_info *info)
2603 {
2604 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2605 
2606 	PMD_INIT_FUNC_TRACE();
2607 	if (info != NULL) {
2608 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2609 		info->feature_flags = dev->feature_flags;
2610 		info->capabilities = dpaa2_sec_capabilities;
2611 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2612 		info->driver_id = cryptodev_driver_id;
2613 	}
2614 }
2615 
2616 static
2617 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
2618 			 struct rte_cryptodev_stats *stats)
2619 {
2620 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2621 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2622 	struct dpseci_sec_counters counters = {0};
2623 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2624 					dev->data->queue_pairs;
2625 	int ret, i;
2626 
2627 	PMD_INIT_FUNC_TRACE();
2628 	if (stats == NULL) {
2629 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
2630 		return;
2631 	}
2632 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2633 		if (qp[i] == NULL) {
2634 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
2635 			continue;
2636 		}
2637 
2638 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
2639 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
2640 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
2641 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
2642 	}
2643 
2644 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
2645 				      &counters);
2646 	if (ret) {
2647 		DPAA2_SEC_ERR("SEC counters failed");
2648 	} else {
2649 		DPAA2_SEC_INFO("dpseci hardware stats:"
2650 			    "\n\tNum of Requests Dequeued = %" PRIu64
2651 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
2652 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
2653 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
2654 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
2655 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
2656 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
2657 			    counters.dequeued_requests,
2658 			    counters.ob_enc_requests,
2659 			    counters.ib_dec_requests,
2660 			    counters.ob_enc_bytes,
2661 			    counters.ob_prot_bytes,
2662 			    counters.ib_dec_bytes,
2663 			    counters.ib_valid_bytes);
2664 	}
2665 }
2666 
2667 static
2668 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
2669 {
2670 	int i;
2671 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2672 				   (dev->data->queue_pairs);
2673 
2674 	PMD_INIT_FUNC_TRACE();
2675 
2676 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2677 		if (qp[i] == NULL) {
2678 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
2679 			continue;
2680 		}
2681 		qp[i]->tx_vq.rx_pkts = 0;
2682 		qp[i]->tx_vq.tx_pkts = 0;
2683 		qp[i]->tx_vq.err_pkts = 0;
2684 		qp[i]->rx_vq.rx_pkts = 0;
2685 		qp[i]->rx_vq.tx_pkts = 0;
2686 		qp[i]->rx_vq.err_pkts = 0;
2687 	}
2688 }
2689 
2690 static struct rte_cryptodev_ops crypto_ops = {
2691 	.dev_configure	      = dpaa2_sec_dev_configure,
2692 	.dev_start	      = dpaa2_sec_dev_start,
2693 	.dev_stop	      = dpaa2_sec_dev_stop,
2694 	.dev_close	      = dpaa2_sec_dev_close,
2695 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
2696 	.stats_get	      = dpaa2_sec_stats_get,
2697 	.stats_reset	      = dpaa2_sec_stats_reset,
2698 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
2699 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
2700 	.queue_pair_start     = dpaa2_sec_queue_pair_start,
2701 	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
2702 	.queue_pair_count     = dpaa2_sec_queue_pair_count,
2703 	.session_get_size     = dpaa2_sec_session_get_size,
2704 	.session_configure    = dpaa2_sec_session_configure,
2705 	.session_clear        = dpaa2_sec_session_clear,
2706 };
2707 
2708 static const struct rte_security_capability *
2709 dpaa2_sec_capabilities_get(void *device __rte_unused)
2710 {
2711 	return dpaa2_sec_security_cap;
2712 }
2713 
2714 struct rte_security_ops dpaa2_sec_security_ops = {
2715 	.session_create = dpaa2_sec_security_session_create,
2716 	.session_update = NULL,
2717 	.session_stats_get = NULL,
2718 	.session_destroy = dpaa2_sec_security_session_destroy,
2719 	.set_pkt_metadata = NULL,
2720 	.capabilities_get = dpaa2_sec_capabilities_get
2721 };
2722 
2723 static int
2724 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
2725 {
2726 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2727 
2728 	rte_free(dev->security_ctx);
2729 
2730 	rte_mempool_free(internals->fle_pool);
2731 
2732 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
2733 		       dev->data->name, rte_socket_id());
2734 
2735 	return 0;
2736 }
2737 
2738 static int
2739 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
2740 {
2741 	struct dpaa2_sec_dev_private *internals;
2742 	struct rte_device *dev = cryptodev->device;
2743 	struct rte_dpaa2_device *dpaa2_dev;
2744 	struct rte_security_ctx *security_instance;
2745 	struct fsl_mc_io *dpseci;
2746 	uint16_t token;
2747 	struct dpseci_attr attr;
2748 	int retcode, hw_id;
2749 	char str[20];
2750 
2751 	PMD_INIT_FUNC_TRACE();
2752 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2753 	if (dpaa2_dev == NULL) {
2754 		DPAA2_SEC_ERR("DPAA2 SEC device not found");
2755 		return -1;
2756 	}
2757 	hw_id = dpaa2_dev->object_id;
2758 
2759 	cryptodev->driver_id = cryptodev_driver_id;
2760 	cryptodev->dev_ops = &crypto_ops;
2761 
2762 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
2763 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
2764 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2765 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2766 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2767 			RTE_CRYPTODEV_FF_SECURITY |
2768 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2769 
2770 	internals = cryptodev->data->dev_private;
2771 	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
2772 
2773 	/*
2774 	 * For secondary processes, we don't initialise any further as primary
2775 	 * has already done this work. Only check we don't need a different
2776 	 * RX function
2777 	 */
2778 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2779 		DPAA2_SEC_DEBUG("Device already init by primary process");
2780 		return 0;
2781 	}
2782 
2783 	/* Initialize security_ctx only for primary process*/
2784 	security_instance = rte_malloc("rte_security_instances_ops",
2785 				sizeof(struct rte_security_ctx), 0);
2786 	if (security_instance == NULL)
2787 		return -ENOMEM;
2788 	security_instance->device = (void *)cryptodev;
2789 	security_instance->ops = &dpaa2_sec_security_ops;
2790 	security_instance->sess_cnt = 0;
2791 	cryptodev->security_ctx = security_instance;
2792 
2793 	/*Open the rte device via MC and save the handle for further use*/
2794 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
2795 				sizeof(struct fsl_mc_io), 0);
2796 	if (!dpseci) {
2797 		DPAA2_SEC_ERR(
2798 			"Error in allocating the memory for dpsec object");
2799 		return -1;
2800 	}
2801 	dpseci->regs = rte_mcp_ptr_list[0];
2802 
2803 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
2804 	if (retcode != 0) {
2805 		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
2806 			      retcode);
2807 		goto init_error;
2808 	}
2809 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
2810 	if (retcode != 0) {
2811 		DPAA2_SEC_ERR(
2812 			     "Cannot get dpsec device attributed: Error = %x",
2813 			     retcode);
2814 		goto init_error;
2815 	}
2816 	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
2817 
2818 	internals->max_nb_queue_pairs = attr.num_tx_queues;
2819 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
2820 	internals->hw = dpseci;
2821 	internals->token = token;
2822 
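	/*
	 * Per-device pool of scratch buffers from which the data path
	 * (the build_*_fd helpers) draws the FLE/SG tables it places in
	 * front of each frame descriptor handed to QBMAN.
	 */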
2823 	sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
2824 	internals->fle_pool = rte_mempool_create((const char *)str,
2825 			FLE_POOL_NUM_BUFS,
2826 			FLE_POOL_BUF_SIZE,
2827 			FLE_POOL_CACHE_SIZE, 0,
2828 			NULL, NULL, NULL, NULL,
2829 			SOCKET_ID_ANY, 0);
2830 	if (!internals->fle_pool) {
2831 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2832 		goto init_error;
2833 	}
2834 
2835 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
2836 	return 0;
2837 
2838 init_error:
2839 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2840 
2841 	/* dpaa2_sec_uninit(crypto_dev_name); */
2842 	return -EFAULT;
2843 }
2844 
2845 static int
2846 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
2847 			  struct rte_dpaa2_device *dpaa2_dev)
2848 {
2849 	struct rte_cryptodev *cryptodev;
2850 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2851 
2852 	int retval;
2853 
2854 	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
2855 
2856 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2857 	if (cryptodev == NULL)
2858 		return -ENOMEM;
2859 
2860 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2861 		cryptodev->data->dev_private = rte_zmalloc_socket(
2862 					"cryptodev private structure",
2863 					sizeof(struct dpaa2_sec_dev_private),
2864 					RTE_CACHE_LINE_SIZE,
2865 					rte_socket_id());
2866 
2867 		if (cryptodev->data->dev_private == NULL)
2868 			rte_panic("Cannot allocate memzone for private "
2869 				  "device data");
2870 	}
2871 
2872 	dpaa2_dev->cryptodev = cryptodev;
2873 	cryptodev->device = &dpaa2_dev->device;
2874 	cryptodev->device->driver = &dpaa2_drv->driver;
2875 
2876 	/* init user callbacks */
2877 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2878 
2879 	/* Invoke PMD device initialization function */
2880 	retval = dpaa2_sec_dev_init(cryptodev);
2881 	if (retval == 0)
2882 		return 0;
2883 
2884 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2885 		rte_free(cryptodev->data->dev_private);
2886 
2887 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
2888 
2889 	return -ENXIO;
2890 }
2891 
2892 static int
2893 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
2894 {
2895 	struct rte_cryptodev *cryptodev;
2896 	int ret;
2897 
2898 	cryptodev = dpaa2_dev->cryptodev;
2899 	if (cryptodev == NULL)
2900 		return -ENODEV;
2901 
2902 	ret = dpaa2_sec_uninit(cryptodev);
2903 	if (ret)
2904 		return ret;
2905 
2906 	return rte_cryptodev_pmd_destroy(cryptodev);
2907 }
2908 
2909 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
2910 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
2911 	.drv_type = DPAA2_CRYPTO,
2912 	.driver = {
2913 		.name = "DPAA2 SEC PMD"
2914 	},
2915 	.probe = cryptodev_dpaa2_sec_probe,
2916 	.remove = cryptodev_dpaa2_sec_remove,
2917 };
2918 
2919 static struct cryptodev_driver dpaa2_sec_crypto_drv;
2920 
2921 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
2922 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
2923 		cryptodev_driver_id);
2924 
2925 RTE_INIT(dpaa2_sec_init_log);
2926 static void
2927 dpaa2_sec_init_log(void)
2928 {
2929 	/* Crypto PMD logs */
2930 	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
2931 	if (dpaa2_logtype_sec >= 0)
2932 		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
2933 }
2934