xref: /dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c (revision bb44fb6fe7713ddcd023d5b9bacadf074d68092e)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* Required types */
typedef uint64_t	dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;

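/*
 * Build a simple FD for a protocol (IPsec) offload op: the FD points
 * directly at the packet mbuf and SEC applies the full protocol descriptor
 * from the flow context. The op pointer is stashed in mbuf->buf_iova so it
 * can be recovered at dequeue time (see sec_simple_fd_to_mbuf()).
 */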
static inline int
build_proto_fd(dpaa2_sec_session *sess,
	       struct rte_crypto_op *op,
	       struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct sec_flow_context *flc;
	struct rte_mbuf *mbuf = sym_op->m_src;

	if (likely(bpid < MAX_BPID))
		DPAA2_SET_FD_BPID(fd, bpid);
	else
		DPAA2_SET_FD_IVP(fd);

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);

	/* save physical address of mbuf */
	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
	mbuf->buf_iova = (size_t)op;

	return 0;
}

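/*
 * Build a compound (frame-list) FD for an AEAD (GCM) op over scatter/gather
 * mbufs. Layout of the rte_malloc'd area: fle[0] is a bookkeeping entry
 * holding the op and session ctxt pointers, fle[1] (op_fle) describes the
 * output, fle[2] (ip_fle) describes the input, and the SG entries follow.
 */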
static inline int
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
								auth_only_len);
	sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;

	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->aead.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data, icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
		     struct rte_crypto_op *op,
		     struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	struct rte_mbuf *dst;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;
	/* TODO: we are using the first FLE entry to store the mbuf and
	 * session ctxt. Currently we do not know which FLE has the mbuf
	 * stored, so on retrieval we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
		   "iv-len=%d data_off: 0x%x\n",
		   sym_op->aead.data.offset,
		   sym_op->aead.data.length,
		   sess->digest_length,
		   sess->iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->aead.data.length + icv_len + auth_only_len) :
			sym_op->aead.data.length + auth_only_len;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				dst->data_off - auth_only_len);
	sge->length = sym_op->aead.data.length + auth_only_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
					sess->iv.length + auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
		(sym_op->aead.data.length + sess->iv.length + auth_only_len) :
		(sym_op->aead.data.length + sess->iv.length + auth_only_len +
		 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
	sge->length = sess->iv.length;
	sge++;
	if (auth_only_len) {
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
		sge->length = auth_only_len;
		DPAA2_SET_FLE_BPID(sge, bpid);
		sge++;
	}

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->aead.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->aead.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
				 sess->digest_length +
				 sess->iv.length +
				 auth_only_len));
	}
	DPAA2_SET_FLE_FIN(sge);

	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}

	return 0;
}

static inline int
build_authenc_sg_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length;
	uint8_t *old_icv;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	/* first FLE entry used to store mbuf and session ctxt */
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;

	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_SG_EXT(op_fle);
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

	op_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = icv_len;
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	mbuf = sym_op->m_src;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_FIN(ip_fle);
	ip_fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 icv_len);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	sge->length -= icv_len;

	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       icv_len);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = icv_len;
	}

	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sess->digest_length, retval;
	uint8_t *old_icv;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so on retrieval we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
		"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		sym_op->auth.data.offset,
		sym_op->auth.data.length,
		sess->digest_length,
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				dst->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sess->iv.length) :
			(sym_op->auth.data.length + sess->iv.length +
			 sess->digest_length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sess->digest_length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sess->digest_length +
				 sess->iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int build_auth_sg_fd(
		dpaa2_sec_session *sess,
		struct rte_crypto_op *op,
		struct qbman_fd *fd,
		__rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	struct rte_mbuf *mbuf;

	PMD_INIT_FUNC_TRACE();

	mbuf = sym_op->m_src;
	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (unlikely(!fle)) {
		DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	/* sg FD */
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	op_fle->length = sess->digest_length;

	/* i/p fle */
	DPAA2_SET_FLE_SG_EXT(ip_fle);
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->auth.data.offset;

	/* i/p segs */
	mbuf = mbuf->next;
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	if (sess->dir == DIR_ENC) {
		/* Digest calculation case */
		sge->length -= sess->digest_length;
		ip_fle->length = sym_op->auth.data.length;
	} else {
		/* Digest verification case */
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		ip_fle->length = sym_op->auth.data.length +
				sess->digest_length;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);
	DPAA2_SET_FD_LEN(fd, ip_fle->length);

	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;
	int retval;

	PMD_INIT_FUNC_TRACE();

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so on retrieval we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sess->digest_length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
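		/* Digest verification: the input is an SG of
		 * (data || received ICV); the received ICV is copied into
		 * scratch space just past the SG entries.
		 */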
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sess->digest_length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sess->digest_length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sess->digest_length;
		fle->length = sym_op->auth.data.length +
				sess->digest_length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

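/*
 * Build a compound FD for a cipher-only op over scatter/gather mbufs: the
 * output FLE covers the data region only, while the input FLE covers the
 * IV followed by the data.
 */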
static int
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, __rte_unused uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	struct rte_mbuf *mbuf;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		mbuf = sym_op->m_dst;
	else
		mbuf = sym_op->m_src;

	fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
			RTE_CACHE_LINE_SIZE);
	if (!fle) {
		DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_SG_MEM_SIZE);
	/* first FLE entry used to store mbuf and session ctxt */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

	op_fle = fle + 1;
	ip_fle = fle + 2;
	sge = fle + 3;

	flc = &priv->flc_desc[0].flc;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	/* o/p fle */
	DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
	op_fle->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(op_fle);

	/* o/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* o/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	/* i/p fle */
	mbuf = sym_op->m_src;
	sge++;
	DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
	ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
	DPAA2_SET_FLE_SG_EXT(ip_fle);

	/* i/p IV */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	DPAA2_SET_FLE_OFFSET(sge, 0);
	sge->length = sess->iv.length;

	sge++;

	/* i/p 1st seg */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     mbuf->data_off);
	sge->length = mbuf->data_len - sym_op->cipher.data.offset;

	mbuf = mbuf->next;
	/* i/p segs */
	while (mbuf) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
		DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
		sge->length = mbuf->data_len;
		mbuf = mbuf->next;
	}
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(ip_fle);

	/* sg fd */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
	DPAA2_SET_FD_LEN(fd, ip_fle->length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));
	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	int retval;
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->iv.offset);
	struct rte_mbuf *dst;

	PMD_INIT_FUNC_TRACE();

	if (sym_op->m_dst)
		dst = sym_op->m_dst;
	else
		dst = sym_op->m_src;

	retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
	if (retval) {
		DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
		return -1;
	}
	memset(fle, 0, FLE_POOL_BUF_SIZE);
	/* TODO: we are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so on retrieval we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */
	DPAA2_SET_FLE_ADDR(fle, (size_t)op);
	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sess->iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
		" data_off: 0x%x\n",
		sym_op->cipher.data.offset,
		sym_op->cipher.data.length,
		sess->iv.length,
		sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     dst->data_off);

	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
		flc, fle, fle->addr_hi, fle->addr_lo,
		fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sess->iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
	sge->length = sess->iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	DPAA2_SEC_DP_DEBUG(
		"CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
		" off =%d, len =%d\n",
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[bpid].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

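/*
 * Dispatch to the right FD builder: the session ctxt type selects the
 * builder, and segmented (non-contiguous) source mbufs take the
 * scatter/gather variants.
 */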
static inline int
build_sec_fd(struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;
	dpaa2_sec_session *sess;

	PMD_INIT_FUNC_TRACE();

	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
		sess = (dpaa2_sec_session *)get_sym_session_private_data(
				op->sym->session, cryptodev_driver_id);
	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
		sess = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	else
		return -1;

	/* Segmented buffer */
	if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_sg_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	} else {
		switch (sess->ctxt_type) {
		case DPAA2_SEC_CIPHER:
			ret = build_cipher_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AUTH:
			ret = build_auth_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_AEAD:
			ret = build_authenc_gcm_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_CIPHER_HASH:
			ret = build_authenc_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_IPSEC:
			ret = build_proto_fd(sess, op, fd, bpid);
			break;
		case DPAA2_SEC_HASH_CIPHER:
		default:
			DPAA2_SEC_ERR("error: Unsupported session");
		}
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/* TODO: need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
		DPAA2_SEC_ERR("sessionless crypto op not supported");
		return 0;
	}
	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	while (nb_ops) {
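		/* Enqueue in bursts of up to MAX_TX_RING_SLOTS; the
		 * nb_ops >> 3 test assumes a burst size of 8 slots.
		 */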
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/* Clear the unused FD fields before sending */
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
			if (ret) {
				DPAA2_SEC_ERR("error: Improper packet contents"
					      " for crypto operation");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[loop],
							NULL,
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

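/*
 * Reverse of build_proto_fd(): recover the crypto op stashed in
 * mbuf->buf_iova, restore the saved buffer IOVA, and adjust data_off and
 * lengths for the protocol (de)encapsulation performed by SEC.
 */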
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
{
	struct rte_crypto_op *op;
	uint16_t len = DPAA2_GET_FD_LEN(fd);
	uint16_t diff = 0;
	dpaa2_sec_session *sess_priv;

	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
	op->sym->aead.digest.phys_addr = 0L;

	sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
				op->sym->sec_session);
	if (sess_priv->dir == DIR_ENC)
		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
	else
		mbuf->data_off += SEC_FLC_DHR_INBOUND;
	diff = len - mbuf->pkt_len;
	mbuf->pkt_len += diff;
	mbuf->data_len += diff;

	return op;
}

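/*
 * Convert a dequeued FD back to the originating crypto op. Simple FDs take
 * the protocol path above; compound FDs recover the op from the bookkeeping
 * FLE placed one entry before the FD address.
 */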
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;
	struct ctxt_priv *priv;
	struct rte_mbuf *dst, *src;

	if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
		return sec_simple_fd_to_mbuf(fd, driver_id);

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
			   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We are using the first FLE entry to store the mbuf.
	 * Currently we do not know which FLE has the mbuf stored,
	 * so on retrieval we go back one FLE from the FD address
	 * to get the mbuf address from the previous FLE.
	 * A better approach would be to use the inline mbuf.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		DPAA2_SEC_ERR("error: non inline buffer");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

	/* Prefetch op */
	src = op->sym->m_src;
	rte_prefetch0(src);

	if (op->sym->m_dst) {
		dst = op->sym->m_dst;
		rte_prefetch0(dst);
	} else
		dst = src;

	DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
		" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
		(void *)dst,
		dst->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	/* free the fle memory */
	if (likely(rte_pktmbuf_is_contiguous(src))) {
		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
	} else
		rte_free((void *)(fle-1));

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function is responsible for receiving frames for a given device and VQ */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct rte_cryptodev *dev =
			(struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_DPIO) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_SEC_WARN(
				"SEC VDQ command is not issued : QBMAN busy");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive the packets till the Last Dequeue entry is found with
	 * respect to the above issued PULL command.
	 */
	while (!is_last) {
		/* Check if the previously issued command is completed.
		 * The SWP also seems to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		/* Check whether the last pull command has expired, and
		 * set the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			DPAA2_SEC_ERR("SEC returned Error - %x",
				      fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* If the qp is already set up, reuse it as-is. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		DPAA2_SEC_INFO("QP already setup");
		return 0;
	}

	DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		DPAA2_SEC_ERR("malloc failed for rx/tx queues");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		DPAA2_SEC_ERR("malloc failed for q_storage");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		DPAA2_SEC_ERR("No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	/* Set IV parameters */
	session->iv.offset = xform->cipher.iv.offset;
	session->iv.length = xform->cipher.iv.length;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, session->iv.length,
					session->dir);
	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Descriptor build failed");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
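	/* Store the rx_vq address of queue pair 0 in the flow-context
	 * words (split into lower/upper 32 bits) so it is available as
	 * context when frames return from SEC.
	 */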
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -1;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		DPAA2_SEC_ERR("Unable to allocate memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (size_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	session->digest_length = xform->auth.digest_length;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      xform->auth.algo);
1696 		goto error_out;
1697 	default:
1698 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1699 			      xform->auth.algo);
1700 		goto error_out;
1701 	}
1702 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1703 				DIR_ENC : DIR_DEC;
1704 
1705 	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1706 				   1, 0, &authdata, !session->dir,
1707 				   session->digest_length);
1708 	if (bufsize < 0) {
1709 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1710 		goto error_out;
1711 	}
1712 
1713 	flc->word1_sdl = (uint8_t)bufsize;
1714 	flc->word2_rflc_31_0 = lower_32_bits(
1715 			(size_t)&(((struct dpaa2_sec_qp *)
1716 			dev->data->queue_pairs[0])->rx_vq));
1717 	flc->word3_rflc_63_32 = upper_32_bits(
1718 			(size_t)&(((struct dpaa2_sec_qp *)
1719 			dev->data->queue_pairs[0])->rx_vq));
1720 	session->ctxt = priv;
1721 	for (i = 0; i < bufsize; i++)
1722 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
1723 				i, priv->flc_desc[DESC_INITFINAL].desc[i]);
1724 
1725 
1726 	return 0;
1727 
1728 error_out:
1729 	rte_free(session->auth_key.data);
1730 	rte_free(priv);
1731 	return -1;
1732 }
1733 
1734 static int
1735 dpaa2_sec_aead_init(struct rte_cryptodev *dev,
1736 		    struct rte_crypto_sym_xform *xform,
1737 		    dpaa2_sec_session *session)
1738 {
1739 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1740 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1741 	struct alginfo aeaddata;
1742 	int bufsize, i;
1743 	struct ctxt_priv *priv;
1744 	struct sec_flow_context *flc;
1745 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
1746 	int err;
1747 
1748 	PMD_INIT_FUNC_TRACE();
1749 
1750 	/* Set IV parameters */
1751 	session->iv.offset = aead_xform->iv.offset;
1752 	session->iv.length = aead_xform->iv.length;
1753 	session->ctxt_type = DPAA2_SEC_AEAD;
1754 
1755 	/* For SEC AEAD only one descriptor is required */
1756 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1757 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1758 			RTE_CACHE_LINE_SIZE);
1759 	if (priv == NULL) {
1760 		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -ENOMEM;
1762 	}
1763 
1764 	priv->fle_pool = dev_priv->fle_pool;
1765 	flc = &priv->flc_desc[0].flc;
1766 
1767 	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
1768 					       RTE_CACHE_LINE_SIZE);
1769 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
1770 		DPAA2_SEC_ERR("No Memory for aead key");
1771 		rte_free(priv);
		return -ENOMEM;
1773 	}
1774 	memcpy(session->aead_key.data, aead_xform->key.data,
1775 	       aead_xform->key.length);
1776 
1777 	session->digest_length = aead_xform->digest_length;
1778 	session->aead_key.length = aead_xform->key.length;
1779 	ctxt->auth_only_len = aead_xform->aad_length;
1780 
1781 	aeaddata.key = (size_t)session->aead_key.data;
1782 	aeaddata.keylen = session->aead_key.length;
1783 	aeaddata.key_enc_flags = 0;
1784 	aeaddata.key_type = RTA_DATA_IMM;
1785 
1786 	switch (aead_xform->algo) {
1787 	case RTE_CRYPTO_AEAD_AES_GCM:
1788 		aeaddata.algtype = OP_ALG_ALGSEL_AES;
1789 		aeaddata.algmode = OP_ALG_AAI_GCM;
1790 		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
1791 		break;
1792 	case RTE_CRYPTO_AEAD_AES_CCM:
1793 		DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
1794 			      aead_xform->algo);
1795 		goto error_out;
1796 	default:
1797 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
1798 			      aead_xform->algo);
1799 		goto error_out;
1800 	}
1801 	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1802 				DIR_ENC : DIR_DEC;
1803 
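	/*
	 * Decide whether the key can live inline in the shared descriptor
	 * or must be referenced by pointer: descriptor space is limited, so
	 * the key length is staged in desc[0] and rta_inline_query() reports
	 * in bit 0 of desc[1] whether an immediate key still fits alongside
	 * the base descriptor.
	 */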
1804 	priv->flc_desc[0].desc[0] = aeaddata.keylen;
1805 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
1806 			       MIN_JOB_DESC_SIZE,
1807 			       (unsigned int *)priv->flc_desc[0].desc,
1808 			       &priv->flc_desc[0].desc[1], 1);
1809 
1810 	if (err < 0) {
1811 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
1812 		goto error_out;
1813 	}
1814 	if (priv->flc_desc[0].desc[1] & 1) {
1815 		aeaddata.key_type = RTA_DATA_IMM;
1816 	} else {
1817 		aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
1818 		aeaddata.key_type = RTA_DATA_PTR;
1819 	}
1820 	priv->flc_desc[0].desc[0] = 0;
1821 	priv->flc_desc[0].desc[1] = 0;
1822 
1823 	if (session->dir == DIR_ENC)
1824 		bufsize = cnstr_shdsc_gcm_encap(
1825 				priv->flc_desc[0].desc, 1, 0,
1826 				&aeaddata, session->iv.length,
1827 				session->digest_length);
1828 	else
1829 		bufsize = cnstr_shdsc_gcm_decap(
1830 				priv->flc_desc[0].desc, 1, 0,
1831 				&aeaddata, session->iv.length,
1832 				session->digest_length);
1833 	if (bufsize < 0) {
1834 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
1835 		goto error_out;
1836 	}
1837 
1838 	flc->word1_sdl = (uint8_t)bufsize;
1839 	flc->word2_rflc_31_0 = lower_32_bits(
1840 			(size_t)&(((struct dpaa2_sec_qp *)
1841 			dev->data->queue_pairs[0])->rx_vq));
1842 	flc->word3_rflc_63_32 = upper_32_bits(
1843 			(size_t)&(((struct dpaa2_sec_qp *)
1844 			dev->data->queue_pairs[0])->rx_vq));
1845 	session->ctxt = priv;
1846 	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[0].desc[i]);
1849 
1850 	return 0;
1851 
1852 error_out:
1853 	rte_free(session->aead_key.data);
1854 	rte_free(priv);
1855 	return -1;
1856 }
1858 
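/*
 * Build one shared descriptor for chained cipher+auth (authenc) sessions.
 * Only contexts that resolve to cipher-then-authenticate processing
 * (DPAA2_SEC_CIPHER_HASH) are constructed; anything that resolves to
 * hash-before-cipher is rejected further below.
 */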
1859 static int
1860 dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
1861 		    struct rte_crypto_sym_xform *xform,
1862 		    dpaa2_sec_session *session)
1863 {
1864 	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
1865 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
1866 	struct alginfo authdata, cipherdata;
1867 	int bufsize, i;
1868 	struct ctxt_priv *priv;
1869 	struct sec_flow_context *flc;
1870 	struct rte_crypto_cipher_xform *cipher_xform;
1871 	struct rte_crypto_auth_xform *auth_xform;
1872 	int err;
1873 
1874 	PMD_INIT_FUNC_TRACE();
1875 
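	/*
	 * auth_cipher_text records the order of the supplied xform chain:
	 * true means the cipher xform came first (encrypt, then
	 * authenticate), false means the auth xform came first. The
	 * effective context type also depends on the cipher direction,
	 * since decryption reverses the processing order.
	 */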
1876 	if (session->ext_params.aead_ctxt.auth_cipher_text) {
1877 		cipher_xform = &xform->cipher;
1878 		auth_xform = &xform->next->auth;
1879 		session->ctxt_type =
1880 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1881 			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
1882 	} else {
1883 		cipher_xform = &xform->next->cipher;
1884 		auth_xform = &xform->auth;
1885 		session->ctxt_type =
1886 			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1887 			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
1888 	}
1889 
1890 	/* Set IV parameters */
1891 	session->iv.offset = cipher_xform->iv.offset;
1892 	session->iv.length = cipher_xform->iv.length;
1893 
	/* For SEC chained (cipher+auth) sessions only one descriptor is required */
1895 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
1896 			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
1897 			RTE_CACHE_LINE_SIZE);
1898 	if (priv == NULL) {
1899 		DPAA2_SEC_ERR("No Memory for priv CTXT");
		return -ENOMEM;
1901 	}
1902 
1903 	priv->fle_pool = dev_priv->fle_pool;
1904 	flc = &priv->flc_desc[0].flc;
1905 
1906 	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
1907 					       RTE_CACHE_LINE_SIZE);
1908 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
1909 		DPAA2_SEC_ERR("No Memory for cipher key");
1910 		rte_free(priv);
		return -ENOMEM;
1912 	}
1913 	session->cipher_key.length = cipher_xform->key.length;
1914 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
1915 					     RTE_CACHE_LINE_SIZE);
1916 	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
1917 		DPAA2_SEC_ERR("No Memory for auth key");
1918 		rte_free(session->cipher_key.data);
1919 		rte_free(priv);
		return -ENOMEM;
1921 	}
1922 	session->auth_key.length = auth_xform->key.length;
1923 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1924 	       cipher_xform->key.length);
1925 	memcpy(session->auth_key.data, auth_xform->key.data,
1926 	       auth_xform->key.length);
1927 
1928 	authdata.key = (size_t)session->auth_key.data;
1929 	authdata.keylen = session->auth_key.length;
1930 	authdata.key_enc_flags = 0;
1931 	authdata.key_type = RTA_DATA_IMM;
1932 
1933 	session->digest_length = auth_xform->digest_length;
1934 
1935 	switch (auth_xform->algo) {
1936 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1937 		authdata.algtype = OP_ALG_ALGSEL_SHA1;
1938 		authdata.algmode = OP_ALG_AAI_HMAC;
1939 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1940 		break;
1941 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1942 		authdata.algtype = OP_ALG_ALGSEL_MD5;
1943 		authdata.algmode = OP_ALG_AAI_HMAC;
1944 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1945 		break;
1946 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1947 		authdata.algtype = OP_ALG_ALGSEL_SHA224;
1948 		authdata.algmode = OP_ALG_AAI_HMAC;
1949 		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
1950 		break;
1951 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1952 		authdata.algtype = OP_ALG_ALGSEL_SHA256;
1953 		authdata.algmode = OP_ALG_AAI_HMAC;
1954 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1955 		break;
1956 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1957 		authdata.algtype = OP_ALG_ALGSEL_SHA384;
1958 		authdata.algmode = OP_ALG_AAI_HMAC;
1959 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1960 		break;
1961 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1962 		authdata.algtype = OP_ALG_ALGSEL_SHA512;
1963 		authdata.algmode = OP_ALG_AAI_HMAC;
1964 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1965 		break;
1966 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1967 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1968 	case RTE_CRYPTO_AUTH_NULL:
1969 	case RTE_CRYPTO_AUTH_SHA1:
1970 	case RTE_CRYPTO_AUTH_SHA256:
1971 	case RTE_CRYPTO_AUTH_SHA512:
1972 	case RTE_CRYPTO_AUTH_SHA224:
1973 	case RTE_CRYPTO_AUTH_SHA384:
1974 	case RTE_CRYPTO_AUTH_MD5:
1975 	case RTE_CRYPTO_AUTH_AES_GMAC:
1976 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1977 	case RTE_CRYPTO_AUTH_AES_CMAC:
1978 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1979 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1980 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
1981 			      auth_xform->algo);
1982 		goto error_out;
1983 	default:
1984 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
1985 			      auth_xform->algo);
1986 		goto error_out;
1987 	}
1988 	cipherdata.key = (size_t)session->cipher_key.data;
1989 	cipherdata.keylen = session->cipher_key.length;
1990 	cipherdata.key_enc_flags = 0;
1991 	cipherdata.key_type = RTA_DATA_IMM;
1992 
1993 	switch (cipher_xform->algo) {
1994 	case RTE_CRYPTO_CIPHER_AES_CBC:
1995 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
1996 		cipherdata.algmode = OP_ALG_AAI_CBC;
1997 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1998 		break;
1999 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2000 		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
2001 		cipherdata.algmode = OP_ALG_AAI_CBC;
2002 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2003 		break;
2004 	case RTE_CRYPTO_CIPHER_AES_CTR:
2005 		cipherdata.algtype = OP_ALG_ALGSEL_AES;
2006 		cipherdata.algmode = OP_ALG_AAI_CTR;
2007 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2008 		break;
2009 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2010 	case RTE_CRYPTO_CIPHER_NULL:
2011 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2012 	case RTE_CRYPTO_CIPHER_AES_ECB:
2013 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2014 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2015 			      cipher_xform->algo);
2016 		goto error_out;
2017 	default:
2018 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2019 			      cipher_xform->algo);
2020 		goto error_out;
2021 	}
2022 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2023 				DIR_ENC : DIR_DEC;
2024 
2025 	priv->flc_desc[0].desc[0] = cipherdata.keylen;
2026 	priv->flc_desc[0].desc[1] = authdata.keylen;
2027 	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
2028 			       MIN_JOB_DESC_SIZE,
2029 			       (unsigned int *)priv->flc_desc[0].desc,
2030 			       &priv->flc_desc[0].desc[2], 2);
2031 
2032 	if (err < 0) {
2033 		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
2034 		goto error_out;
2035 	}
2036 	if (priv->flc_desc[0].desc[2] & 1) {
2037 		cipherdata.key_type = RTA_DATA_IMM;
2038 	} else {
2039 		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
2040 		cipherdata.key_type = RTA_DATA_PTR;
2041 	}
2042 	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
2043 		authdata.key_type = RTA_DATA_IMM;
2044 	} else {
2045 		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
2046 		authdata.key_type = RTA_DATA_PTR;
2047 	}
2048 	priv->flc_desc[0].desc[0] = 0;
2049 	priv->flc_desc[0].desc[1] = 0;
2050 	priv->flc_desc[0].desc[2] = 0;
2051 
2052 	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
2053 		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
2054 					      0, &cipherdata, &authdata,
2055 					      session->iv.length,
2056 					      ctxt->auth_only_len,
2057 					      session->digest_length,
2058 					      session->dir);
2059 		if (bufsize < 0) {
2060 			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2061 			goto error_out;
2062 		}
2063 	} else {
2064 		DPAA2_SEC_ERR("Hash before cipher not supported");
2065 		goto error_out;
2066 	}
2067 
2068 	flc->word1_sdl = (uint8_t)bufsize;
2069 	flc->word2_rflc_31_0 = lower_32_bits(
2070 			(size_t)&(((struct dpaa2_sec_qp *)
2071 			dev->data->queue_pairs[0])->rx_vq));
2072 	flc->word3_rflc_63_32 = upper_32_bits(
2073 			(size_t)&(((struct dpaa2_sec_qp *)
2074 			dev->data->queue_pairs[0])->rx_vq));
2075 	session->ctxt = priv;
2076 	for (i = 0; i < bufsize; i++)
2077 		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
2078 			    i, priv->flc_desc[0].desc[i]);
2079 
2080 	return 0;
2081 
2082 error_out:
2083 	rte_free(session->cipher_key.data);
2084 	rte_free(session->auth_key.data);
2085 	rte_free(priv);
2086 	return -1;
2087 }
2088 
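/*
 * Dispatch session creation according to the supplied xform chain. As an
 * illustration only (not part of this driver), the cipher-then-auth branch
 * below would be reached with a chain built roughly like this, where
 * cipher_key, auth_key and IV_OFFSET are application-defined placeholders:
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.next = NULL,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			.key = { .data = auth_key, .length = 20 },
 *			.digest_length = 20,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = cipher_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */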
2089 static int
2090 dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
2091 			    struct rte_crypto_sym_xform *xform,	void *sess)
2092 {
	dpaa2_sec_session *session = sess;
	int ret;
2094 
2095 	PMD_INIT_FUNC_TRACE();
2096 
2097 	if (unlikely(sess == NULL)) {
2098 		DPAA2_SEC_ERR("Invalid session struct");
		return -EINVAL;
2100 	}
2101 
2102 	/* Default IV length = 0 */
2103 	session->iv.length = 0;
2104 
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		ret = dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		ret = dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		ret = dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		ret = dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa2_sec_aead_init(dev, xform, session);

	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	return ret;
2139 }
2140 
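/*
 * Build a lookaside IPsec (protocol offload) session. Unlike the plain
 * crypto paths above, the SEC block is programmed with a full ESP PDB:
 * on egress a prebuilt outer IPv4 header is inlined in the descriptor and
 * SEC performs the encapsulation; on ingress SEC performs decapsulation
 * and ICV validation.
 */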
2141 static int
2142 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
2143 			    struct rte_security_session_conf *conf,
2144 			    void *sess)
2145 {
2146 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2147 	struct rte_crypto_auth_xform *auth_xform;
2148 	struct rte_crypto_cipher_xform *cipher_xform;
2149 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
2150 	struct ctxt_priv *priv;
2151 	struct ipsec_encap_pdb encap_pdb;
2152 	struct ipsec_decap_pdb decap_pdb;
2153 	struct alginfo authdata, cipherdata;
2154 	int bufsize;
2155 	struct sec_flow_context *flc;
2156 
2157 	PMD_INIT_FUNC_TRACE();
2158 
2159 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2160 		cipher_xform = &conf->crypto_xform->cipher;
2161 		auth_xform = &conf->crypto_xform->next->auth;
2162 	} else {
2163 		auth_xform = &conf->crypto_xform->auth;
2164 		cipher_xform = &conf->crypto_xform->next->cipher;
2165 	}
2166 	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
2167 				sizeof(struct ctxt_priv) +
2168 				sizeof(struct sec_flc_desc),
2169 				RTE_CACHE_LINE_SIZE);
2170 
2171 	if (priv == NULL) {
2172 		DPAA2_SEC_ERR("No memory for priv CTXT");
2173 		return -ENOMEM;
2174 	}
2175 
2176 	flc = &priv->flc_desc[0].flc;
2177 
2178 	session->ctxt_type = DPAA2_SEC_IPSEC;
2179 	session->cipher_key.data = rte_zmalloc(NULL,
2180 					       cipher_xform->key.length,
2181 					       RTE_CACHE_LINE_SIZE);
2182 	if (session->cipher_key.data == NULL &&
2183 			cipher_xform->key.length > 0) {
2184 		DPAA2_SEC_ERR("No Memory for cipher key");
2185 		rte_free(priv);
2186 		return -ENOMEM;
2187 	}
2188 
2189 	session->cipher_key.length = cipher_xform->key.length;
2190 	session->auth_key.data = rte_zmalloc(NULL,
2191 					auth_xform->key.length,
2192 					RTE_CACHE_LINE_SIZE);
2193 	if (session->auth_key.data == NULL &&
2194 			auth_xform->key.length > 0) {
2195 		DPAA2_SEC_ERR("No Memory for auth key");
2196 		rte_free(session->cipher_key.data);
2197 		rte_free(priv);
2198 		return -ENOMEM;
2199 	}
2200 	session->auth_key.length = auth_xform->key.length;
2201 	memcpy(session->cipher_key.data, cipher_xform->key.data,
2202 			cipher_xform->key.length);
2203 	memcpy(session->auth_key.data, auth_xform->key.data,
2204 			auth_xform->key.length);
2205 
2206 	authdata.key = (size_t)session->auth_key.data;
2207 	authdata.keylen = session->auth_key.length;
2208 	authdata.key_enc_flags = 0;
2209 	authdata.key_type = RTA_DATA_IMM;
2210 	switch (auth_xform->algo) {
2211 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
2212 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
2213 		authdata.algmode = OP_ALG_AAI_HMAC;
2214 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
2215 		break;
2216 	case RTE_CRYPTO_AUTH_MD5_HMAC:
2217 		authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
2218 		authdata.algmode = OP_ALG_AAI_HMAC;
2219 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
2220 		break;
2221 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
2222 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2223 		authdata.algmode = OP_ALG_AAI_HMAC;
2224 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
2225 		break;
2226 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
2227 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2228 		authdata.algmode = OP_ALG_AAI_HMAC;
2229 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
2230 		break;
2231 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
2232 		authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2233 		authdata.algmode = OP_ALG_AAI_HMAC;
2234 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
2235 		break;
2236 	case RTE_CRYPTO_AUTH_AES_CMAC:
2237 		authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
2238 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
2239 		break;
2240 	case RTE_CRYPTO_AUTH_NULL:
2241 		authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
2242 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2243 		break;
2244 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2245 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2246 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2247 	case RTE_CRYPTO_AUTH_SHA1:
2248 	case RTE_CRYPTO_AUTH_SHA256:
2249 	case RTE_CRYPTO_AUTH_SHA512:
2250 	case RTE_CRYPTO_AUTH_SHA224:
2251 	case RTE_CRYPTO_AUTH_SHA384:
2252 	case RTE_CRYPTO_AUTH_MD5:
2253 	case RTE_CRYPTO_AUTH_AES_GMAC:
2254 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2255 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2256 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2257 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
2258 			      auth_xform->algo);
2259 		goto out;
2260 	default:
2261 		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
2262 			      auth_xform->algo);
2263 		goto out;
2264 	}
2265 	cipherdata.key = (size_t)session->cipher_key.data;
2266 	cipherdata.keylen = session->cipher_key.length;
2267 	cipherdata.key_enc_flags = 0;
2268 	cipherdata.key_type = RTA_DATA_IMM;
2269 
2270 	switch (cipher_xform->algo) {
2271 	case RTE_CRYPTO_CIPHER_AES_CBC:
2272 		cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
2273 		cipherdata.algmode = OP_ALG_AAI_CBC;
2274 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2275 		break;
2276 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2277 		cipherdata.algtype = OP_PCL_IPSEC_3DES;
2278 		cipherdata.algmode = OP_ALG_AAI_CBC;
2279 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2280 		break;
2281 	case RTE_CRYPTO_CIPHER_AES_CTR:
2282 		cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
2283 		cipherdata.algmode = OP_ALG_AAI_CTR;
2284 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2285 		break;
2286 	case RTE_CRYPTO_CIPHER_NULL:
2287 		cipherdata.algtype = OP_PCL_IPSEC_NULL;
2288 		break;
2289 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2290 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2291 	case RTE_CRYPTO_CIPHER_AES_ECB:
2292 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2293 		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2294 			      cipher_xform->algo);
2295 		goto out;
2296 	default:
2297 		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
2298 			      cipher_xform->algo);
2299 		goto out;
2300 	}
2301 
2302 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2303 		struct ip ip4_hdr;
2304 
2305 		flc->dhr = SEC_FLC_DHR_OUTBOUND;
2306 		ip4_hdr.ip_v = IPVERSION;
2307 		ip4_hdr.ip_hl = 5;
2308 		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
2309 		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2310 		ip4_hdr.ip_id = 0;
2311 		ip4_hdr.ip_off = 0;
2312 		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
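		/* Next-protocol 0x32 (decimal 50) is ESP */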
2313 		ip4_hdr.ip_p = 0x32;
2314 		ip4_hdr.ip_sum = 0;
2315 		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2316 		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2317 		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
2318 			sizeof(struct ip));
2319 
2320 		/* For Sec Proto only one descriptor is required. */
2321 		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
2322 		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2323 			PDBOPTS_ESP_OIHI_PDB_INL |
2324 			PDBOPTS_ESP_IVSRC |
2325 			PDBHMO_ESP_ENCAP_DTTL;
2326 		encap_pdb.spi = ipsec_xform->spi;
2327 		encap_pdb.ip_hdr_len = sizeof(struct ip);
2328 
2329 		session->dir = DIR_ENC;
2330 		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
2331 				1, 0, &encap_pdb,
2332 				(uint8_t *)&ip4_hdr,
2333 				&cipherdata, &authdata);
2334 	} else if (ipsec_xform->direction ==
2335 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2336 		flc->dhr = SEC_FLC_DHR_INBOUND;
2337 		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2338 		decap_pdb.options = sizeof(struct ip) << 16;
2339 		session->dir = DIR_DEC;
2340 		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
2341 				1, 0, &decap_pdb, &cipherdata, &authdata);
	} else {
		goto out;
	}
2344 
2345 	if (bufsize < 0) {
2346 		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
2347 		goto out;
2348 	}
2349 
2350 	flc->word1_sdl = (uint8_t)bufsize;
2351 
2352 	/* Enable the stashing control bit */
2353 	DPAA2_SET_FLC_RSC(flc);
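	/*
	 * The 0x14 OR-ed into the low bits of the queue context below
	 * selects the stashing configuration; the exact bit meanings are
	 * defined by the hardware FLC format (see the DPAA2 block guide).
	 */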
2354 	flc->word2_rflc_31_0 = lower_32_bits(
2355 			(size_t)&(((struct dpaa2_sec_qp *)
2356 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
2357 	flc->word3_rflc_63_32 = upper_32_bits(
2358 			(size_t)&(((struct dpaa2_sec_qp *)
2359 			dev->data->queue_pairs[0])->rx_vq));
2360 
2361 	/* Set EWS bit i.e. enable write-safe */
2362 	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1, i.e. reuse input buffers as output buffers */
2364 	DPAA2_SET_FLC_REUSE_BS(flc);
2365 	/* Set FF = 10; reuse input buffers if they provide sufficient space */
2366 	DPAA2_SET_FLC_REUSE_FF(flc);
2367 
2368 	session->ctxt = priv;
2369 
2370 	return 0;
2371 out:
2372 	rte_free(session->auth_key.data);
2373 	rte_free(session->cipher_key.data);
2374 	rte_free(priv);
2375 	return -1;
2376 }
2377 
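/*
 * rte_security entry point: allocate the per-session private data from the
 * caller-supplied mempool and fill it according to the protocol. For
 * illustration only (dev_id, conf and sess_mp are placeholders), an
 * application typically reaches this through the generic API:
 *
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *ses =
 *		rte_security_session_create(ctx, &conf, sess_mp);
 */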
2378 static int
2379 dpaa2_sec_security_session_create(void *dev,
2380 				  struct rte_security_session_conf *conf,
2381 				  struct rte_security_session *sess,
2382 				  struct rte_mempool *mempool)
2383 {
2384 	void *sess_private_data;
2385 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2386 	int ret;
2387 
2388 	if (rte_mempool_get(mempool, &sess_private_data)) {
2389 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
2390 		return -ENOMEM;
2391 	}
2392 
2393 	switch (conf->protocol) {
2394 	case RTE_SECURITY_PROTOCOL_IPSEC:
2395 		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
2396 				sess_private_data);
2397 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
		break;
2402 	}
2403 	if (ret != 0) {
2404 		DPAA2_SEC_ERR("Failed to configure session parameters");
2405 		/* Return session to mempool */
2406 		rte_mempool_put(mempool, sess_private_data);
2407 		return ret;
2408 	}
2409 
2410 	set_sec_session_private_data(sess, sess_private_data);
2411 
2412 	return ret;
2413 }
2414 
2415 /** Clear the memory of session so it doesn't leave key material behind */
2416 static int
2417 dpaa2_sec_security_session_destroy(void *dev __rte_unused,
2418 		struct rte_security_session *sess)
2419 {
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
2424 
2425 	if (sess_priv) {
2426 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2427 
2428 		rte_free(s->ctxt);
2429 		rte_free(s->cipher_key.data);
2430 		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
2432 		set_sec_session_private_data(sess, NULL);
2433 		rte_mempool_put(sess_mp, sess_priv);
2434 	}
2435 	return 0;
2436 }
2437 
2438 static int
2439 dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
2440 		struct rte_crypto_sym_xform *xform,
2441 		struct rte_cryptodev_sym_session *sess,
2442 		struct rte_mempool *mempool)
2443 {
2444 	void *sess_private_data;
2445 	int ret;
2446 
2447 	if (rte_mempool_get(mempool, &sess_private_data)) {
2448 		DPAA2_SEC_ERR("Couldn't get object from session mempool");
2449 		return -ENOMEM;
2450 	}
2451 
2452 	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
2453 	if (ret != 0) {
2454 		DPAA2_SEC_ERR("Failed to configure session parameters");
2455 		/* Return session to mempool */
2456 		rte_mempool_put(mempool, sess_private_data);
2457 		return ret;
2458 	}
2459 
2460 	set_sym_session_private_data(sess, dev->driver_id,
2461 		sess_private_data);
2462 
2463 	return 0;
2464 }
2465 
2466 /** Clear the memory of session so it doesn't leave key material behind */
2467 static void
2468 dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
2469 		struct rte_cryptodev_sym_session *sess)
2470 {
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
2475 
2476 	if (sess_priv) {
2477 		rte_free(s->ctxt);
2478 		rte_free(s->cipher_key.data);
2479 		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
2481 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2482 		set_sym_session_private_data(sess, index, NULL);
2483 		rte_mempool_put(sess_mp, sess_priv);
2484 	}
2485 }
2486 
2487 static int
2488 dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2489 			struct rte_cryptodev_config *config __rte_unused)
2490 {
2491 	PMD_INIT_FUNC_TRACE();
2492 
2493 	return 0;
2494 }
2495 
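/*
 * Enable the DPSECI object through the MC and cache the frame-queue IDs of
 * every configured rx/tx queue pair, so the data path can enqueue and
 * dequeue without further MC calls.
 */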
2496 static int
2497 dpaa2_sec_dev_start(struct rte_cryptodev *dev)
2498 {
2499 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2500 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2501 	struct dpseci_attr attr;
2502 	struct dpaa2_queue *dpaa2_q;
2503 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2504 					dev->data->queue_pairs;
2505 	struct dpseci_rx_queue_attr rx_attr;
2506 	struct dpseci_tx_queue_attr tx_attr;
2507 	int ret, i;
2508 
2509 	PMD_INIT_FUNC_TRACE();
2510 
2511 	memset(&attr, 0, sizeof(struct dpseci_attr));
2512 
2513 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
2514 	if (ret) {
2515 		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
2516 			      priv->hw_id);
2517 		goto get_attr_failure;
2518 	}
2519 	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
2520 	if (ret) {
2521 		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
2522 		goto get_attr_failure;
2523 	}
2524 	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
2525 		dpaa2_q = &qp[i]->rx_vq;
2526 		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2527 				    &rx_attr);
2528 		dpaa2_q->fqid = rx_attr.fqid;
2529 		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
2530 	}
2531 	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
2532 		dpaa2_q = &qp[i]->tx_vq;
2533 		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
2534 				    &tx_attr);
2535 		dpaa2_q->fqid = tx_attr.fqid;
2536 		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
2537 	}
2538 
2539 	return 0;
2540 get_attr_failure:
2541 	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2542 	return -1;
2543 }
2544 
2545 static void
2546 dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
2547 {
2548 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2549 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2550 	int ret;
2551 
2552 	PMD_INIT_FUNC_TRACE();
2553 
2554 	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
2555 	if (ret) {
		DPAA2_SEC_ERR("Failure in disabling dpseci device %d",
2557 			     priv->hw_id);
2558 		return;
2559 	}
2560 
2561 	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
2562 	if (ret < 0) {
		DPAA2_SEC_ERR("SEC device cannot be reset: Error = 0x%x", ret);
2564 		return;
2565 	}
2566 }
2567 
2568 static int
2569 dpaa2_sec_dev_close(struct rte_cryptodev *dev)
2570 {
2571 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2572 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2573 	int ret;
2574 
2575 	PMD_INIT_FUNC_TRACE();
2576 
2577 	/* Function is reverse of dpaa2_sec_dev_init.
2578 	 * It does the following:
2579 	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
2580 	 * 2. Close the DPSECI device
2581 	 * 3. Free the allocated resources.
2582 	 */
2583 
	/* Close the device at the underlying layer */
2585 	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
2586 	if (ret) {
2587 		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
2588 		return -1;
2589 	}
2590 
	/* Free the allocated memory for the dpseci object */
2592 	priv->hw = NULL;
2593 	rte_free(dpseci);
2594 
2595 	return 0;
2596 }
2597 
2598 static void
2599 dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
2600 			struct rte_cryptodev_info *info)
2601 {
2602 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2603 
2604 	PMD_INIT_FUNC_TRACE();
2605 	if (info != NULL) {
2606 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2607 		info->feature_flags = dev->feature_flags;
2608 		info->capabilities = dpaa2_sec_capabilities;
		/* No limit on the number of sessions */
2610 		info->sym.max_nb_sessions = 0;
2611 		info->driver_id = cryptodev_driver_id;
2612 	}
2613 }
2614 
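/*
 * Statistics combine two sources: per-queue-pair software counters kept by
 * the data path, and the SEC engine's own counters read through the MC via
 * dpseci_get_sec_counters().
 */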
2615 static
2616 void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
2617 			 struct rte_cryptodev_stats *stats)
2618 {
2619 	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
2620 	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
2621 	struct dpseci_sec_counters counters = {0};
2622 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2623 					dev->data->queue_pairs;
2624 	int ret, i;
2625 
2626 	PMD_INIT_FUNC_TRACE();
2627 	if (stats == NULL) {
2628 		DPAA2_SEC_ERR("Invalid stats ptr NULL");
2629 		return;
2630 	}
2631 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2632 		if (qp[i] == NULL) {
2633 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
2634 			continue;
2635 		}
2636 
2637 		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
2638 		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
2639 		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
2640 		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
2641 	}
2642 
2643 	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
2644 				      &counters);
2645 	if (ret) {
2646 		DPAA2_SEC_ERR("SEC counters failed");
2647 	} else {
2648 		DPAA2_SEC_INFO("dpseci hardware stats:"
2649 			    "\n\tNum of Requests Dequeued = %" PRIu64
2650 			    "\n\tNum of Outbound Encrypt Requests = %" PRIu64
2651 			    "\n\tNum of Inbound Decrypt Requests = %" PRIu64
2652 			    "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
2653 			    "\n\tNum of Outbound Bytes Protected = %" PRIu64
2654 			    "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
2655 			    "\n\tNum of Inbound Bytes Validated = %" PRIu64,
2656 			    counters.dequeued_requests,
2657 			    counters.ob_enc_requests,
2658 			    counters.ib_dec_requests,
2659 			    counters.ob_enc_bytes,
2660 			    counters.ob_prot_bytes,
2661 			    counters.ib_dec_bytes,
2662 			    counters.ib_valid_bytes);
2663 	}
2664 }
2665 
2666 static
2667 void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
2668 {
2669 	int i;
2670 	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
2671 				   (dev->data->queue_pairs);
2672 
2673 	PMD_INIT_FUNC_TRACE();
2674 
2675 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
2676 		if (qp[i] == NULL) {
2677 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
2678 			continue;
2679 		}
2680 		qp[i]->tx_vq.rx_pkts = 0;
2681 		qp[i]->tx_vq.tx_pkts = 0;
2682 		qp[i]->tx_vq.err_pkts = 0;
2683 		qp[i]->rx_vq.rx_pkts = 0;
2684 		qp[i]->rx_vq.tx_pkts = 0;
2685 		qp[i]->rx_vq.err_pkts = 0;
2686 	}
2687 }
2688 
2689 static struct rte_cryptodev_ops crypto_ops = {
2690 	.dev_configure	      = dpaa2_sec_dev_configure,
2691 	.dev_start	      = dpaa2_sec_dev_start,
2692 	.dev_stop	      = dpaa2_sec_dev_stop,
2693 	.dev_close	      = dpaa2_sec_dev_close,
2694 	.dev_infos_get        = dpaa2_sec_dev_infos_get,
2695 	.stats_get	      = dpaa2_sec_stats_get,
2696 	.stats_reset	      = dpaa2_sec_stats_reset,
2697 	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
2698 	.queue_pair_release   = dpaa2_sec_queue_pair_release,
2699 	.queue_pair_count     = dpaa2_sec_queue_pair_count,
2700 	.sym_session_get_size     = dpaa2_sec_sym_session_get_size,
2701 	.sym_session_configure    = dpaa2_sec_sym_session_configure,
2702 	.sym_session_clear        = dpaa2_sec_sym_session_clear,
2703 };
2704 
2705 static const struct rte_security_capability *
2706 dpaa2_sec_capabilities_get(void *device __rte_unused)
2707 {
2708 	return dpaa2_sec_security_cap;
2709 }
2710 
2711 struct rte_security_ops dpaa2_sec_security_ops = {
2712 	.session_create = dpaa2_sec_security_session_create,
2713 	.session_update = NULL,
2714 	.session_stats_get = NULL,
2715 	.session_destroy = dpaa2_sec_security_session_destroy,
2716 	.set_pkt_metadata = NULL,
2717 	.capabilities_get = dpaa2_sec_capabilities_get
2718 };
2719 
2720 static int
2721 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
2722 {
2723 	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
2724 
2725 	rte_free(dev->security_ctx);
2726 
2727 	rte_mempool_free(internals->fle_pool);
2728 
2729 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
2730 		       dev->data->name, rte_socket_id());
2731 
2732 	return 0;
2733 }
2734 
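/*
 * Per-device initialisation: resolve the MC object behind the rte_device,
 * publish the data-path ops and feature flags, and, in the primary process
 * only, create the security context, open the DPSECI through the MC portal
 * and create the FLE pool used to build frame-list entries on the fly.
 */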
2735 static int
2736 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
2737 {
2738 	struct dpaa2_sec_dev_private *internals;
2739 	struct rte_device *dev = cryptodev->device;
2740 	struct rte_dpaa2_device *dpaa2_dev;
2741 	struct rte_security_ctx *security_instance;
2742 	struct fsl_mc_io *dpseci;
2743 	uint16_t token;
2744 	struct dpseci_attr attr;
2745 	int retcode, hw_id;
2746 	char str[20];
2747 
2748 	PMD_INIT_FUNC_TRACE();
2749 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
2750 	if (dpaa2_dev == NULL) {
2751 		DPAA2_SEC_ERR("DPAA2 SEC device not found");
2752 		return -1;
2753 	}
2754 	hw_id = dpaa2_dev->object_id;
2755 
2756 	cryptodev->driver_id = cryptodev_driver_id;
2757 	cryptodev->dev_ops = &crypto_ops;
2758 
2759 	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
2760 	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
2761 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2762 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2763 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2764 			RTE_CRYPTODEV_FF_SECURITY |
2765 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2766 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2767 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2768 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2769 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2770 
2771 	internals = cryptodev->data->dev_private;
2772 
	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary process has already done this work.
	 */
2778 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2779 		DPAA2_SEC_DEBUG("Device already init by primary process");
2780 		return 0;
2781 	}
2782 
	/* Initialize security_ctx only for the primary process */
2784 	security_instance = rte_malloc("rte_security_instances_ops",
2785 				sizeof(struct rte_security_ctx), 0);
2786 	if (security_instance == NULL)
2787 		return -ENOMEM;
2788 	security_instance->device = (void *)cryptodev;
2789 	security_instance->ops = &dpaa2_sec_security_ops;
2790 	security_instance->sess_cnt = 0;
2791 	cryptodev->security_ctx = security_instance;
2792 
	/* Open the device via the MC and save the handle for further use */
2794 	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
2795 				sizeof(struct fsl_mc_io), 0);
2796 	if (!dpseci) {
2797 		DPAA2_SEC_ERR(
			"Error allocating memory for the dpseci object");
2799 		return -1;
2800 	}
2801 	dpseci->regs = rte_mcp_ptr_list[0];
2802 
2803 	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
2804 	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpseci device: Error = %x",
2806 			      retcode);
2807 		goto init_error;
2808 	}
2809 	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
2810 	if (retcode != 0) {
2811 		DPAA2_SEC_ERR(
			     "Cannot get dpseci device attributes: Error = %x",
2813 			     retcode);
2814 		goto init_error;
2815 	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);
2817 
2818 	internals->max_nb_queue_pairs = attr.num_tx_queues;
2819 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
2820 	internals->hw = dpseci;
2821 	internals->token = token;
2822 
	snprintf(str, sizeof(str), "fle_pool_%d", cryptodev->data->dev_id);
2824 	internals->fle_pool = rte_mempool_create((const char *)str,
2825 			FLE_POOL_NUM_BUFS,
2826 			FLE_POOL_BUF_SIZE,
2827 			FLE_POOL_CACHE_SIZE, 0,
2828 			NULL, NULL, NULL, NULL,
2829 			SOCKET_ID_ANY, 0);
2830 	if (!internals->fle_pool) {
2831 		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
2832 		goto init_error;
2833 	}
2834 
2835 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
2836 	return 0;
2837 
2838 init_error:
2839 	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2840 
2841 	/* dpaa2_sec_uninit(crypto_dev_name); */
2842 	return -EFAULT;
2843 }
2844 
2845 static int
2846 cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
2847 			  struct rte_dpaa2_device *dpaa2_dev)
2848 {
2849 	struct rte_cryptodev *cryptodev;
2850 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2851 
2852 	int retval;
2853 
	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);
2855 
2856 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2857 	if (cryptodev == NULL)
2858 		return -ENOMEM;
2859 
2860 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2861 		cryptodev->data->dev_private = rte_zmalloc_socket(
2862 					"cryptodev private structure",
2863 					sizeof(struct dpaa2_sec_dev_private),
2864 					RTE_CACHE_LINE_SIZE,
2865 					rte_socket_id());
2866 
2867 		if (cryptodev->data->dev_private == NULL)
2868 			rte_panic("Cannot allocate memzone for private "
2869 				  "device data");
2870 	}
2871 
2872 	dpaa2_dev->cryptodev = cryptodev;
2873 	cryptodev->device = &dpaa2_dev->device;
2874 	cryptodev->device->driver = &dpaa2_drv->driver;
2875 
2876 	/* init user callbacks */
2877 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2878 
2879 	/* Invoke PMD device initialization function */
2880 	retval = dpaa2_sec_dev_init(cryptodev);
2881 	if (retval == 0)
2882 		return 0;
2883 
2884 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2885 		rte_free(cryptodev->data->dev_private);
2886 
2887 	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
2888 
2889 	return -ENXIO;
2890 }
2891 
2892 static int
2893 cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
2894 {
2895 	struct rte_cryptodev *cryptodev;
2896 	int ret;
2897 
2898 	cryptodev = dpaa2_dev->cryptodev;
2899 	if (cryptodev == NULL)
2900 		return -ENODEV;
2901 
2902 	ret = dpaa2_sec_uninit(cryptodev);
2903 	if (ret)
2904 		return ret;
2905 
2906 	return rte_cryptodev_pmd_destroy(cryptodev);
2907 }
2908 
2909 static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
2910 	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
2911 	.drv_type = DPAA2_CRYPTO,
2912 	.driver = {
2913 		.name = "DPAA2 SEC PMD"
2914 	},
2915 	.probe = cryptodev_dpaa2_sec_probe,
2916 	.remove = cryptodev_dpaa2_sec_remove,
2917 };
2918 
2919 static struct cryptodev_driver dpaa2_sec_crypto_drv;
2920 
2921 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
2922 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
2923 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
2924 
2925 RTE_INIT(dpaa2_sec_init_log)
2926 {
	/* Register the crypto PMD log type */
2928 	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
2929 	if (dpaa2_logtype_sec >= 0)
2930 		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
2931 }
2932