/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of  Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>

/* A minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID           0x1957
#define FSL_DEVICE_ID           0x410
#define FSL_SUBSYSTEM_SEC       1
#define FSL_MC_DPSECI_DEVID     3

#define NO_PREFETCH 0
#define TDES_CBC_IV_LEN 8
#define AES_CBC_IV_LEN 16
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static inline int
build_authenc_fd(dpaa2_sec_session *sess,
		 struct rte_crypto_op *op,
		 struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct ctxt_priv *priv = sess->ctxt;
	struct qbman_fle *fle, *sge;
	struct sec_flow_context *flc;
	uint32_t auth_only_len = sym_op->auth.data.length -
				sym_op->cipher.data.length;
	int icv_len = sym_op->auth.digest.length;
	uint8_t *old_icv;
	uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;

	PMD_INIT_FUNC_TRACE();

	/* We use the first FLE entry to store the mbuf (crypto op) pointer.
	 * Since we do not otherwise know which FLE holds it, on dequeue we
	 * step back one FLE from the FD address to recover it from the
	 * previous entry. Using the inline mbuf area would be a better
	 * approach.
	 */
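	/* Layout of this allocation, derived from the code below:
	 * [0]    back-pointer FLE holding the crypto op address
	 * [1]    output frame-list entry of the compound frame
	 * [2]    input frame-list entry of the compound frame
	 * [3..6] scatter-gather entries shared by the two lists,
	 * followed by icv_len bytes of scratch used to hold the old
	 * ICV during decryption.
	 */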
	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	fle = fle + 1;
	sge = fle + 2;
	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
		DPAA2_SET_FLE_BPID(sge + 2, bpid);
		DPAA2_SET_FLE_BPID(sge + 3, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
		DPAA2_SET_FLE_IVP((sge + 2));
		DPAA2_SET_FLE_IVP((sge + 3));
	}

	/* Save the shared descriptor */
	flc = &priv->flc_desc[0].flc;
	/* Configure FD as a FRAME LIST */
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
		   "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
		   sym_op->auth.data.offset,
		   sym_op->auth.data.length,
		   sym_op->auth.digest.length,
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sym_op->cipher.iv.length,
		   sym_op->m_src->data_off);

	/* Configure Output FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	if (auth_only_len)
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->cipher.data.length + icv_len) :
			sym_op->cipher.data.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	/* Configure Output SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->cipher.data.length;

	if (sess->dir == DIR_ENC) {
		sge++;
		DPAA2_SET_FLE_ADDR(sge,
				DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
		sge->length = sym_op->auth.digest.length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
					sym_op->cipher.iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);

	sge++;
	fle++;

	/* Configure Input FLE with Scatter/Gather Entry */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	DPAA2_SET_FLE_SG_EXT(fle);
	DPAA2_SET_FLE_FIN(fle);
	fle->length = (sess->dir == DIR_ENC) ?
			(sym_op->auth.data.length + sym_op->cipher.iv.length) :
			(sym_op->auth.data.length + sym_op->cipher.iv.length +
			 sym_op->auth.digest.length);

	/* Configure Input SGE for Encap/Decap */
	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
	sge->length = sym_op->cipher.iv.length;
	sge++;

	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				sym_op->m_src->data_off);
	sge->length = sym_op->auth.data.length;
	if (sess->dir == DIR_DEC) {
		sge++;
		old_icv = (uint8_t *)(sge + 1);
		memcpy(old_icv, sym_op->auth.digest.data,
		       sym_op->auth.digest.length);
		memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
		sge->length = sym_op->auth.digest.length;
		DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
				 sym_op->auth.digest.length +
				 sym_op->cipher.iv.length));
	}
	DPAA2_SET_FLE_FIN(sge);
	if (auth_only_len) {
		DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
		DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
	}
	return 0;
}

static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	      struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	uint32_t mem_len = (sess->dir == DIR_ENC) ?
			   (3 * sizeof(struct qbman_fle)) :
			   (5 * sizeof(struct qbman_fle) +
			    sym_op->auth.digest.length);
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;
	uint8_t *old_digest;

	PMD_INIT_FUNC_TRACE();

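	/* For digest generation, three FLEs suffice: the op back-pointer
	 * plus the output/input frame-list entries. Verification needs two
	 * extra scatter-gather entries and digest-sized scratch space to
	 * hold a copy of the received digest.
	 */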
	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
		return -1;
	}
	/* TODO: we use the first FLE entry to store the mbuf (crypto op)
	 * pointer. Since we do not otherwise know which FLE holds it, on
	 * dequeue we step back one FLE from the FD address to recover it.
	 * Using the inline mbuf area would be a better approach.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	fle = fle + 1;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
	}
	flc = &priv->flc_desc[DESC_INITFINAL].flc;
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
	fle->length = sym_op->auth.digest.length;

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	fle++;

	if (sess->dir == DIR_ENC) {
		DPAA2_SET_FLE_ADDR(fle,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);
		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
		fle->length = sym_op->auth.data.length;
	} else {
		sge = fle + 2;
		DPAA2_SET_FLE_SG_EXT(fle);
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

		if (likely(bpid < MAX_BPID)) {
			DPAA2_SET_FLE_BPID(sge, bpid);
			DPAA2_SET_FLE_BPID(sge + 1, bpid);
		} else {
			DPAA2_SET_FLE_IVP(sge);
			DPAA2_SET_FLE_IVP((sge + 1));
		}
		DPAA2_SET_FLE_ADDR(sge,
				   DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
		DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
				     sym_op->m_src->data_off);

		DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
				 sym_op->auth.digest.length);
		sge->length = sym_op->auth.data.length;
		sge++;
		old_digest = (uint8_t *)(sge + 1);
		rte_memcpy(old_digest, sym_op->auth.digest.data,
			   sym_op->auth.digest.length);
		memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
		DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
		sge->length = sym_op->auth.digest.length;
		fle->length = sym_op->auth.data.length +
				sym_op->auth.digest.length;
		DPAA2_SET_FLE_FIN(sge);
	}
	DPAA2_SET_FLE_FIN(fle);

	return 0;
}

static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
		struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct qbman_fle *fle, *sge;
	uint32_t mem_len = (5 * sizeof(struct qbman_fle));
	struct sec_flow_context *flc;
	struct ctxt_priv *priv = sess->ctxt;

	PMD_INIT_FUNC_TRACE();

	/* todo - we can use some mempool to avoid malloc here */
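	/* Five FLEs are used here: the op back-pointer, an output FLE that
	 * points straight at the mbuf, an input FLE marked scatter/gather,
	 * and two input SGEs carrying the IV and the cipher data.
	 */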
	fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
	if (!fle) {
		RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
		return -1;
	}
	/* TODO: we use the first FLE entry to store the mbuf (crypto op)
	 * pointer. Since we do not otherwise know which FLE holds it, on
	 * dequeue we step back one FLE from the FD address to recover it.
	 * Using the inline mbuf area would be a better approach.
	 */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
	fle = fle + 1;
	sge = fle + 2;

	if (likely(bpid < MAX_BPID)) {
		DPAA2_SET_FD_BPID(fd, bpid);
		DPAA2_SET_FLE_BPID(fle, bpid);
		DPAA2_SET_FLE_BPID(fle + 1, bpid);
		DPAA2_SET_FLE_BPID(sge, bpid);
		DPAA2_SET_FLE_BPID(sge + 1, bpid);
	} else {
		DPAA2_SET_FD_IVP(fd);
		DPAA2_SET_FLE_IVP(fle);
		DPAA2_SET_FLE_IVP((fle + 1));
		DPAA2_SET_FLE_IVP(sge);
		DPAA2_SET_FLE_IVP((sge + 1));
	}

	flc = &priv->flc_desc[0].flc;
	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
			 sym_op->cipher.iv.length);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

	PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
		   sym_op->cipher.data.offset,
		   sym_op->cipher.data.length,
		   sym_op->cipher.iv.length,
		   sym_op->m_src->data_off);

	DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;

	PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
		   flc, fle, fle->addr_hi, fle->addr_lo, fle->length);

	fle++;

	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
	fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;

	DPAA2_SET_FLE_SG_EXT(fle);

	DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
	sge->length = sym_op->cipher.iv.length;

	sge++;
	DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
	DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
			     sym_op->m_src->data_off);

	sge->length = sym_op->cipher.data.length;
	DPAA2_SET_FLE_FIN(sge);
	DPAA2_SET_FLE_FIN(fle);

	PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[bpid].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}

static inline int
build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
	     struct qbman_fd *fd, uint16_t bpid)
{
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	switch (sess->ctxt_type) {
	case DPAA2_SEC_CIPHER:
		ret = build_cipher_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_AUTH:
		ret = build_auth_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_CIPHER_HASH:
		ret = build_authenc_fd(sess, op, fd, bpid);
		break;
	case DPAA2_SEC_HASH_CIPHER:
	default:
		RTE_LOG(ERR, PMD, "error: Unsupported session\n");
	}
	return ret;
}

static uint16_t
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and VQ. */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t frames_to_send;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	/*todo - need to support multiple buffer pools */
	uint16_t bpid;
	struct rte_mempool *mb_pool;
	dpaa2_sec_session *sess;

	if (unlikely(nb_ops == 0))
		return 0;

	if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
		RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
		return 0;
	}
	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;

	while (nb_ops) {
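		/* Send a full ring burst of MAX_TX_RING_SLOTS frames while
		 * at least 8 ops remain ((nb_ops >> 3) != 0), otherwise
		 * send whatever is left; the shift assumes the ring burst
		 * size is 8.
		 */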
		frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Clear the unused FD fields before sending*/
			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
			sess = (dpaa2_sec_session *)
				(*ops)->sym->session->_private;
			mb_pool = (*ops)->sym->m_src->pool;
			bpid = mempool_to_bpid(mb_pool);
			ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
			if (ret) {
				PMD_DRV_LOG(ERR, "error: Improper packet"
					    " contents for crypto operation\n");
				goto skip_tx;
			}
			ops++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_send_multiple(swp, &eqdesc,
							&fd_arr[loop],
							frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_ops -= frames_to_send;
	}
skip_tx:
	dpaa2_qp->tx_vq.tx_pkts += num_tx;
	dpaa2_qp->tx_vq.err_pkts += nb_ops;
	return num_tx;
}

static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_fle *fle;
	struct rte_crypto_op *op;

	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
		   fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

	/* We use the first FLE entry to store the mbuf (crypto op) pointer.
	 * Since we do not otherwise know which FLE holds it, we step back
	 * one FLE from the FD address to recover it from the previous
	 * entry. Using the inline mbuf area would be a better approach.
	 */

	if (unlikely(DPAA2_GET_FD_IVP(fd))) {
		/* TODO complete it. */
		RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
		return NULL;
	}
	op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR((fle - 1)));

	/* Prefetch op */
	rte_prefetch0(op->sym->m_src);

	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
		   (void *)op->sym->m_src, op->sym->m_src->buf_addr);

	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
		   (void *)DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	/* Free the FLE block allocated in the build_*_fd() path; (fle - 1)
	 * is the base entry that holds the op back-pointer.
	 */
	rte_free(fle - 1);

	return op;
}

static uint16_t
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
			uint16_t nb_ops)
{
	/* Function to receive frames for the given device and VQ. */
	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_qp->rx_vq.fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;

	if (!DPAA2_PER_LCORE_SEC_DPIO) {
		ret = dpaa2_affine_qbman_swp_sec();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_SEC_PORTAL;
	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
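	/* A single volatile dequeue command services the whole burst: only
	 * the first pre-allocated dq_storage entry is used, and at most
	 * DPAA2_DQRR_RING_SIZE frames are pulled per call.
	 */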

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc,
				      (nb_ops > DPAA2_DQRR_RING_SIZE) ?
				      DPAA2_DQRR_RING_SIZE : nb_ops);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
				    1);

	/*Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued. "
				"QBMAN is busy\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}

	/* Receive packets until the Last Dequeue entry is found for
	 * the PULL command issued above.
	 */
	while (!is_last) {
		/* Check whether the previously issued command has completed.
		 * The SWP also appears to be shared between the Ethernet
		 * driver and the SEC driver.
		 */
		while (!qbman_check_command_complete(swp, dq_storage))
			;

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_result_has_new_result(swp, dq_storage))
			;
		/* Check whether the last pull command has expired, and set
		 * the loop-termination condition.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely(
				(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
				PMD_RX_LOG(DEBUG, "No frame is delivered");
				continue;
			}
		}

		fd = qbman_result_DQ_fd(dq_storage);
		ops[num_rx] = sec_fd_to_mbuf(fd);

		if (unlikely(fd->simple.frc)) {
			/* TODO Parse SEC errors */
			RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
				fd->simple.frc);
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
		} else {
			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}

		num_rx++;
		dq_storage++;
	} /* End of Packet Rx loop */

	dpaa2_qp->rx_vq.rx_pkts += num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
	/* Return the total number of packets received to the DPAA2 app. */
	return num_rx;
}

/** Release queue pair */
static int
dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct dpaa2_sec_qp *qp =
		(struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (qp->rx_vq.q_storage) {
		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
		rte_free(qp->rx_vq.q_storage);
	}
	rte_free(qp);

	dev->data->queue_pairs[queue_pair_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct dpaa2_sec_qp *qp;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int32_t retcode;

	PMD_INIT_FUNC_TRACE();

	/* Nothing to do if the queue pair is already set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		PMD_DRV_LOG(INFO, "QP already setup");
		return 0;
	}

	PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		    dev, qp_id, qp_conf);

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

	qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
			RTE_CACHE_LINE_SIZE);
	if (!qp) {
		RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
		return -1;
	}

	qp->rx_vq.dev = dev;
	qp->tx_vq.dev = dev;
	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
		sizeof(struct queue_storage_info_t),
		RTE_CACHE_LINE_SIZE);
	if (!qp->rx_vq.q_storage) {
		RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
		return -1;
	}
	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
		RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
		return -1;
	}

	dev->data->queue_pairs[qp_id] = qp;

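	/* Register the rx_vq address as the queue's user context so that
	 * frames dequeued from this FQ can be traced back to the qp.
	 */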
	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				      qp_id, &cfg);
	return retcode;
}

/** Start queue pair */
static int
dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			   __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Returns the size of the DPAA2 SEC session structure */
static unsigned int
dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa2_sec_session);
}

static void
dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
			     void *sess __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
		      struct rte_crypto_sym_xform *xform,
		      dpaa2_sec_session *session)
{
	struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
	struct alginfo cipherdata;
	int bufsize, i;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC CIPHER only one descriptor is required. */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (xform->cipher.algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		ctxt->iv.length = AES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		ctxt->iv.length = TDES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_AES_GCM:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_NULL:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			xform->cipher.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			xform->cipher.algo);
		goto error_out;
	}
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
					&cipherdata, NULL, ctxt->iv.length,
					session->dir);
	if (bufsize < 0) {
		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
		goto error_out;
	}
	flc->dhr = 0;
	flc->bpv0 = 0x1;
	flc->mode_bits = 0x8000;

	flc->word1_sdl = (uint8_t)bufsize;
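	/* The return-FLC words below stash the virtual address of this
	 * device's first rx queue, so that SEC responses carry the queue
	 * context back with them (an interpretation of the flow-context
	 * layout, not stated elsewhere in this file).
	 */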
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	for (i = 0; i < bufsize; i++)
		PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
			    i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
	struct alginfo authdata;
	unsigned int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;

	PMD_INIT_FUNC_TRACE();

	/* For SEC AUTH three descriptors are required for various stages */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + 3 *
			sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	flc = &priv->flc_desc[DESC_INITFINAL].flc;

	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
			RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = xform->auth.key.length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	switch (xform->auth.algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_GCM:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			xform->auth.algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			xform->auth.algo);
		goto error_out;
	}
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
				DIR_ENC : DIR_DEC;

	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
				   1, 0, &authdata, !session->dir,
				   ctxt->trunc_len);

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	return 0;

error_out:
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

static int
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    dpaa2_sec_session *session)
{
	struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
	struct alginfo authdata, cipherdata;
	unsigned int bufsize;
	struct ctxt_priv *priv;
	struct sec_flow_context *flc;
	struct rte_crypto_cipher_xform *cipher_xform;
	struct rte_crypto_auth_xform *auth_xform;
	int err;

	PMD_INIT_FUNC_TRACE();

	if (session->ext_params.aead_ctxt.auth_cipher_text) {
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
	} else {
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
		session->ctxt_type =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
	}
	/* For SEC AEAD only one descriptor is required */
	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
			sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
			RTE_CACHE_LINE_SIZE);
	if (priv == NULL) {
		RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
		return -1;
	}

	flc = &priv->flc_desc[0].flc;

	session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for cipher key");
		rte_free(priv);
		return -1;
	}
	session->cipher_key.length = cipher_xform->key.length;
	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
		RTE_LOG(ERR, PMD, "No Memory for auth key");
		rte_free(session->cipher_key.data);
		rte_free(priv);
		return -1;
	}
	session->auth_key.length = auth_xform->key.length;
	memcpy(session->cipher_key.data, cipher_xform->key.data,
	       cipher_xform->key.length);
	memcpy(session->auth_key.data, auth_xform->key.data,
	       auth_xform->key.length);

	ctxt->trunc_len = auth_xform->digest_length;
	authdata.key = (uint64_t)session->auth_key.data;
	authdata.keylen = session->auth_key.length;
	authdata.key_enc_flags = 0;
	authdata.key_type = RTA_DATA_IMM;

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_AES_GCM:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
			auth_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
			auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (uint64_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		ctxt->iv.length = AES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		ctxt->iv.length = TDES_CBC_IV_LEN;
		break;
	case RTE_CRYPTO_CIPHER_AES_GCM:
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
			cipher_xform->algo);
		goto error_out;
	default:
		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
			cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

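	/* Decide whether each key can live inline in the shared descriptor.
	 * desc[0]/desc[1] are loaded with the key lengths as input to
	 * rta_inline_query(); on return, bit 0 of desc[2] is set when the
	 * cipher key fits inline and bit 1 when the auth key does.
	 */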
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;

	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      ctxt->iv.length,
					      ctxt->auth_only_len,
					      ctxt->trunc_len,
					      session->dir);
	} else {
		RTE_LOG(ERR, PMD, "Hash before cipher not supported");
		goto error_out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
	flc->word2_rflc_31_0 = lower_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	flc->word3_rflc_63_32 = upper_32_bits(
			(uint64_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));
	session->ctxt = priv;

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(priv);
	return -1;
}

static void *
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct");
		return NULL;
	}
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_init(dev, xform, session);
	} else {
		RTE_LOG(ERR, PMD, "Invalid crypto type");
		return NULL;
	}

	return session;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess;

	PMD_INIT_FUNC_TRACE();

	if (s) {
		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(sess, 0, sizeof(dpaa2_sec_session));
	}
}

static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return -ENOTSUP;
}

static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
			     priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		PMD_INIT_LOG(ERR,
			     "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}

static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
			     priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %x\n",
			     ret);
		return;
	}
}

static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */

	/* Close the device at the underlying layer. */
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
			     " error code %d\n", ret);
		return -1;
	}

	/* Free the allocated memory for the dpseci object. */
	priv->hw = NULL;
	free(dpseci);

	return 0;
}

static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
	}
}

static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
	} else {
		PMD_DRV_LOG(INFO, "dpseci hw stats:"
			    "\n\tNumber of Requests Dequeued = %lu"
			    "\n\tNumber of Outbound Encrypt Requests = %lu"
			    "\n\tNumber of Inbound Decrypt Requests = %lu"
			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
			    "\n\tNumber of Outbound Bytes Protected = %lu"
			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
			    "\n\tNumber of Inbound Bytes Validated = %lu",
			    counters.dequeued_requests,
			    counters.ob_enc_requests,
			    counters.ib_dec_requests,
			    counters.ob_enc_bytes,
			    counters.ob_prot_bytes,
			    counters.ib_dec_bytes,
			    counters.ib_valid_bytes);
	}
}

static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure	      = dpaa2_sec_dev_configure,
	.dev_start	      = dpaa2_sec_dev_start,
	.dev_stop	      = dpaa2_sec_dev_stop,
	.dev_close	      = dpaa2_sec_dev_close,
	.dev_infos_get        = dpaa2_sec_dev_infos_get,
	.stats_get	      = dpaa2_sec_stats_get,
	.stats_reset	      = dpaa2_sec_stats_reset,
	.queue_pair_setup     = dpaa2_sec_queue_pair_setup,
	.queue_pair_release   = dpaa2_sec_queue_pair_release,
	.queue_pair_start     = dpaa2_sec_queue_pair_start,
	.queue_pair_stop      = dpaa2_sec_queue_pair_stop,
	.queue_pair_count     = dpaa2_sec_queue_pair_count,
	.session_get_size     = dpaa2_sec_session_get_size,
	.session_initialize   = dpaa2_sec_session_initialize,
	.session_configure    = dpaa2_sec_session_configure,
	.session_clear        = dpaa2_sec_session_clear,
};

static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
		return 0;
	}
	/*Open the rte device via MC and save the handle for further use*/
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		PMD_INIT_LOG(ERR,
			     "Error in allocating the memory for dpsec object");
		return -1;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
			     retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		PMD_INIT_LOG(ERR,
			     "Cannot get dpsec device attributes: Error = %x",
			     retcode);
		goto init_error;
	}
	sprintf(cryptodev->data->name, "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}

static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}

static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	/* free crypto device */
	rte_cryptodev_pmd_release_device(cryptodev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->device = NULL;
	cryptodev->data = NULL;

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_type = DPAA2_MC_DPSECI_DEVID,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);