xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 6491dbbecebb1e4f07fc970ef90b34119d8be2e3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30 
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35 
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39 
40 enum rta_sec_era rta_sec_era;
41 
42 static uint8_t cryptodev_driver_id;
43 
44 static __thread struct rte_crypto_op **dpaa_sec_ops;
45 static __thread int dpaa_sec_op_nb;
46 
47 static int
48 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
49 
50 static inline void
51 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
52 {
53 	if (!ctx->fd_status) {
54 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
55 	} else {
56 		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
57 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
58 	}
59 
60 	/* report op status to sym->op and then free the ctx memory */
61 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
62 }
63 
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
66 {
67 	struct dpaa_sec_op_ctx *ctx;
68 	int retval;
69 
70 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
71 	if (!ctx || retval) {
72 		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
73 		return NULL;
74 	}
75 	/*
76 	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
77 	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
78 	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
79 	 * for each packet, memset() would be costlier than dcbz_64().
80 	 */
81 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
82 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
83 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
84 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
85 
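	/* Cache the virt-to-IOVA delta of this ctx object so that addresses
	 * of fields living in the same mempool element can later be
	 * translated with a plain subtraction (see dpaa_mem_vtop_ctx()).
	 */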
86 	ctx->ctx_pool = ses->ctx_pool;
87 	ctx->vtop_offset = (size_t) ctx
88 				- rte_mempool_virt2iova(ctx);
89 
90 	return ctx;
91 }
92 
93 static inline rte_iova_t
94 dpaa_mem_vtop(void *vaddr)
95 {
96 	const struct rte_memseg *ms;
97 
98 	ms = rte_mem_virt2memseg(vaddr, NULL);
99 	if (ms)
100 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
101 	return (size_t)NULL;
102 }
103 
104 /* virtual address conversion when mempool support is available for ctx */
105 static inline phys_addr_t
106 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
107 {
108 	return (size_t)vaddr - ctx->vtop_offset;
109 }
110 
111 static inline void *
112 dpaa_mem_ptov(rte_iova_t paddr)
113 {
114 	return rte_mem_iova2virt(paddr);
115 }
116 
117 static void
118 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
119 		   struct qman_fq *fq,
120 		   const struct qm_mr_entry *msg)
121 {
122 	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
123 		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
124 }
125 
126 /* initialize the queue with dest chan as caam chan so that
127  * all the packets in this queue can be dispatched to caam
128  */
129 static int
130 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
131 		 uint32_t fqid_out)
132 {
133 	struct qm_mcc_initfq fq_opts;
134 	uint32_t flags;
135 	int ret = -1;
136 
137 	/* Clear FQ options */
138 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
139 
140 	flags = QMAN_INITFQ_FLAG_SCHED;
141 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
142 			  QM_INITFQ_WE_CONTEXTB;
143 
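	/* context_a carries the IOVA of the session's shared descriptor (CDB)
	 * and context_b the FQID on which SEC will enqueue the results.
	 */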
144 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
145 	fq_opts.fqd.context_b = fqid_out;
146 	fq_opts.fqd.dest.channel = qm_channel_caam;
147 	fq_opts.fqd.dest.wq = 0;
148 
149 	fq_in->cb.ern  = ern_sec_fq_handler;
150 
151 	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
152 
153 	ret = qman_init_fq(fq_in, flags, &fq_opts);
154 	if (unlikely(ret != 0))
155 		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
156 
157 	return ret;
158 }
159 
160 /* frames enqueued on in_fq are processed by caam, which puts the crypto result into out_fq */
161 static enum qman_cb_dqrr_result
162 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
163 		  struct qman_fq *fq __always_unused,
164 		  const struct qm_dqrr_entry *dqrr)
165 {
166 	const struct qm_fd *fd;
167 	struct dpaa_sec_job *job;
168 	struct dpaa_sec_op_ctx *ctx;
169 
170 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
171 		return qman_cb_dqrr_defer;
172 
173 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
174 		return qman_cb_dqrr_consume;
175 
176 	fd = &dqrr->fd;
177 	/* sg is embedded in an op ctx,
178 	 * sg[0] is for output
179 	 * sg[1] for input
180 	 */
181 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
182 
183 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
184 	ctx->fd_status = fd->status;
185 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
186 		struct qm_sg_entry *sg_out;
187 		uint32_t len;
188 
189 		sg_out = &job->sg[0];
190 		hw_sg_to_cpu(sg_out);
191 		len = sg_out->length;
192 		ctx->op->sym->m_src->pkt_len = len;
193 		ctx->op->sym->m_src->data_len = len;
194 	}
195 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
196 	dpaa_sec_op_ending(ctx);
197 
198 	return qman_cb_dqrr_consume;
199 }
200 
201 /* caam result is put into this queue */
202 static int
203 dpaa_sec_init_tx(struct qman_fq *fq)
204 {
205 	int ret;
206 	struct qm_mcc_initfq opts;
207 	uint32_t flags;
208 
209 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
210 		QMAN_FQ_FLAG_DYNAMIC_FQID;
211 
212 	ret = qman_create_fq(0, flags, fq);
213 	if (unlikely(ret)) {
214 		PMD_INIT_LOG(ERR, "qman_create_fq failed");
215 		return ret;
216 	}
217 
218 	memset(&opts, 0, sizeof(opts));
219 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
220 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
221 
222 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
223 
224 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
225 	fq->cb.ern  = ern_sec_fq_handler;
226 
227 	ret = qman_init_fq(fq, 0, &opts);
228 	if (unlikely(ret)) {
229 		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
230 		return ret;
231 	}
232 
233 	return ret;
234 }
235 
236 static inline int is_cipher_only(dpaa_sec_session *ses)
237 {
238 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
239 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
240 }
241 
242 static inline int is_auth_only(dpaa_sec_session *ses)
243 {
244 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
245 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
246 }
247 
248 static inline int is_aead(dpaa_sec_session *ses)
249 {
250 	return ((ses->cipher_alg == 0) &&
251 		(ses->auth_alg == 0) &&
252 		(ses->aead_alg != 0));
253 }
254 
255 static inline int is_auth_cipher(dpaa_sec_session *ses)
256 {
257 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
258 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
259 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
260 }
261 
262 static inline int is_proto_ipsec(dpaa_sec_session *ses)
263 {
264 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
265 }
266 
267 static inline int is_encode(dpaa_sec_session *ses)
268 {
269 	return ses->dir == DIR_ENC;
270 }
271 
272 static inline int is_decode(dpaa_sec_session *ses)
273 {
274 	return ses->dir == DIR_DEC;
275 }
276 
277 static inline void
278 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
279 {
280 	switch (ses->auth_alg) {
281 	case RTE_CRYPTO_AUTH_NULL:
282 		ses->digest_length = 0;
283 		break;
284 	case RTE_CRYPTO_AUTH_MD5_HMAC:
285 		alginfo_a->algtype =
286 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
287 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
288 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
289 		break;
290 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
291 		alginfo_a->algtype =
292 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
293 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
294 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
295 		break;
296 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
297 		alginfo_a->algtype =
298 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
299 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
300 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
301 		break;
302 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
303 		alginfo_a->algtype =
304 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
305 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
306 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
307 		break;
308 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
309 		alginfo_a->algtype =
310 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
311 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
312 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
313 		break;
314 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
315 		alginfo_a->algtype =
316 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
317 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
318 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
319 		break;
320 	default:
321 		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
322 	}
323 }
324 
325 static inline void
326 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
327 {
328 	switch (ses->cipher_alg) {
329 	case RTE_CRYPTO_CIPHER_NULL:
330 		break;
331 	case RTE_CRYPTO_CIPHER_AES_CBC:
332 		alginfo_c->algtype =
333 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
334 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
335 		alginfo_c->algmode = OP_ALG_AAI_CBC;
336 		break;
337 	case RTE_CRYPTO_CIPHER_3DES_CBC:
338 		alginfo_c->algtype =
339 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
340 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
341 		alginfo_c->algmode = OP_ALG_AAI_CBC;
342 		break;
343 	case RTE_CRYPTO_CIPHER_AES_CTR:
344 		alginfo_c->algtype =
345 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
346 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
347 		alginfo_c->algmode = OP_ALG_AAI_CTR;
348 		break;
349 	default:
350 		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
351 	}
352 }
353 
354 static inline void
355 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
356 {
357 	switch (ses->aead_alg) {
358 	case RTE_CRYPTO_AEAD_AES_GCM:
359 		alginfo->algtype = OP_ALG_ALGSEL_AES;
360 		alginfo->algmode = OP_ALG_AAI_GCM;
361 		break;
362 	default:
363 		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
364 	}
365 }
366 
367 
368 /* prepare command block of the session */
369 static int
370 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
371 {
372 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
373 	uint32_t shared_desc_len = 0;
374 	struct sec_cdb *cdb = &ses->cdb;
375 	int err;
376 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
377 	int swap = false;
378 #else
379 	int swap = true;
380 #endif
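	/* 'swap' tells RTA whether descriptor words must be byte-swapped for
	 * the SEC block, i.e. when running on a little-endian core.
	 */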
381 
382 	memset(cdb, 0, sizeof(struct sec_cdb));
383 
384 	if (is_cipher_only(ses)) {
385 		caam_cipher_alg(ses, &alginfo_c);
386 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
387 			PMD_TX_LOG(ERR, "not supported cipher alg\n");
388 			return -ENOTSUP;
389 		}
390 
391 		alginfo_c.key = (size_t)ses->cipher_key.data;
392 		alginfo_c.keylen = ses->cipher_key.length;
393 		alginfo_c.key_enc_flags = 0;
394 		alginfo_c.key_type = RTA_DATA_IMM;
395 
396 		shared_desc_len = cnstr_shdsc_blkcipher(
397 						cdb->sh_desc, true,
398 						swap, &alginfo_c,
399 						NULL,
400 						ses->iv.length,
401 						ses->dir);
402 	} else if (is_auth_only(ses)) {
403 		caam_auth_alg(ses, &alginfo_a);
404 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
405 			PMD_TX_LOG(ERR, "not supported auth alg\n");
406 			return -ENOTSUP;
407 		}
408 
409 		alginfo_a.key = (size_t)ses->auth_key.data;
410 		alginfo_a.keylen = ses->auth_key.length;
411 		alginfo_a.key_enc_flags = 0;
412 		alginfo_a.key_type = RTA_DATA_IMM;
413 
414 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
415 						   swap, &alginfo_a,
416 						   !ses->dir,
417 						   ses->digest_length);
418 	} else if (is_aead(ses)) {
419 		caam_aead_alg(ses, &alginfo);
420 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
421 			PMD_TX_LOG(ERR, "not supported aead alg\n");
422 			return -ENOTSUP;
423 		}
424 		alginfo.key = (size_t)ses->aead_key.data;
425 		alginfo.keylen = ses->aead_key.length;
426 		alginfo.key_enc_flags = 0;
427 		alginfo.key_type = RTA_DATA_IMM;
428 
429 		if (ses->dir == DIR_ENC)
430 			shared_desc_len = cnstr_shdsc_gcm_encap(
431 					cdb->sh_desc, true, swap,
432 					&alginfo,
433 					ses->iv.length,
434 					ses->digest_length);
435 		else
436 			shared_desc_len = cnstr_shdsc_gcm_decap(
437 					cdb->sh_desc, true, swap,
438 					&alginfo,
439 					ses->iv.length,
440 					ses->digest_length);
441 	} else {
442 		caam_cipher_alg(ses, &alginfo_c);
443 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
444 			PMD_TX_LOG(ERR, "not supported cipher alg\n");
445 			return -ENOTSUP;
446 		}
447 
448 		alginfo_c.key = (size_t)ses->cipher_key.data;
449 		alginfo_c.keylen = ses->cipher_key.length;
450 		alginfo_c.key_enc_flags = 0;
451 		alginfo_c.key_type = RTA_DATA_IMM;
452 
453 		caam_auth_alg(ses, &alginfo_a);
454 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
455 			PMD_TX_LOG(ERR, "not supported auth alg\n");
456 			return -ENOTSUP;
457 		}
458 
459 		alginfo_a.key = (size_t)ses->auth_key.data;
460 		alginfo_a.keylen = ses->auth_key.length;
461 		alginfo_a.key_enc_flags = 0;
462 		alginfo_a.key_type = RTA_DATA_IMM;
463 
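		/* Let rta_inline_query() decide, from the combined key sizes,
		 * whether each key can be inlined in the shared descriptor
		 * (RTA_DATA_IMM) or must be referenced by pointer
		 * (RTA_DATA_PTR). sh_desc[0..2] are used as scratch space
		 * here and cleared again below.
		 */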
464 		cdb->sh_desc[0] = alginfo_c.keylen;
465 		cdb->sh_desc[1] = alginfo_a.keylen;
466 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
467 				       MIN_JOB_DESC_SIZE,
468 				       (unsigned int *)cdb->sh_desc,
469 				       &cdb->sh_desc[2], 2);
470 
471 		if (err < 0) {
472 			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
473 			return err;
474 		}
475 		if (cdb->sh_desc[2] & 1)
476 			alginfo_c.key_type = RTA_DATA_IMM;
477 		else {
478 			alginfo_c.key = (size_t)dpaa_mem_vtop(
479 						(void *)(size_t)alginfo_c.key);
480 			alginfo_c.key_type = RTA_DATA_PTR;
481 		}
482 		if (cdb->sh_desc[2] & (1<<1))
483 			alginfo_a.key_type = RTA_DATA_IMM;
484 		else {
485 			alginfo_a.key = (size_t)dpaa_mem_vtop(
486 						(void *)(size_t)alginfo_a.key);
487 			alginfo_a.key_type = RTA_DATA_PTR;
488 		}
489 		cdb->sh_desc[0] = 0;
490 		cdb->sh_desc[1] = 0;
491 		cdb->sh_desc[2] = 0;
492 		if (is_proto_ipsec(ses)) {
493 			if (ses->dir == DIR_ENC) {
494 				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
495 						cdb->sh_desc,
496 						true, swap, &ses->encap_pdb,
497 						(uint8_t *)&ses->ip4_hdr,
498 						&alginfo_c, &alginfo_a);
499 			} else if (ses->dir == DIR_DEC) {
500 				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
501 						cdb->sh_desc,
502 						true, swap, &ses->decap_pdb,
503 						&alginfo_c, &alginfo_a);
504 			}
505 		} else {
506 			/* Auth_only_len is set as 0 here and it will be
507 			 * overwritten in fd for each packet.
508 			 */
509 			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
510 					true, swap, &alginfo_c, &alginfo_a,
511 					ses->iv.length, 0,
512 					ses->digest_length, ses->dir);
513 		}
514 	}
515 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
516 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
517 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
518 
519 	return 0;
520 }
521 
522 /* qp is lockless; it must be accessed by only one thread at a time */
523 static int
524 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
525 {
526 	struct qman_fq *fq;
527 	unsigned int pkts = 0;
528 	int ret;
529 	struct qm_dqrr_entry *dq;
530 
531 	fq = &qp->outq;
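	/* Issue a volatile dequeue command for up to nb_ops frames (capped at
	 * DPAA_MAX_DEQUEUE_NUM_FRAMES) and drain the portal until the VDQCR
	 * command completes.
	 */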
532 	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
533 				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
534 	if (ret)
535 		return 0;
536 
537 	do {
538 		const struct qm_fd *fd;
539 		struct dpaa_sec_job *job;
540 		struct dpaa_sec_op_ctx *ctx;
541 		struct rte_crypto_op *op;
542 
543 		dq = qman_dequeue(fq);
544 		if (!dq)
545 			continue;
546 
547 		fd = &dq->fd;
548 		/* sg is embedded in an op ctx,
549 		 * sg[0] is for output
550 		 * sg[1] for input
551 		 */
552 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
553 
554 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
555 		ctx->fd_status = fd->status;
556 		op = ctx->op;
557 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
558 			struct qm_sg_entry *sg_out;
559 			uint32_t len;
560 
561 			sg_out = &job->sg[0];
562 			hw_sg_to_cpu(sg_out);
563 			len = sg_out->length;
564 			op->sym->m_src->pkt_len = len;
565 			op->sym->m_src->data_len = len;
566 		}
567 		if (!ctx->fd_status) {
568 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
569 		} else {
570 			PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
571 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
572 		}
573 		ops[pkts++] = op;
574 
575 		/* report op status to sym->op and then free the ctx memory */
576 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
577 
578 		qman_dqrr_consume(fq, dq);
579 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
580 
581 	return pkts;
582 }
583 
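/* Build a compound frame for an auth-only request on a (possibly
 * multi-segment) mbuf: sg[0] receives the digest, sg[1] is an extension
 * entry pointing to the input SG table starting at sg[2], which covers the
 * data to authenticate plus the received digest when verifying.
 */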
584 static inline struct dpaa_sec_job *
585 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
586 {
587 	struct rte_crypto_sym_op *sym = op->sym;
588 	struct rte_mbuf *mbuf = sym->m_src;
589 	struct dpaa_sec_job *cf;
590 	struct dpaa_sec_op_ctx *ctx;
591 	struct qm_sg_entry *sg, *out_sg, *in_sg;
592 	phys_addr_t start_addr;
593 	uint8_t *old_digest, extra_segs;
594 
595 	if (is_decode(ses))
596 		extra_segs = 3;
597 	else
598 		extra_segs = 2;
599 
600 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
601 		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
602 								MAX_SG_ENTRIES);
603 		return NULL;
604 	}
605 	ctx = dpaa_sec_alloc_ctx(ses);
606 	if (!ctx)
607 		return NULL;
608 
609 	cf = &ctx->job;
610 	ctx->op = op;
611 	old_digest = ctx->digest;
612 
613 	/* output */
614 	out_sg = &cf->sg[0];
615 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
616 	out_sg->length = ses->digest_length;
617 	cpu_to_hw_sg(out_sg);
618 
619 	/* input */
620 	in_sg = &cf->sg[1];
621 	/* need to extend the input to a compound frame */
622 	in_sg->extension = 1;
623 	in_sg->final = 1;
624 	in_sg->length = sym->auth.data.length;
625 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
626 
627 	/* 1st seg */
628 	sg = in_sg + 1;
629 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
630 	sg->length = mbuf->data_len - sym->auth.data.offset;
631 	sg->offset = sym->auth.data.offset;
632 
633 	/* Successive segs */
634 	mbuf = mbuf->next;
635 	while (mbuf) {
636 		cpu_to_hw_sg(sg);
637 		sg++;
638 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
639 		sg->length = mbuf->data_len;
640 		mbuf = mbuf->next;
641 	}
642 
643 	if (is_decode(ses)) {
644 		/* Digest verification case */
645 		cpu_to_hw_sg(sg);
646 		sg++;
647 		rte_memcpy(old_digest, sym->auth.digest.data,
648 				ses->digest_length);
649 		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
650 		qm_sg_entry_set64(sg, start_addr);
651 		sg->length = ses->digest_length;
652 		in_sg->length += ses->digest_length;
653 	} else {
654 		/* Digest calculation case */
655 		sg->length -= ses->digest_length;
656 	}
657 	sg->final = 1;
658 	cpu_to_hw_sg(sg);
659 	cpu_to_hw_sg(in_sg);
660 
661 	return cf;
662 }
663 
664 /**
665  * packet looks like:
666  *		|<----data_len------->|
667  *    |ip_header|ah_header|icv|payload|
668  *              ^
669  *		|
670  *	   mbuf->pkt.data
671  */
672 static inline struct dpaa_sec_job *
673 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
674 {
675 	struct rte_crypto_sym_op *sym = op->sym;
676 	struct rte_mbuf *mbuf = sym->m_src;
677 	struct dpaa_sec_job *cf;
678 	struct dpaa_sec_op_ctx *ctx;
679 	struct qm_sg_entry *sg;
680 	rte_iova_t start_addr;
681 	uint8_t *old_digest;
682 
683 	ctx = dpaa_sec_alloc_ctx(ses);
684 	if (!ctx)
685 		return NULL;
686 
687 	cf = &ctx->job;
688 	ctx->op = op;
689 	old_digest = ctx->digest;
690 
691 	start_addr = rte_pktmbuf_iova(mbuf);
692 	/* output */
693 	sg = &cf->sg[0];
694 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
695 	sg->length = ses->digest_length;
696 	cpu_to_hw_sg(sg);
697 
698 	/* input */
699 	sg = &cf->sg[1];
700 	if (is_decode(ses)) {
701 		/* need to extend the input to a compound frame */
702 		sg->extension = 1;
703 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
704 		sg->length = sym->auth.data.length + ses->digest_length;
705 		sg->final = 1;
706 		cpu_to_hw_sg(sg);
707 
708 		sg = &cf->sg[2];
709 		/* hash result or digest, save digest first */
710 		rte_memcpy(old_digest, sym->auth.digest.data,
711 			   ses->digest_length);
712 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
713 		sg->length = sym->auth.data.length;
714 		cpu_to_hw_sg(sg);
715 
716 		/* let's check digest by hw */
717 		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
718 		sg++;
719 		qm_sg_entry_set64(sg, start_addr);
720 		sg->length = ses->digest_length;
721 		sg->final = 1;
722 		cpu_to_hw_sg(sg);
723 	} else {
724 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
725 		sg->length = sym->auth.data.length;
726 		sg->final = 1;
727 		cpu_to_hw_sg(sg);
728 	}
729 
730 	return cf;
731 }
732 
733 static inline struct dpaa_sec_job *
734 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
735 {
736 	struct rte_crypto_sym_op *sym = op->sym;
737 	struct dpaa_sec_job *cf;
738 	struct dpaa_sec_op_ctx *ctx;
739 	struct qm_sg_entry *sg, *out_sg, *in_sg;
740 	struct rte_mbuf *mbuf;
741 	uint8_t req_segs;
742 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
743 			ses->iv.offset);
744 
745 	if (sym->m_dst) {
746 		mbuf = sym->m_dst;
747 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
748 	} else {
749 		mbuf = sym->m_src;
750 		req_segs = mbuf->nb_segs * 2 + 3;
751 	}
752 
753 	if (req_segs > MAX_SG_ENTRIES) {
754 		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
755 								MAX_SG_ENTRIES);
756 		return NULL;
757 	}
758 
759 	ctx = dpaa_sec_alloc_ctx(ses);
760 	if (!ctx)
761 		return NULL;
762 
763 	cf = &ctx->job;
764 	ctx->op = op;
765 
766 	/* output */
767 	out_sg = &cf->sg[0];
768 	out_sg->extension = 1;
769 	out_sg->length = sym->cipher.data.length;
770 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
771 	cpu_to_hw_sg(out_sg);
772 
773 	/* 1st seg */
774 	sg = &cf->sg[2];
775 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
776 	sg->length = mbuf->data_len - sym->cipher.data.offset;
777 	sg->offset = sym->cipher.data.offset;
778 
779 	/* Successive segs */
780 	mbuf = mbuf->next;
781 	while (mbuf) {
782 		cpu_to_hw_sg(sg);
783 		sg++;
784 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
785 		sg->length = mbuf->data_len;
786 		mbuf = mbuf->next;
787 	}
788 	sg->final = 1;
789 	cpu_to_hw_sg(sg);
790 
791 	/* input */
792 	mbuf = sym->m_src;
793 	in_sg = &cf->sg[1];
794 	in_sg->extension = 1;
795 	in_sg->final = 1;
796 	in_sg->length = sym->cipher.data.length + ses->iv.length;
797 
798 	sg++;
799 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
800 	cpu_to_hw_sg(in_sg);
801 
802 	/* IV */
803 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
804 	sg->length = ses->iv.length;
805 	cpu_to_hw_sg(sg);
806 
807 	/* 1st seg */
808 	sg++;
809 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
810 	sg->length = mbuf->data_len - sym->cipher.data.offset;
811 	sg->offset = sym->cipher.data.offset;
812 
813 	/* Successive segs */
814 	mbuf = mbuf->next;
815 	while (mbuf) {
816 		cpu_to_hw_sg(sg);
817 		sg++;
818 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
819 		sg->length = mbuf->data_len;
820 		mbuf = mbuf->next;
821 	}
822 	sg->final = 1;
823 	cpu_to_hw_sg(sg);
824 
825 	return cf;
826 }
827 
828 static inline struct dpaa_sec_job *
829 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
830 {
831 	struct rte_crypto_sym_op *sym = op->sym;
832 	struct dpaa_sec_job *cf;
833 	struct dpaa_sec_op_ctx *ctx;
834 	struct qm_sg_entry *sg;
835 	rte_iova_t src_start_addr, dst_start_addr;
836 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
837 			ses->iv.offset);
838 
839 	ctx = dpaa_sec_alloc_ctx(ses);
840 	if (!ctx)
841 		return NULL;
842 
843 	cf = &ctx->job;
844 	ctx->op = op;
845 
846 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
847 
848 	if (sym->m_dst)
849 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
850 	else
851 		dst_start_addr = src_start_addr;
852 
853 	/* output */
854 	sg = &cf->sg[0];
855 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
856 	sg->length = sym->cipher.data.length + ses->iv.length;
857 	cpu_to_hw_sg(sg);
858 
859 	/* input */
860 	sg = &cf->sg[1];
861 
862 	/* need to extend the input to a compound frame */
863 	sg->extension = 1;
864 	sg->final = 1;
865 	sg->length = sym->cipher.data.length + ses->iv.length;
866 	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
867 	cpu_to_hw_sg(sg);
868 
869 	sg = &cf->sg[2];
870 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
871 	sg->length = ses->iv.length;
872 	cpu_to_hw_sg(sg);
873 
874 	sg++;
875 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
876 	sg->length = sym->cipher.data.length;
877 	sg->final = 1;
878 	cpu_to_hw_sg(sg);
879 
880 	return cf;
881 }
882 
883 static inline struct dpaa_sec_job *
884 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
885 {
886 	struct rte_crypto_sym_op *sym = op->sym;
887 	struct dpaa_sec_job *cf;
888 	struct dpaa_sec_op_ctx *ctx;
889 	struct qm_sg_entry *sg, *out_sg, *in_sg;
890 	struct rte_mbuf *mbuf;
891 	uint8_t req_segs;
892 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
893 			ses->iv.offset);
894 
895 	if (sym->m_dst) {
896 		mbuf = sym->m_dst;
897 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
898 	} else {
899 		mbuf = sym->m_src;
900 		req_segs = mbuf->nb_segs * 2 + 4;
901 	}
902 
903 	if (ses->auth_only_len)
904 		req_segs++;
905 
906 	if (req_segs > MAX_SG_ENTRIES) {
907 		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
908 				MAX_SG_ENTRIES);
909 		return NULL;
910 	}
911 
912 	ctx = dpaa_sec_alloc_ctx(ses);
913 	if (!ctx)
914 		return NULL;
915 
916 	cf = &ctx->job;
917 	ctx->op = op;
918 
919 	rte_prefetch0(cf->sg);
920 
921 	/* output */
922 	out_sg = &cf->sg[0];
923 	out_sg->extension = 1;
924 	if (is_encode(ses))
925 		out_sg->length = sym->aead.data.length + ses->auth_only_len
926 						+ ses->digest_length;
927 	else
928 		out_sg->length = sym->aead.data.length + ses->auth_only_len;
929 
930 	/* output sg entries */
931 	sg = &cf->sg[2];
932 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
933 	cpu_to_hw_sg(out_sg);
934 
935 	/* 1st seg */
936 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
937 	sg->length = mbuf->data_len - sym->aead.data.offset +
938 					ses->auth_only_len;
939 	sg->offset = sym->aead.data.offset - ses->auth_only_len;
940 
941 	/* Successive segs */
942 	mbuf = mbuf->next;
943 	while (mbuf) {
944 		cpu_to_hw_sg(sg);
945 		sg++;
946 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
947 		sg->length = mbuf->data_len;
948 		mbuf = mbuf->next;
949 	}
950 	sg->length -= ses->digest_length;
951 
952 	if (is_encode(ses)) {
953 		cpu_to_hw_sg(sg);
954 		/* set auth output */
955 		sg++;
956 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
957 		sg->length = ses->digest_length;
958 	}
959 	sg->final = 1;
960 	cpu_to_hw_sg(sg);
961 
962 	/* input */
963 	mbuf = sym->m_src;
964 	in_sg = &cf->sg[1];
965 	in_sg->extension = 1;
966 	in_sg->final = 1;
967 	if (is_encode(ses))
968 		in_sg->length = ses->iv.length + sym->aead.data.length
969 							+ ses->auth_only_len;
970 	else
971 		in_sg->length = ses->iv.length + sym->aead.data.length
972 				+ ses->auth_only_len + ses->digest_length;
973 
974 	/* input sg entries */
975 	sg++;
976 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
977 	cpu_to_hw_sg(in_sg);
978 
979 	/* 1st seg IV */
980 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
981 	sg->length = ses->iv.length;
982 	cpu_to_hw_sg(sg);
983 
984 	/* 2nd seg auth only */
985 	if (ses->auth_only_len) {
986 		sg++;
987 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
988 		sg->length = ses->auth_only_len;
989 		cpu_to_hw_sg(sg);
990 	}
991 
992 	/* 3rd seg */
993 	sg++;
994 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
995 	sg->length = mbuf->data_len - sym->aead.data.offset;
996 	sg->offset = sym->aead.data.offset;
997 
998 	/* Successive segs */
999 	mbuf = mbuf->next;
1000 	while (mbuf) {
1001 		cpu_to_hw_sg(sg);
1002 		sg++;
1003 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1004 		sg->length = mbuf->data_len;
1005 		mbuf = mbuf->next;
1006 	}
1007 
1008 	if (is_decode(ses)) {
1009 		cpu_to_hw_sg(sg);
1010 		sg++;
1011 		memcpy(ctx->digest, sym->aead.digest.data,
1012 			ses->digest_length);
1013 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1014 		sg->length = ses->digest_length;
1015 	}
1016 	sg->final = 1;
1017 	cpu_to_hw_sg(sg);
1018 
1019 	return cf;
1020 }
1021 
1022 static inline struct dpaa_sec_job *
1023 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1024 {
1025 	struct rte_crypto_sym_op *sym = op->sym;
1026 	struct dpaa_sec_job *cf;
1027 	struct dpaa_sec_op_ctx *ctx;
1028 	struct qm_sg_entry *sg;
1029 	uint32_t length = 0;
1030 	rte_iova_t src_start_addr, dst_start_addr;
1031 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1032 			ses->iv.offset);
1033 
1034 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1035 
1036 	if (sym->m_dst)
1037 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1038 	else
1039 		dst_start_addr = src_start_addr;
1040 
1041 	ctx = dpaa_sec_alloc_ctx(ses);
1042 	if (!ctx)
1043 		return NULL;
1044 
1045 	cf = &ctx->job;
1046 	ctx->op = op;
1047 
1048 	/* input */
1049 	rte_prefetch0(cf->sg);
1050 	sg = &cf->sg[2];
1051 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1052 	if (is_encode(ses)) {
1053 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1054 		sg->length = ses->iv.length;
1055 		length += sg->length;
1056 		cpu_to_hw_sg(sg);
1057 
1058 		sg++;
1059 		if (ses->auth_only_len) {
1060 			qm_sg_entry_set64(sg,
1061 					  dpaa_mem_vtop(sym->aead.aad.data));
1062 			sg->length = ses->auth_only_len;
1063 			length += sg->length;
1064 			cpu_to_hw_sg(sg);
1065 			sg++;
1066 		}
1067 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1068 		sg->length = sym->aead.data.length;
1069 		length += sg->length;
1070 		sg->final = 1;
1071 		cpu_to_hw_sg(sg);
1072 	} else {
1073 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1074 		sg->length = ses->iv.length;
1075 		length += sg->length;
1076 		cpu_to_hw_sg(sg);
1077 
1078 		sg++;
1079 		if (ses->auth_only_len) {
1080 			qm_sg_entry_set64(sg,
1081 					  dpaa_mem_vtop(sym->aead.aad.data));
1082 			sg->length = ses->auth_only_len;
1083 			length += sg->length;
1084 			cpu_to_hw_sg(sg);
1085 			sg++;
1086 		}
1087 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1088 		sg->length = sym->aead.data.length;
1089 		length += sg->length;
1090 		cpu_to_hw_sg(sg);
1091 
1092 		memcpy(ctx->digest, sym->aead.digest.data,
1093 		       ses->digest_length);
1094 		sg++;
1095 
1096 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1097 		sg->length = ses->digest_length;
1098 		length += sg->length;
1099 		sg->final = 1;
1100 		cpu_to_hw_sg(sg);
1101 	}
1102 	/* input compound frame */
1103 	cf->sg[1].length = length;
1104 	cf->sg[1].extension = 1;
1105 	cf->sg[1].final = 1;
1106 	cpu_to_hw_sg(&cf->sg[1]);
1107 
1108 	/* output */
1109 	sg++;
1110 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1111 	qm_sg_entry_set64(sg,
1112 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1113 	sg->length = sym->aead.data.length + ses->auth_only_len;
1114 	length = sg->length;
1115 	if (is_encode(ses)) {
1116 		cpu_to_hw_sg(sg);
1117 		/* set auth output */
1118 		sg++;
1119 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1120 		sg->length = ses->digest_length;
1121 		length += sg->length;
1122 	}
1123 	sg->final = 1;
1124 	cpu_to_hw_sg(sg);
1125 
1126 	/* output compound frame */
1127 	cf->sg[0].length = length;
1128 	cf->sg[0].extension = 1;
1129 	cpu_to_hw_sg(&cf->sg[0]);
1130 
1131 	return cf;
1132 }
1133 
1134 static inline struct dpaa_sec_job *
1135 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1136 {
1137 	struct rte_crypto_sym_op *sym = op->sym;
1138 	struct dpaa_sec_job *cf;
1139 	struct dpaa_sec_op_ctx *ctx;
1140 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1141 	struct rte_mbuf *mbuf;
1142 	uint8_t req_segs;
1143 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1144 			ses->iv.offset);
1145 
1146 	if (sym->m_dst) {
1147 		mbuf = sym->m_dst;
1148 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1149 	} else {
1150 		mbuf = sym->m_src;
1151 		req_segs = mbuf->nb_segs * 2 + 4;
1152 	}
1153 
1154 	if (req_segs > MAX_SG_ENTRIES) {
1155 		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
1156 				MAX_SG_ENTRIES);
1157 		return NULL;
1158 	}
1159 
1160 	ctx = dpaa_sec_alloc_ctx(ses);
1161 	if (!ctx)
1162 		return NULL;
1163 
1164 	cf = &ctx->job;
1165 	ctx->op = op;
1166 
1167 	rte_prefetch0(cf->sg);
1168 
1169 	/* output */
1170 	out_sg = &cf->sg[0];
1171 	out_sg->extension = 1;
1172 	if (is_encode(ses))
1173 		out_sg->length = sym->auth.data.length + ses->digest_length;
1174 	else
1175 		out_sg->length = sym->auth.data.length;
1176 
1177 	/* output sg entries */
1178 	sg = &cf->sg[2];
1179 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
1180 	cpu_to_hw_sg(out_sg);
1181 
1182 	/* 1st seg */
1183 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1184 	sg->length = mbuf->data_len - sym->auth.data.offset;
1185 	sg->offset = sym->auth.data.offset;
1186 
1187 	/* Successive segs */
1188 	mbuf = mbuf->next;
1189 	while (mbuf) {
1190 		cpu_to_hw_sg(sg);
1191 		sg++;
1192 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1193 		sg->length = mbuf->data_len;
1194 		mbuf = mbuf->next;
1195 	}
1196 	sg->length -= ses->digest_length;
1197 
1198 	if (is_encode(ses)) {
1199 		cpu_to_hw_sg(sg);
1200 		/* set auth output */
1201 		sg++;
1202 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1203 		sg->length = ses->digest_length;
1204 	}
1205 	sg->final = 1;
1206 	cpu_to_hw_sg(sg);
1207 
1208 	/* input */
1209 	mbuf = sym->m_src;
1210 	in_sg = &cf->sg[1];
1211 	in_sg->extension = 1;
1212 	in_sg->final = 1;
1213 	if (is_encode(ses))
1214 		in_sg->length = ses->iv.length + sym->auth.data.length;
1215 	else
1216 		in_sg->length = ses->iv.length + sym->auth.data.length
1217 						+ ses->digest_length;
1218 
1219 	/* input sg entries */
1220 	sg++;
1221 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
1222 	cpu_to_hw_sg(in_sg);
1223 
1224 	/* 1st seg IV */
1225 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1226 	sg->length = ses->iv.length;
1227 	cpu_to_hw_sg(sg);
1228 
1229 	/* 2nd seg */
1230 	sg++;
1231 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1232 	sg->length = mbuf->data_len - sym->auth.data.offset;
1233 	sg->offset = sym->auth.data.offset;
1234 
1235 	/* Successive segs */
1236 	mbuf = mbuf->next;
1237 	while (mbuf) {
1238 		cpu_to_hw_sg(sg);
1239 		sg++;
1240 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1241 		sg->length = mbuf->data_len;
1242 		mbuf = mbuf->next;
1243 	}
1244 
1245 	sg->length -= ses->digest_length;
1246 	if (is_decode(ses)) {
1247 		cpu_to_hw_sg(sg);
1248 		sg++;
1249 		memcpy(ctx->digest, sym->auth.digest.data,
1250 			ses->digest_length);
1251 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1252 		sg->length = ses->digest_length;
1253 	}
1254 	sg->final = 1;
1255 	cpu_to_hw_sg(sg);
1256 
1257 	return cf;
1258 }
1259 
1260 static inline struct dpaa_sec_job *
1261 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1262 {
1263 	struct rte_crypto_sym_op *sym = op->sym;
1264 	struct dpaa_sec_job *cf;
1265 	struct dpaa_sec_op_ctx *ctx;
1266 	struct qm_sg_entry *sg;
1267 	rte_iova_t src_start_addr, dst_start_addr;
1268 	uint32_t length = 0;
1269 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1270 			ses->iv.offset);
1271 
1272 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1273 	if (sym->m_dst)
1274 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1275 	else
1276 		dst_start_addr = src_start_addr;
1277 
1278 	ctx = dpaa_sec_alloc_ctx(ses);
1279 	if (!ctx)
1280 		return NULL;
1281 
1282 	cf = &ctx->job;
1283 	ctx->op = op;
1284 
1285 	/* input */
1286 	rte_prefetch0(cf->sg);
1287 	sg = &cf->sg[2];
1288 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1289 	if (is_encode(ses)) {
1290 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1291 		sg->length = ses->iv.length;
1292 		length += sg->length;
1293 		cpu_to_hw_sg(sg);
1294 
1295 		sg++;
1296 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1297 		sg->length = sym->auth.data.length;
1298 		length += sg->length;
1299 		sg->final = 1;
1300 		cpu_to_hw_sg(sg);
1301 	} else {
1302 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1303 		sg->length = ses->iv.length;
1304 		length += sg->length;
1305 		cpu_to_hw_sg(sg);
1306 
1307 		sg++;
1308 
1309 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1310 		sg->length = sym->auth.data.length;
1311 		length += sg->length;
1312 		cpu_to_hw_sg(sg);
1313 
1314 		memcpy(ctx->digest, sym->auth.digest.data,
1315 		       ses->digest_length);
1316 		sg++;
1317 
1318 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1319 		sg->length = ses->digest_length;
1320 		length += sg->length;
1321 		sg->final = 1;
1322 		cpu_to_hw_sg(sg);
1323 	}
1324 	/* input compound frame */
1325 	cf->sg[1].length = length;
1326 	cf->sg[1].extension = 1;
1327 	cf->sg[1].final = 1;
1328 	cpu_to_hw_sg(&cf->sg[1]);
1329 
1330 	/* output */
1331 	sg++;
1332 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1333 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1334 	sg->length = sym->cipher.data.length;
1335 	length = sg->length;
1336 	if (is_encode(ses)) {
1337 		cpu_to_hw_sg(sg);
1338 		/* set auth output */
1339 		sg++;
1340 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1341 		sg->length = ses->digest_length;
1342 		length += sg->length;
1343 	}
1344 	sg->final = 1;
1345 	cpu_to_hw_sg(sg);
1346 
1347 	/* output compound frame */
1348 	cf->sg[0].length = length;
1349 	cf->sg[0].extension = 1;
1350 	cpu_to_hw_sg(&cf->sg[0]);
1351 
1352 	return cf;
1353 }
1354 
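/* IPsec protocol offload: hand the whole packet to SEC. The input frame
 * covers pkt_len bytes and the output frame may use the full buffer
 * (buf_len - data_off), since the protocol descriptor adds or strips the
 * IPsec headers itself.
 */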
1355 static inline struct dpaa_sec_job *
1356 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1357 {
1358 	struct rte_crypto_sym_op *sym = op->sym;
1359 	struct dpaa_sec_job *cf;
1360 	struct dpaa_sec_op_ctx *ctx;
1361 	struct qm_sg_entry *sg;
1362 	phys_addr_t src_start_addr, dst_start_addr;
1363 
1364 	ctx = dpaa_sec_alloc_ctx(ses);
1365 	if (!ctx)
1366 		return NULL;
1367 	cf = &ctx->job;
1368 	ctx->op = op;
1369 
1370 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1371 
1372 	if (sym->m_dst)
1373 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1374 	else
1375 		dst_start_addr = src_start_addr;
1376 
1377 	/* input */
1378 	sg = &cf->sg[1];
1379 	qm_sg_entry_set64(sg, src_start_addr);
1380 	sg->length = sym->m_src->pkt_len;
1381 	sg->final = 1;
1382 	cpu_to_hw_sg(sg);
1383 
1384 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1385 	/* output */
1386 	sg = &cf->sg[0];
1387 	qm_sg_entry_set64(sg, dst_start_addr);
1388 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1389 	cpu_to_hw_sg(sg);
1390 
1391 	return cf;
1392 }
1393 
1394 static uint16_t
1395 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1396 		       uint16_t nb_ops)
1397 {
1398 	/* Function to transmit the frames to the given device and queue pair */
1399 	uint32_t loop;
1400 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1401 	uint16_t num_tx = 0;
1402 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1403 	uint32_t frames_to_send;
1404 	struct rte_crypto_op *op;
1405 	struct dpaa_sec_job *cf;
1406 	dpaa_sec_session *ses;
1407 	struct dpaa_sec_op_ctx *ctx;
1408 	uint32_t auth_only_len;
1409 	struct qman_fq *inq[DPAA_SEC_BURST];
1410 
1411 	while (nb_ops) {
1412 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1413 				DPAA_SEC_BURST : nb_ops;
1414 		for (loop = 0; loop < frames_to_send; loop++) {
1415 			op = *(ops++);
1416 			switch (op->sess_type) {
1417 			case RTE_CRYPTO_OP_WITH_SESSION:
1418 				ses = (dpaa_sec_session *)
1419 					get_session_private_data(
1420 							op->sym->session,
1421 							cryptodev_driver_id);
1422 				break;
1423 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1424 				ses = (dpaa_sec_session *)
1425 					get_sec_session_private_data(
1426 							op->sym->sec_session);
1427 				break;
1428 			default:
1429 				PMD_TX_LOG(ERR,
1430 					"sessionless crypto op not supported");
1431 				frames_to_send = loop;
1432 				nb_ops = loop;
1433 				goto send_pkts;
1434 			}
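			/* Sessions are bound to a queue pair lazily: attach
			 * (or re-attach) the session's input FQ to this qp's
			 * output FQ on first use.
			 */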
1435 			if (unlikely(!ses->qp || ses->qp != qp)) {
1436 				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
1437 						ses->qp, qp);
1438 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1439 					frames_to_send = loop;
1440 					nb_ops = loop;
1441 					goto send_pkts;
1442 				}
1443 			}
1444 
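			/* Bytes that are authenticated but not ciphered; for
			 * AEAD it is replaced below by the session AAD length.
			 */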
1445 			auth_only_len = op->sym->auth.data.length -
1446 						op->sym->cipher.data.length;
1447 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1448 				if (is_auth_only(ses)) {
1449 					cf = build_auth_only(op, ses);
1450 				} else if (is_cipher_only(ses)) {
1451 					cf = build_cipher_only(op, ses);
1452 				} else if (is_aead(ses)) {
1453 					cf = build_cipher_auth_gcm(op, ses);
1454 					auth_only_len = ses->auth_only_len;
1455 				} else if (is_auth_cipher(ses)) {
1456 					cf = build_cipher_auth(op, ses);
1457 				} else if (is_proto_ipsec(ses)) {
1458 					cf = build_proto(op, ses);
1459 				} else {
1460 					PMD_TX_LOG(ERR, "not supported sec op");
1461 					frames_to_send = loop;
1462 					nb_ops = loop;
1463 					goto send_pkts;
1464 				}
1465 			} else {
1466 				if (is_auth_only(ses)) {
1467 					cf = build_auth_only_sg(op, ses);
1468 				} else if (is_cipher_only(ses)) {
1469 					cf = build_cipher_only_sg(op, ses);
1470 				} else if (is_aead(ses)) {
1471 					cf = build_cipher_auth_gcm_sg(op, ses);
1472 					auth_only_len = ses->auth_only_len;
1473 				} else if (is_auth_cipher(ses)) {
1474 					cf = build_cipher_auth_sg(op, ses);
1475 				} else {
1476 					PMD_TX_LOG(ERR, "not supported sec op");
1477 					frames_to_send = loop;
1478 					nb_ops = loop;
1479 					goto send_pkts;
1480 				}
1481 			}
1482 			if (unlikely(!cf)) {
1483 				frames_to_send = loop;
1484 				nb_ops = loop;
1485 				goto send_pkts;
1486 			}
1487 
1488 			fd = &fds[loop];
1489 			inq[loop] = ses->inq;
1490 			fd->opaque_addr = 0;
1491 			fd->cmd = 0;
1492 			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1493 			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1494 			fd->_format1 = qm_fd_compound;
1495 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1496 			/* Auth_only_len is set as 0 in the descriptor and is
1497 			 * overwritten here in fd.cmd, which updates the
1498 			 * DPOVRD reg.
1499 			 */
1500 			if (auth_only_len)
1501 				fd->cmd = 0x80000000 | auth_only_len;
1502 
1503 		}
1504 send_pkts:
1505 		loop = 0;
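		/* qman_enqueue_multi_fq() may accept fewer frames than
		 * requested; retry the remainder until the whole prepared
		 * burst has been submitted.
		 */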
1506 		while (loop < frames_to_send) {
1507 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1508 					frames_to_send - loop);
1509 		}
1510 		nb_ops -= frames_to_send;
1511 		num_tx += frames_to_send;
1512 	}
1513 
1514 	dpaa_qp->tx_pkts += num_tx;
1515 	dpaa_qp->tx_errs += nb_ops - num_tx;
1516 
1517 	return num_tx;
1518 }
1519 
1520 static uint16_t
1521 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1522 		       uint16_t nb_ops)
1523 {
1524 	uint16_t num_rx;
1525 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1526 
1527 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1528 
1529 	dpaa_qp->rx_pkts += num_rx;
1530 	dpaa_qp->rx_errs += nb_ops - num_rx;
1531 
1532 	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
1533 
1534 	return num_rx;
1535 }
1536 
1537 /** Release queue pair */
1538 static int
1539 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1540 			    uint16_t qp_id)
1541 {
1542 	struct dpaa_sec_dev_private *internals;
1543 	struct dpaa_sec_qp *qp = NULL;
1544 
1545 	PMD_INIT_FUNC_TRACE();
1546 
1547 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
1548 
1549 	internals = dev->data->dev_private;
1550 	if (qp_id >= internals->max_nb_queue_pairs) {
1551 		PMD_INIT_LOG(ERR, "Max supported qpid %d",
1552 			     internals->max_nb_queue_pairs);
1553 		return -EINVAL;
1554 	}
1555 
1556 	qp = &internals->qps[qp_id];
1557 	qp->internals = NULL;
1558 	dev->data->queue_pairs[qp_id] = NULL;
1559 
1560 	return 0;
1561 }
1562 
1563 /** Setup a queue pair */
1564 static int
1565 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1566 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1567 		__rte_unused int socket_id,
1568 		__rte_unused struct rte_mempool *session_pool)
1569 {
1570 	struct dpaa_sec_dev_private *internals;
1571 	struct dpaa_sec_qp *qp = NULL;
1572 
1573 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
1574 		     dev, qp_id, qp_conf);
1575 
1576 	internals = dev->data->dev_private;
1577 	if (qp_id >= internals->max_nb_queue_pairs) {
1578 		PMD_INIT_LOG(ERR, "Max supported qpid %d",
1579 			     internals->max_nb_queue_pairs);
1580 		return -EINVAL;
1581 	}
1582 
1583 	qp = &internals->qps[qp_id];
1584 	qp->internals = internals;
1585 	dev->data->queue_pairs[qp_id] = qp;
1586 
1587 	return 0;
1588 }
1589 
1590 /** Start queue pair */
1591 static int
1592 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1593 			  __rte_unused uint16_t queue_pair_id)
1594 {
1595 	PMD_INIT_FUNC_TRACE();
1596 
1597 	return 0;
1598 }
1599 
1600 /** Stop queue pair */
1601 static int
1602 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1603 			 __rte_unused uint16_t queue_pair_id)
1604 {
1605 	PMD_INIT_FUNC_TRACE();
1606 
1607 	return 0;
1608 }
1609 
1610 /** Return the number of allocated queue pairs */
1611 static uint32_t
1612 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1613 {
1614 	PMD_INIT_FUNC_TRACE();
1615 
1616 	return dev->data->nb_queue_pairs;
1617 }
1618 
1619 /** Returns the size of session structure */
1620 static unsigned int
1621 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1622 {
1623 	PMD_INIT_FUNC_TRACE();
1624 
1625 	return sizeof(dpaa_sec_session);
1626 }
1627 
1628 static int
1629 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1630 		     struct rte_crypto_sym_xform *xform,
1631 		     dpaa_sec_session *session)
1632 {
1633 	session->cipher_alg = xform->cipher.algo;
1634 	session->iv.length = xform->cipher.iv.length;
1635 	session->iv.offset = xform->cipher.iv.offset;
1636 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1637 					       RTE_CACHE_LINE_SIZE);
1638 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1639 		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
1640 		return -ENOMEM;
1641 	}
1642 	session->cipher_key.length = xform->cipher.key.length;
1643 
1644 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1645 	       xform->cipher.key.length);
1646 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1647 			DIR_ENC : DIR_DEC;
1648 
1649 	return 0;
1650 }
1651 
1652 static int
1653 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1654 		   struct rte_crypto_sym_xform *xform,
1655 		   dpaa_sec_session *session)
1656 {
1657 	session->auth_alg = xform->auth.algo;
1658 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1659 					     RTE_CACHE_LINE_SIZE);
1660 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1661 		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
1662 		return -ENOMEM;
1663 	}
1664 	session->auth_key.length = xform->auth.key.length;
1665 	session->digest_length = xform->auth.digest_length;
1666 
1667 	memcpy(session->auth_key.data, xform->auth.key.data,
1668 	       xform->auth.key.length);
1669 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1670 			DIR_ENC : DIR_DEC;
1671 
1672 	return 0;
1673 }
1674 
1675 static int
1676 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1677 		   struct rte_crypto_sym_xform *xform,
1678 		   dpaa_sec_session *session)
1679 {
1680 	session->aead_alg = xform->aead.algo;
1681 	session->iv.length = xform->aead.iv.length;
1682 	session->iv.offset = xform->aead.iv.offset;
1683 	session->auth_only_len = xform->aead.aad_length;
1684 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1685 					     RTE_CACHE_LINE_SIZE);
1686 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1687 		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
1688 		return -ENOMEM;
1689 	}
1690 	session->aead_key.length = xform->aead.key.length;
1691 	session->digest_length = xform->aead.digest_length;
1692 
1693 	memcpy(session->aead_key.data, xform->aead.key.data,
1694 	       xform->aead.key.length);
1695 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1696 			DIR_ENC : DIR_DEC;
1697 
1698 	return 0;
1699 }
1700 
1701 static struct qman_fq *
1702 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1703 {
1704 	unsigned int i;
1705 
1706 	for (i = 0; i < qi->max_nb_sessions; i++) {
1707 		if (qi->inq_attach[i] == 0) {
1708 			qi->inq_attach[i] = 1;
1709 			return &qi->inq[i];
1710 		}
1711 	}
1712 	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);
1713 
1714 	return NULL;
1715 }
1716 
1717 static int
1718 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1719 {
1720 	unsigned int i;
1721 
1722 	for (i = 0; i < qi->max_nb_sessions; i++) {
1723 		if (&qi->inq[i] == fq) {
1724 			qman_retire_fq(fq, NULL);
1725 			qman_oos_fq(fq);
1726 			qi->inq_attach[i] = 0;
1727 			return 0;
1728 		}
1729 	}
1730 	return -1;
1731 }
1732 
1733 static int
1734 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1735 {
1736 	int ret;
1737 
1738 	sess->qp = qp;
1739 	ret = dpaa_sec_prep_cdb(sess);
1740 	if (ret) {
1741 		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
1742 		return -1;
1743 	}
1744 
1745 	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1746 			       qman_fq_fqid(&qp->outq));
1747 	if (ret)
1748 		PMD_DRV_LOG(ERR, "Unable to init sec queue");
1749 
1750 	return ret;
1751 }
1752 
1753 static int
1754 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1755 			uint16_t qp_id __rte_unused,
1756 			void *ses __rte_unused)
1757 {
1758 	PMD_INIT_FUNC_TRACE();
1759 	return 0;
1760 }
1761 
1762 static int
1763 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1764 			uint16_t qp_id  __rte_unused,
1765 			void *ses)
1766 {
1767 	dpaa_sec_session *sess = ses;
1768 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1769 
1770 	PMD_INIT_FUNC_TRACE();
1771 
1772 	if (sess->inq)
1773 		dpaa_sec_detach_rxq(qi, sess->inq);
1774 	sess->inq = NULL;
1775 
1776 	sess->qp = NULL;
1777 
1778 	return 0;
1779 }
1780 
1781 static int
1782 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1783 			    struct rte_crypto_sym_xform *xform,	void *sess)
1784 {
1785 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1786 	dpaa_sec_session *session = sess;
1787 
1788 	PMD_INIT_FUNC_TRACE();
1789 
1790 	if (unlikely(sess == NULL)) {
1791 		RTE_LOG(ERR, PMD, "invalid session struct\n");
1792 		return -EINVAL;
1793 	}
1794 
1795 	/* Default IV length = 0 */
1796 	session->iv.length = 0;
1797 
1798 	/* Cipher Only */
1799 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1800 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1801 		dpaa_sec_cipher_init(dev, xform, session);
1802 
1803 	/* Authentication Only */
1804 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1805 		   xform->next == NULL) {
1806 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1807 		dpaa_sec_auth_init(dev, xform, session);
1808 
1809 	/* Cipher then Authenticate */
1810 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1811 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1812 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1813 			dpaa_sec_cipher_init(dev, xform, session);
1814 			dpaa_sec_auth_init(dev, xform->next, session);
1815 		} else {
1816 			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1817 			return -EINVAL;
1818 		}
1819 
1820 	/* Authenticate then Cipher */
1821 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1822 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1823 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1824 			dpaa_sec_auth_init(dev, xform, session);
1825 			dpaa_sec_cipher_init(dev, xform->next, session);
1826 		} else {
1827 			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1828 			return -EINVAL;
1829 		}
1830 
1831 	/* AEAD operation for AES-GCM kind of Algorithms */
1832 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1833 		   xform->next == NULL) {
1834 		dpaa_sec_aead_init(dev, xform, session);
1835 
1836 	} else {
1837 		PMD_DRV_LOG(ERR, "Invalid crypto type");
1838 		return -EINVAL;
1839 	}
1840 	session->ctx_pool = internals->ctx_pool;
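	/* Reserve a dedicated SEC input FQ for this session; it is scheduled
	 * towards CAAM only when the session is attached to a queue pair at
	 * first enqueue (dpaa_sec_attach_sess_q()).
	 */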
1841 	session->inq = dpaa_sec_attach_rxq(internals);
1842 	if (session->inq == NULL) {
1843 		PMD_DRV_LOG(ERR, "unable to attach sec queue");
1844 		goto err1;
1845 	}
1846 
1847 	return 0;
1848 
1849 err1:
1850 	rte_free(session->cipher_key.data);
1851 	rte_free(session->auth_key.data);
1852 	memset(session, 0, sizeof(dpaa_sec_session));
1853 
1854 	return -EINVAL;
1855 }
1856 
1857 static int
1858 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1859 		struct rte_crypto_sym_xform *xform,
1860 		struct rte_cryptodev_sym_session *sess,
1861 		struct rte_mempool *mempool)
1862 {
1863 	void *sess_private_data;
1864 	int ret;
1865 
1866 	PMD_INIT_FUNC_TRACE();
1867 
1868 	if (rte_mempool_get(mempool, &sess_private_data)) {
1869 		CDEV_LOG_ERR(
1870 			"Couldn't get object from session mempool");
1871 		return -ENOMEM;
1872 	}
1873 
1874 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1875 	if (ret != 0) {
1876 		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1877 				"session parameters");
1878 
1879 		/* Return session to mempool */
1880 		rte_mempool_put(mempool, sess_private_data);
1881 		return ret;
1882 	}
1883 
1884 	set_session_private_data(sess, dev->driver_id,
1885 			sess_private_data);
1886 
1887 
1888 	return 0;
1889 }
1890 
1891 /** Clear the memory of session so it doesn't leave key material behind */
1892 static void
1893 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1894 		struct rte_cryptodev_sym_session *sess)
1895 {
1896 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1897 	uint8_t index = dev->driver_id;
1898 	void *sess_priv = get_session_private_data(sess, index);
1899 
1900 	PMD_INIT_FUNC_TRACE();
1901 
1902 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1903 
1904 	if (sess_priv) {
1905 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1906 
1907 		if (s->inq)
1908 			dpaa_sec_detach_rxq(qi, s->inq);
1909 		rte_free(s->cipher_key.data);
1910 		rte_free(s->auth_key.data);
1911 		memset(s, 0, sizeof(dpaa_sec_session));
1912 		set_session_private_data(sess, index, NULL);
1913 		rte_mempool_put(sess_mp, sess_priv);
1914 	}
1915 }
1916 
1917 static int
1918 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1919 			   struct rte_security_session_conf *conf,
1920 			   void *sess)
1921 {
1922 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1923 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1924 	struct rte_crypto_auth_xform *auth_xform;
1925 	struct rte_crypto_cipher_xform *cipher_xform;
1926 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
1927 
1928 	PMD_INIT_FUNC_TRACE();
1929 
1930 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1931 		cipher_xform = &conf->crypto_xform->cipher;
1932 		auth_xform = &conf->crypto_xform->next->auth;
1933 	} else {
1934 		auth_xform = &conf->crypto_xform->auth;
1935 		cipher_xform = &conf->crypto_xform->next->cipher;
1936 	}
1937 	session->proto_alg = conf->protocol;
1938 	session->cipher_key.data = rte_zmalloc(NULL,
1939 					       cipher_xform->key.length,
1940 					       RTE_CACHE_LINE_SIZE);
1941 	if (session->cipher_key.data == NULL &&
1942 			cipher_xform->key.length > 0) {
1943 		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1944 		return -ENOMEM;
1945 	}
1946 
1947 	session->cipher_key.length = cipher_xform->key.length;
1948 	session->auth_key.data = rte_zmalloc(NULL,
1949 					auth_xform->key.length,
1950 					RTE_CACHE_LINE_SIZE);
1951 	if (session->auth_key.data == NULL &&
1952 			auth_xform->key.length > 0) {
1953 		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1954 		rte_free(session->cipher_key.data);
1955 		return -ENOMEM;
1956 	}
1957 	session->auth_key.length = auth_xform->key.length;
1958 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1959 			cipher_xform->key.length);
1960 	memcpy(session->auth_key.data, auth_xform->key.data,
1961 			auth_xform->key.length);
1962 
1963 	switch (auth_xform->algo) {
1964 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1965 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1966 		break;
1967 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1968 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1969 		break;
1970 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1971 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1972 		break;
1973 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1974 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1975 		break;
1976 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1977 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1978 		break;
1979 	case RTE_CRYPTO_AUTH_AES_CMAC:
1980 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1981 		break;
1982 	case RTE_CRYPTO_AUTH_NULL:
1983 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1984 		break;
1985 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1986 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1987 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1988 	case RTE_CRYPTO_AUTH_SHA1:
1989 	case RTE_CRYPTO_AUTH_SHA256:
1990 	case RTE_CRYPTO_AUTH_SHA512:
1991 	case RTE_CRYPTO_AUTH_SHA224:
1992 	case RTE_CRYPTO_AUTH_SHA384:
1993 	case RTE_CRYPTO_AUTH_MD5:
1994 	case RTE_CRYPTO_AUTH_AES_GMAC:
1995 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1996 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1997 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1998 		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
1999 			auth_xform->algo);
2000 		goto out;
2001 	default:
2002 		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2003 			auth_xform->algo);
2004 		goto out;
2005 	}
2006 
2007 	switch (cipher_xform->algo) {
2008 	case RTE_CRYPTO_CIPHER_AES_CBC:
2009 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2010 		break;
2011 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2012 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2013 		break;
2014 	case RTE_CRYPTO_CIPHER_AES_CTR:
2015 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2016 		break;
2017 	case RTE_CRYPTO_CIPHER_NULL:
2018 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2019 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2020 	case RTE_CRYPTO_CIPHER_AES_ECB:
2021 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2022 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2023 			cipher_xform->algo);
2024 		goto out;
2025 	default:
2026 		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2027 			cipher_xform->algo);
2028 		goto out;
2029 	}
2030 
2031 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2032 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2033 				sizeof(session->ip4_hdr));
2034 		session->ip4_hdr.ip_v = IPVERSION;
2035 		session->ip4_hdr.ip_hl = 5;
2036 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2037 						sizeof(session->ip4_hdr));
2038 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2039 		session->ip4_hdr.ip_id = 0;
2040 		session->ip4_hdr.ip_off = 0;
2041 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2042 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2043 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2044 				: IPPROTO_AH;
2045 		session->ip4_hdr.ip_sum = 0;
2046 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2047 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2048 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2049 						(void *)&session->ip4_hdr,
2050 						sizeof(struct ip));
2051 
2052 		session->encap_pdb.options =
2053 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2054 			PDBOPTS_ESP_OIHI_PDB_INL |
2055 			PDBOPTS_ESP_IVSRC |
2056 			PDBHMO_ESP_ENCAP_DTTL;
2057 		session->encap_pdb.spi = ipsec_xform->spi;
2058 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2059 
2060 		session->dir = DIR_ENC;
2061 	} else if (ipsec_xform->direction ==
2062 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2063 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2064 		session->decap_pdb.options = sizeof(struct ip) << 16;
2065 		session->dir = DIR_DEC;
2066 	} else
2067 		goto out;
2068 	session->ctx_pool = internals->ctx_pool;
2069 	session->inq = dpaa_sec_attach_rxq(internals);
2070 	if (session->inq == NULL) {
2071 		PMD_DRV_LOG(ERR, "unable to attach sec queue");
2072 		goto out;
2073 	}
2074 
2076 	return 0;
2077 out:
2078 	rte_free(session->auth_key.data);
2079 	rte_free(session->cipher_key.data);
2080 	memset(session, 0, sizeof(dpaa_sec_session));
2081 	return -1;
2082 }
2083 
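/** Create a security session (rte_security session_create op): take a
 * private-data object from the session mempool and configure it for the
 * requested protocol; only IPsec is supported.
 */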
2084 static int
2085 dpaa_sec_security_session_create(void *dev,
2086 				 struct rte_security_session_conf *conf,
2087 				 struct rte_security_session *sess,
2088 				 struct rte_mempool *mempool)
2089 {
2090 	void *sess_private_data;
2091 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2092 	int ret;
2093 
2094 	if (rte_mempool_get(mempool, &sess_private_data)) {
2095 		CDEV_LOG_ERR(
2096 			"Couldn't get object from session mempool");
2097 		return -ENOMEM;
2098 	}
2099 
2100 	switch (conf->protocol) {
2101 	case RTE_SECURITY_PROTOCOL_IPSEC:
2102 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2103 				sess_private_data);
2104 		break;
2105 	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* return the unused object to the session mempool */
		rte_mempool_put(mempool, sess_private_data);
2106 		return -ENOTSUP;
2107 	default:
		rte_mempool_put(mempool, sess_private_data);
2108 		return -EINVAL;
2109 	}
2110 	if (ret != 0) {
2111 		PMD_DRV_LOG(ERR,
2112 			"DPAA PMD: failed to configure session parameters");
2113 
2114 		/* Return session to mempool */
2115 		rte_mempool_put(mempool, sess_private_data);
2116 		return ret;
2117 	}
2118 
2119 	set_sec_session_private_data(sess, sess_private_data);
2120 
2121 	return ret;
2122 }
2123 
2124 /** Clear the memory of session so it doesn't leave key material behind */
2125 static int
2126 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2127 		struct rte_security_session *sess)
2128 {
2129 	void *sess_priv = get_sec_session_private_data(sess);
2130 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2131 
2132 	PMD_INIT_FUNC_TRACE();
2133 
2134 	if (sess_priv) {
2135 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2136 
2137 		rte_free(s->cipher_key.data);
2138 		rte_free(s->auth_key.data);
2139 		memset(s, 0, sizeof(dpaa_sec_session));
2140 		set_sec_session_private_data(sess, NULL);
2141 		rte_mempool_put(sess_mp, sess_priv);
2142 	}
2143 	return 0;
2144 }
2145 
2146 
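/* Device configure/start/stop/close are no-ops for this PMD: all state lives
 * in the queue pairs and sessions that are set up separately.
 */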
2147 static int
2148 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2149 		       struct rte_cryptodev_config *config __rte_unused)
2150 {
2151 	PMD_INIT_FUNC_TRACE();
2152 
2153 	return 0;
2154 }
2155 
2156 static int
2157 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2158 {
2159 	PMD_INIT_FUNC_TRACE();
2160 	return 0;
2161 }
2162 
2163 static void
2164 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2165 {
2166 	PMD_INIT_FUNC_TRACE();
2167 }
2168 
2169 static int
2170 dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
2171 {
2172 	PMD_INIT_FUNC_TRACE();
2173 	return 0;
2174 }
2175 
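/** Report driver id, feature flags, capabilities and queue/session limits */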
2176 static void
2177 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2178 		       struct rte_cryptodev_info *info)
2179 {
2180 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2181 
2182 	PMD_INIT_FUNC_TRACE();
2183 	if (info != NULL) {
2184 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2185 		info->feature_flags = dev->feature_flags;
2186 		info->capabilities = dpaa_sec_capabilities;
2187 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2188 		info->sym.max_nb_sessions_per_qp =
2189 			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2190 			RTE_DPAA_MAX_NB_SEC_QPS;
2191 		info->driver_id = cryptodev_driver_id;
2192 	}
2193 }
2194 
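/* Cryptodev operations exposed to the rte_cryptodev framework */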
2195 static struct rte_cryptodev_ops crypto_ops = {
2196 	.dev_configure	      = dpaa_sec_dev_configure,
2197 	.dev_start	      = dpaa_sec_dev_start,
2198 	.dev_stop	      = dpaa_sec_dev_stop,
2199 	.dev_close	      = dpaa_sec_dev_close,
2200 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2201 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2202 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2203 	.queue_pair_start     = dpaa_sec_queue_pair_start,
2204 	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
2205 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2206 	.session_get_size     = dpaa_sec_session_get_size,
2207 	.session_configure    = dpaa_sec_session_configure,
2208 	.session_clear        = dpaa_sec_session_clear,
2209 	.qp_attach_session    = dpaa_sec_qp_attach_sess,
2210 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
2211 };
2212 
2213 static const struct rte_security_capability *
2214 dpaa_sec_capabilities_get(void *device __rte_unused)
2215 {
2216 	return dpaa_sec_security_cap;
2217 }
2218 
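/* rte_security operations: only session create/destroy and capability query
 * are implemented; session update, statistics and inline packet metadata
 * are not supported.
 */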
2219 struct rte_security_ops dpaa_sec_security_ops = {
2220 	.session_create = dpaa_sec_security_session_create,
2221 	.session_update = NULL,
2222 	.session_stats_get = NULL,
2223 	.session_destroy = dpaa_sec_security_session_destroy,
2224 	.set_pkt_metadata = NULL,
2225 	.capabilities_get = dpaa_sec_capabilities_get
2226 };
2227 
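/** Release the security context, the per-device context mempool and the
 * device private data on removal or failed initialization.
 */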
2228 static int
2229 dpaa_sec_uninit(struct rte_cryptodev *dev)
2230 {
2231 	struct dpaa_sec_dev_private *internals;
2232 
2233 	if (dev == NULL)
2234 		return -ENODEV;

	internals = dev->data->dev_private;
2235 
2236 	rte_free(dev->security_ctx);
2237 
2238 	rte_mempool_free(internals->ctx_pool);
2239 	rte_free(internals);
2240 
2241 	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
2242 		     dev->data->name, rte_socket_id());
2243 
2244 	return 0;
2245 }
2246 
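/* Per-device initialization: hook up the enqueue/dequeue burst functions and
 * feature flags, then (primary process only) create the security context,
 * the TX frame queue of each queue pair, the RX frame queues used by
 * sessions and the per-device context mempool.
 */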
2247 static int
2248 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2249 {
2250 	struct dpaa_sec_dev_private *internals;
2251 	struct rte_security_ctx *security_instance;
2252 	struct dpaa_sec_qp *qp;
2253 	uint32_t i, flags;
2254 	int ret;
2255 	char str[20];
2256 
2257 	PMD_INIT_FUNC_TRACE();
2258 
2259 	cryptodev->driver_id = cryptodev_driver_id;
2260 	cryptodev->dev_ops = &crypto_ops;
2261 
2262 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2263 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2264 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2265 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2266 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2267 			RTE_CRYPTODEV_FF_SECURITY |
2268 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2269 
2270 	internals = cryptodev->data->dev_private;
2271 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2272 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2273 
2274 	/*
2275 	 * For secondary processes, we don't initialise any further as primary
2276 	 * has already done this work. Only check we don't need a different
2277 	 * RX function
2278 	 */
2279 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2280 		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
2281 		return 0;
2282 	}
2283 
2284 	/* Initialize security_ctx only for primary process*/
2285 	security_instance = rte_malloc("rte_security_instances_ops",
2286 				sizeof(struct rte_security_ctx), 0);
2287 	if (security_instance == NULL)
2288 		return -ENOMEM;
2289 	security_instance->device = (void *)cryptodev;
2290 	security_instance->ops = &dpaa_sec_security_ops;
2291 	security_instance->sess_cnt = 0;
2292 	cryptodev->security_ctx = security_instance;
2293 
2294 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2295 		/* init qman fq for queue pair */
2296 		qp = &internals->qps[i];
2297 		ret = dpaa_sec_init_tx(&qp->outq);
2298 		if (ret) {
2299 			PMD_INIT_LOG(ERR, "failed to config tx of queue pair %d", i);
2300 			goto init_error;
2301 		}
2302 	}
2303 
2304 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2305 		QMAN_FQ_FLAG_TO_DCPORTAL;
2306 	for (i = 0; i < internals->max_nb_sessions; i++) {
2307 		/* create rx qman fq for sessions */
2308 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2309 		if (unlikely(ret != 0)) {
2310 			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
2311 			goto init_error;
2312 		}
2313 	}
2314 
2315 	snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
2316 	internals->ctx_pool = rte_mempool_create((const char *)str,
2317 			CTX_POOL_NUM_BUFS,
2318 			CTX_POOL_BUF_SIZE,
2319 			CTX_POOL_CACHE_SIZE, 0,
2320 			NULL, NULL, NULL, NULL,
2321 			SOCKET_ID_ANY, 0);
2322 	if (!internals->ctx_pool) {
2323 		RTE_LOG(ERR, PMD, "%s create failed\n", str);
2324 		goto init_error;
2325 	}
2326 
2327 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
2328 	return 0;
2329 
2330 init_error:
2331 	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
2332 
2333 	dpaa_sec_uninit(cryptodev);
2334 	return -EFAULT;
2335 }
2336 
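/* Bus probe: allocate and populate the cryptodev, read the SEC era from the
 * device tree if it has not been configured yet, then run the device-level
 * initialization; clean up on failure.
 */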
2337 static int
2338 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2339 				struct rte_dpaa_device *dpaa_dev)
2340 {
2341 	struct rte_cryptodev *cryptodev;
2342 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2343 
2344 	int retval;
2345 
2346 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);
2347 
2348 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2349 	if (cryptodev == NULL)
2350 		return -ENOMEM;
2351 
2352 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2353 		cryptodev->data->dev_private = rte_zmalloc_socket(
2354 					"cryptodev private structure",
2355 					sizeof(struct dpaa_sec_dev_private),
2356 					RTE_CACHE_LINE_SIZE,
2357 					rte_socket_id());
2358 
2359 		if (cryptodev->data->dev_private == NULL)
2360 			rte_panic("Cannot allocate memzone for private "
2361 					"device data");
2362 	}
2363 
2364 	dpaa_dev->crypto_dev = cryptodev;
2365 	cryptodev->device = &dpaa_dev->device;
2366 	cryptodev->device->driver = &dpaa_drv->driver;
2367 
2368 	/* init user callbacks */
2369 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2370 
2371 	/* if sec device version is not configured */
2372 	if (!rta_get_sec_era()) {
2373 		const struct device_node *caam_node;
2374 
2375 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2376 			const uint32_t *prop = of_get_property(caam_node,
2377 					"fsl,sec-era",
2378 					NULL);
2379 			if (prop) {
2380 				rta_set_sec_era(
2381 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2382 				break;
2383 			}
2384 		}
2385 	}
2386 
2387 	/* Invoke PMD device initialization function */
2388 	retval = dpaa_sec_dev_init(cryptodev);
2389 	if (retval == 0)
2390 		return 0;
2391 
2392 	/* In case of error, cleanup is done */
2393 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2394 		rte_free(cryptodev->data->dev_private);
2395 
2396 	rte_cryptodev_pmd_release_device(cryptodev);
2397 
2398 	return -ENXIO;
2399 }
2400 
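/* Bus remove: undo dpaa_sec_dev_init() and release the cryptodev */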
2401 static int
2402 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2403 {
2404 	struct rte_cryptodev *cryptodev;
2405 	int ret;
2406 
2407 	cryptodev = dpaa_dev->crypto_dev;
2408 	if (cryptodev == NULL)
2409 		return -ENODEV;
2410 
2411 	ret = dpaa_sec_uninit(cryptodev);
2412 	if (ret)
2413 		return ret;
2414 
2415 	return rte_cryptodev_pmd_destroy(cryptodev);
2416 }
2417 
2418 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2419 	.drv_type = FSL_DPAA_CRYPTO,
2420 	.driver = {
2421 		.name = "DPAA SEC PMD"
2422 	},
2423 	.probe = cryptodev_dpaa_sec_probe,
2424 	.remove = cryptodev_dpaa_sec_remove,
2425 };
2426 
2427 static struct cryptodev_driver dpaa_sec_crypto_drv;
2428 
2429 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2430 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2431 		cryptodev_driver_id);
2432