xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision f163231e7df536f50c95a06f9b9d2cc9a2e05c6a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2018 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30 
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35 
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39 
40 enum rta_sec_era rta_sec_era;
41 
42 int dpaa_logtype_sec;
43 
44 static uint8_t cryptodev_driver_id;
45 
46 static __thread struct rte_crypto_op **dpaa_sec_ops;
47 static __thread int dpaa_sec_op_nb;
48 
49 static int
50 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
51 
52 static inline void
53 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
54 {
55 	if (!ctx->fd_status) {
56 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
57 	} else {
58 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
59 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
60 	}
61 
62 	/* report op status to sym->op and then free the ctx memory */
63 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
64 }
65 
66 static inline struct dpaa_sec_op_ctx *
67 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
68 {
69 	struct dpaa_sec_op_ctx *ctx;
70 	int retval;
71 
72 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
73 	if (!ctx || retval) {
74 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75 		return NULL;
76 	}
77 	/*
78 	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
79 	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
80 	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
81 	 * for each packet, memset() would be costlier than dcbz_64().
82 	 */
83 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
84 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
85 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
86 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
87 
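	/*
	 * Cache the virtual-to-IOVA offset of this ctx object so that
	 * per-packet address conversions via dpaa_mem_vtop_ctx() reduce
	 * to a single subtraction instead of a memseg lookup.
	 */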
88 	ctx->ctx_pool = ses->ctx_pool;
89 	ctx->vtop_offset = (size_t) ctx
90 				- rte_mempool_virt2iova(ctx);
91 
92 	return ctx;
93 }
94 
95 static inline rte_iova_t
96 dpaa_mem_vtop(void *vaddr)
97 {
98 	const struct rte_memseg *ms;
99 
100 	ms = rte_mem_virt2memseg(vaddr, NULL);
101 	if (ms)
102 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
103 	return (size_t)NULL;
104 }
105 
106 /* virtual address conversion when mempool support is available for ctx */
107 static inline phys_addr_t
108 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
109 {
110 	return (size_t)vaddr - ctx->vtop_offset;
111 }
112 
113 static inline void *
114 dpaa_mem_ptov(rte_iova_t paddr)
115 {
116 	return rte_mem_iova2virt(paddr);
117 }
118 
119 static void
120 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
121 		   struct qman_fq *fq,
122 		   const struct qm_mr_entry *msg)
123 {
124 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
125 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
126 }
127 
128 /* Initialize the queue with the CAAM channel as destination so that
129  * all packets enqueued on this queue are dispatched to CAAM.
130  */
131 static int
132 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
133 		 uint32_t fqid_out)
134 {
135 	struct qm_mcc_initfq fq_opts;
136 	uint32_t flags;
137 	int ret = -1;
138 
139 	/* Clear FQ options */
140 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
141 
142 	flags = QMAN_INITFQ_FLAG_SCHED;
143 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
144 			  QM_INITFQ_WE_CONTEXTB;
145 
146 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
147 	fq_opts.fqd.context_b = fqid_out;
148 	fq_opts.fqd.dest.channel = qm_channel_caam;
149 	fq_opts.fqd.dest.wq = 0;
150 
151 	fq_in->cb.ern  = ern_sec_fq_handler;
152 
153 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
154 
155 	ret = qman_init_fq(fq_in, flags, &fq_opts);
156 	if (unlikely(ret != 0))
157 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
158 
159 	return ret;
160 }
161 
162 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
163 static enum qman_cb_dqrr_result
164 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
165 		  struct qman_fq *fq __always_unused,
166 		  const struct qm_dqrr_entry *dqrr)
167 {
168 	const struct qm_fd *fd;
169 	struct dpaa_sec_job *job;
170 	struct dpaa_sec_op_ctx *ctx;
171 
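	/*
	 * Once a burst worth of ops has been collected, defer further DQRR
	 * processing so the collected batch can be returned to the caller.
	 */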
172 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
173 		return qman_cb_dqrr_defer;
174 
175 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
176 		return qman_cb_dqrr_consume;
177 
178 	fd = &dqrr->fd;
179 	/* sg is embedded in an op ctx,
180 	 * sg[0] is for output
181 	 * sg[1] is for input
182 	 */
183 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
184 
185 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
186 	ctx->fd_status = fd->status;
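	/*
	 * For protocol (IPsec) offload sessions SEC may change the packet
	 * length, so refresh the mbuf lengths from the output SG entry.
	 */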
187 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
188 		struct qm_sg_entry *sg_out;
189 		uint32_t len;
190 
191 		sg_out = &job->sg[0];
192 		hw_sg_to_cpu(sg_out);
193 		len = sg_out->length;
194 		ctx->op->sym->m_src->pkt_len = len;
195 		ctx->op->sym->m_src->data_len = len;
196 	}
197 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
198 	dpaa_sec_op_ending(ctx);
199 
200 	return qman_cb_dqrr_consume;
201 }
202 
203 /* The CAAM result is put into this queue */
204 static int
205 dpaa_sec_init_tx(struct qman_fq *fq)
206 {
207 	int ret;
208 	struct qm_mcc_initfq opts;
209 	uint32_t flags;
210 
211 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
212 		QMAN_FQ_FLAG_DYNAMIC_FQID;
213 
214 	ret = qman_create_fq(0, flags, fq);
215 	if (unlikely(ret)) {
216 		DPAA_SEC_ERR("qman_create_fq failed");
217 		return ret;
218 	}
219 
220 	memset(&opts, 0, sizeof(opts));
221 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
222 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
223 
224 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
225 
226 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
227 	fq->cb.ern  = ern_sec_fq_handler;
228 
229 	ret = qman_init_fq(fq, 0, &opts);
230 	if (unlikely(ret)) {
231 		DPAA_SEC_ERR("unable to init caam source fq!");
232 		return ret;
233 	}
234 
235 	return ret;
236 }
237 
238 static inline int is_cipher_only(dpaa_sec_session *ses)
239 {
240 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
241 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
242 }
243 
244 static inline int is_auth_only(dpaa_sec_session *ses)
245 {
246 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
247 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
248 }
249 
250 static inline int is_aead(dpaa_sec_session *ses)
251 {
252 	return ((ses->cipher_alg == 0) &&
253 		(ses->auth_alg == 0) &&
254 		(ses->aead_alg != 0));
255 }
256 
257 static inline int is_auth_cipher(dpaa_sec_session *ses)
258 {
259 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
260 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
261 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
262 }
263 
264 static inline int is_proto_ipsec(dpaa_sec_session *ses)
265 {
266 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
267 }
268 
269 static inline int is_encode(dpaa_sec_session *ses)
270 {
271 	return ses->dir == DIR_ENC;
272 }
273 
274 static inline int is_decode(dpaa_sec_session *ses)
275 {
276 	return ses->dir == DIR_DEC;
277 }
278 
279 static inline void
280 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
281 {
282 	switch (ses->auth_alg) {
283 	case RTE_CRYPTO_AUTH_NULL:
284 		ses->digest_length = 0;
285 		break;
286 	case RTE_CRYPTO_AUTH_MD5_HMAC:
287 		alginfo_a->algtype =
288 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
289 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
290 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
291 		break;
292 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
293 		alginfo_a->algtype =
294 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
295 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
296 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
297 		break;
298 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
299 		alginfo_a->algtype =
300 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
301 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
302 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
303 		break;
304 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
305 		alginfo_a->algtype =
306 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
307 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
308 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
309 		break;
310 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
311 		alginfo_a->algtype =
312 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
313 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
314 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
315 		break;
316 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
317 		alginfo_a->algtype =
318 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
319 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
320 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
321 		break;
322 	default:
323 		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
324 	}
325 }
326 
327 static inline void
328 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
329 {
330 	switch (ses->cipher_alg) {
331 	case RTE_CRYPTO_CIPHER_NULL:
332 		break;
333 	case RTE_CRYPTO_CIPHER_AES_CBC:
334 		alginfo_c->algtype =
335 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
336 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
337 		alginfo_c->algmode = OP_ALG_AAI_CBC;
338 		break;
339 	case RTE_CRYPTO_CIPHER_3DES_CBC:
340 		alginfo_c->algtype =
341 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
342 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
343 		alginfo_c->algmode = OP_ALG_AAI_CBC;
344 		break;
345 	case RTE_CRYPTO_CIPHER_AES_CTR:
346 		alginfo_c->algtype =
347 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
348 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
349 		alginfo_c->algmode = OP_ALG_AAI_CTR;
350 		break;
351 	default:
352 		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
353 	}
354 }
355 
356 static inline void
357 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
358 {
359 	switch (ses->aead_alg) {
360 	case RTE_CRYPTO_AEAD_AES_GCM:
361 		alginfo->algtype = OP_ALG_ALGSEL_AES;
362 		alginfo->algmode = OP_ALG_AAI_GCM;
363 		break;
364 	default:
365 		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
366 	}
367 }
368 
369 
370 /* prepare command block of the session */
371 static int
372 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
373 {
374 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
375 	int32_t shared_desc_len = 0;
376 	struct sec_cdb *cdb = &ses->cdb;
377 	int err;
378 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
379 	int swap = false;
380 #else
381 	int swap = true;
382 #endif
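	/*
	 * RTA must byte-swap descriptor words when the core endianness does
	 * not match the SEC block's, i.e. when running on a little-endian CPU.
	 */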
383 
384 	memset(cdb, 0, sizeof(struct sec_cdb));
385 
386 	if (is_cipher_only(ses)) {
387 		caam_cipher_alg(ses, &alginfo_c);
388 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
389 			DPAA_SEC_ERR("not supported cipher alg");
390 			return -ENOTSUP;
391 		}
392 
393 		alginfo_c.key = (size_t)ses->cipher_key.data;
394 		alginfo_c.keylen = ses->cipher_key.length;
395 		alginfo_c.key_enc_flags = 0;
396 		alginfo_c.key_type = RTA_DATA_IMM;
397 
398 		shared_desc_len = cnstr_shdsc_blkcipher(
399 						cdb->sh_desc, true,
400 						swap, &alginfo_c,
401 						NULL,
402 						ses->iv.length,
403 						ses->dir);
404 	} else if (is_auth_only(ses)) {
405 		caam_auth_alg(ses, &alginfo_a);
406 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
407 			DPAA_SEC_ERR("not supported auth alg");
408 			return -ENOTSUP;
409 		}
410 
411 		alginfo_a.key = (size_t)ses->auth_key.data;
412 		alginfo_a.keylen = ses->auth_key.length;
413 		alginfo_a.key_enc_flags = 0;
414 		alginfo_a.key_type = RTA_DATA_IMM;
415 
416 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
417 						   swap, &alginfo_a,
418 						   !ses->dir,
419 						   ses->digest_length);
420 	} else if (is_aead(ses)) {
421 		caam_aead_alg(ses, &alginfo);
422 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
423 			DPAA_SEC_ERR("not supported aead alg");
424 			return -ENOTSUP;
425 		}
426 		alginfo.key = (size_t)ses->aead_key.data;
427 		alginfo.keylen = ses->aead_key.length;
428 		alginfo.key_enc_flags = 0;
429 		alginfo.key_type = RTA_DATA_IMM;
430 
431 		if (ses->dir == DIR_ENC)
432 			shared_desc_len = cnstr_shdsc_gcm_encap(
433 					cdb->sh_desc, true, swap,
434 					&alginfo,
435 					ses->iv.length,
436 					ses->digest_length);
437 		else
438 			shared_desc_len = cnstr_shdsc_gcm_decap(
439 					cdb->sh_desc, true, swap,
440 					&alginfo,
441 					ses->iv.length,
442 					ses->digest_length);
443 	} else {
444 		caam_cipher_alg(ses, &alginfo_c);
445 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
446 			DPAA_SEC_ERR("not supported cipher alg");
447 			return -ENOTSUP;
448 		}
449 
450 		alginfo_c.key = (size_t)ses->cipher_key.data;
451 		alginfo_c.keylen = ses->cipher_key.length;
452 		alginfo_c.key_enc_flags = 0;
453 		alginfo_c.key_type = RTA_DATA_IMM;
454 
455 		caam_auth_alg(ses, &alginfo_a);
456 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
457 			DPAA_SEC_ERR("not supported auth alg");
458 			return -ENOTSUP;
459 		}
460 
461 		alginfo_a.key = (size_t)ses->auth_key.data;
462 		alginfo_a.keylen = ses->auth_key.length;
463 		alginfo_a.key_enc_flags = 0;
464 		alginfo_a.key_type = RTA_DATA_IMM;
465 
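		/*
		 * Temporarily stash the key lengths in sh_desc[0]/[1] so that
		 * rta_inline_query() can decide per key whether it fits inline
		 * in the descriptor (RTA_DATA_IMM) or must be referenced by
		 * physical address (RTA_DATA_PTR); the result bits are read
		 * from sh_desc[2] and the words are cleared again below.
		 */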
466 		cdb->sh_desc[0] = alginfo_c.keylen;
467 		cdb->sh_desc[1] = alginfo_a.keylen;
468 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
469 				       MIN_JOB_DESC_SIZE,
470 				       (unsigned int *)cdb->sh_desc,
471 				       &cdb->sh_desc[2], 2);
472 
473 		if (err < 0) {
474 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
475 			return err;
476 		}
477 		if (cdb->sh_desc[2] & 1)
478 			alginfo_c.key_type = RTA_DATA_IMM;
479 		else {
480 			alginfo_c.key = (size_t)dpaa_mem_vtop(
481 						(void *)(size_t)alginfo_c.key);
482 			alginfo_c.key_type = RTA_DATA_PTR;
483 		}
484 		if (cdb->sh_desc[2] & (1<<1))
485 			alginfo_a.key_type = RTA_DATA_IMM;
486 		else {
487 			alginfo_a.key = (size_t)dpaa_mem_vtop(
488 						(void *)(size_t)alginfo_a.key);
489 			alginfo_a.key_type = RTA_DATA_PTR;
490 		}
491 		cdb->sh_desc[0] = 0;
492 		cdb->sh_desc[1] = 0;
493 		cdb->sh_desc[2] = 0;
494 		if (is_proto_ipsec(ses)) {
495 			if (ses->dir == DIR_ENC) {
496 				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
497 						cdb->sh_desc,
498 						true, swap, &ses->encap_pdb,
499 						(uint8_t *)&ses->ip4_hdr,
500 						&alginfo_c, &alginfo_a);
501 			} else if (ses->dir == DIR_DEC) {
502 				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
503 						cdb->sh_desc,
504 						true, swap, &ses->decap_pdb,
505 						&alginfo_c, &alginfo_a);
506 			}
507 		} else {
508 		/* auth_only_len is set as 0 here; it will be
509 		 * overwritten in the FD for each packet.
510 		 */
511 			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
512 					true, swap, &alginfo_c, &alginfo_a,
513 					ses->iv.length, 0,
514 					ses->digest_length, ses->dir);
515 		}
516 	}
517 
518 	if (shared_desc_len < 0) {
519 		DPAA_SEC_ERR("error in preparing command block");
520 		return shared_desc_len;
521 	}
522 
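	/* Record the shared descriptor length in the header and convert the
	 * header words to the SEC block's big-endian layout.
	 */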
523 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
524 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
525 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
526 
527 	return 0;
528 }
529 
530 /* qp is lockless; it should be accessed by only one thread */
531 static int
532 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
533 {
534 	struct qman_fq *fq;
535 	unsigned int pkts = 0;
536 	int ret;
537 	struct qm_dqrr_entry *dq;
538 
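	/*
	 * Issue a volatile dequeue for up to nb_ops frames on this qp's
	 * out-queue and drain completed frames until the VDQCR completes.
	 */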
539 	fq = &qp->outq;
540 	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
541 				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
542 	if (ret)
543 		return 0;
544 
545 	do {
546 		const struct qm_fd *fd;
547 		struct dpaa_sec_job *job;
548 		struct dpaa_sec_op_ctx *ctx;
549 		struct rte_crypto_op *op;
550 
551 		dq = qman_dequeue(fq);
552 		if (!dq)
553 			continue;
554 
555 		fd = &dq->fd;
556 		/* sg is embedded in an op ctx,
557 		 * sg[0] is for output
558 		 * sg[1] is for input
559 		 */
560 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
561 
562 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
563 		ctx->fd_status = fd->status;
564 		op = ctx->op;
565 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
566 			struct qm_sg_entry *sg_out;
567 			uint32_t len;
568 
569 			sg_out = &job->sg[0];
570 			hw_sg_to_cpu(sg_out);
571 			len = sg_out->length;
572 			op->sym->m_src->pkt_len = len;
573 			op->sym->m_src->data_len = len;
574 		}
575 		if (!ctx->fd_status) {
576 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
577 		} else {
578 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
579 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
580 		}
581 		ops[pkts++] = op;
582 
583 		/* report op status to sym->op and then free the ctx memory */
584 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
585 
586 		qman_dqrr_consume(fq, dq);
587 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
588 
589 	return pkts;
590 }
591 
592 static inline struct dpaa_sec_job *
593 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
594 {
595 	struct rte_crypto_sym_op *sym = op->sym;
596 	struct rte_mbuf *mbuf = sym->m_src;
597 	struct dpaa_sec_job *cf;
598 	struct dpaa_sec_op_ctx *ctx;
599 	struct qm_sg_entry *sg, *out_sg, *in_sg;
600 	phys_addr_t start_addr;
601 	uint8_t *old_digest, extra_segs;
602 
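	/*
	 * Besides the two compound-frame entries, digest verification (decode)
	 * needs one extra SG entry to pass the expected digest to SEC.
	 */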
603 	if (is_decode(ses))
604 		extra_segs = 3;
605 	else
606 		extra_segs = 2;
607 
608 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
609 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
610 				MAX_SG_ENTRIES);
611 		return NULL;
612 	}
613 	ctx = dpaa_sec_alloc_ctx(ses);
614 	if (!ctx)
615 		return NULL;
616 
617 	cf = &ctx->job;
618 	ctx->op = op;
619 	old_digest = ctx->digest;
620 
621 	/* output */
622 	out_sg = &cf->sg[0];
623 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
624 	out_sg->length = ses->digest_length;
625 	cpu_to_hw_sg(out_sg);
626 
627 	/* input */
628 	in_sg = &cf->sg[1];
629 	/* need to extend the input to a compound frame */
630 	in_sg->extension = 1;
631 	in_sg->final = 1;
632 	in_sg->length = sym->auth.data.length;
633 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
634 
635 	/* 1st seg */
636 	sg = in_sg + 1;
637 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
638 	sg->length = mbuf->data_len - sym->auth.data.offset;
639 	sg->offset = sym->auth.data.offset;
640 
641 	/* Successive segs */
642 	mbuf = mbuf->next;
643 	while (mbuf) {
644 		cpu_to_hw_sg(sg);
645 		sg++;
646 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
647 		sg->length = mbuf->data_len;
648 		mbuf = mbuf->next;
649 	}
650 
651 	if (is_decode(ses)) {
652 		/* Digest verification case */
653 		cpu_to_hw_sg(sg);
654 		sg++;
655 		rte_memcpy(old_digest, sym->auth.digest.data,
656 				ses->digest_length);
657 		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
658 		qm_sg_entry_set64(sg, start_addr);
659 		sg->length = ses->digest_length;
660 		in_sg->length += ses->digest_length;
661 	} else {
662 		/* Digest calculation case */
663 		sg->length -= ses->digest_length;
664 	}
665 	sg->final = 1;
666 	cpu_to_hw_sg(sg);
667 	cpu_to_hw_sg(in_sg);
668 
669 	return cf;
670 }
671 
672 /**
673  * packet looks like:
674  *		|<----data_len------->|
675  *    |ip_header|ah_header|icv|payload|
676  *              ^
677  *		|
678  *	   mbuf->pkt.data
679  */
680 static inline struct dpaa_sec_job *
681 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
682 {
683 	struct rte_crypto_sym_op *sym = op->sym;
684 	struct rte_mbuf *mbuf = sym->m_src;
685 	struct dpaa_sec_job *cf;
686 	struct dpaa_sec_op_ctx *ctx;
687 	struct qm_sg_entry *sg;
688 	rte_iova_t start_addr;
689 	uint8_t *old_digest;
690 
691 	ctx = dpaa_sec_alloc_ctx(ses);
692 	if (!ctx)
693 		return NULL;
694 
695 	cf = &ctx->job;
696 	ctx->op = op;
697 	old_digest = ctx->digest;
698 
699 	start_addr = rte_pktmbuf_iova(mbuf);
700 	/* output */
701 	sg = &cf->sg[0];
702 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
703 	sg->length = ses->digest_length;
704 	cpu_to_hw_sg(sg);
705 
706 	/* input */
707 	sg = &cf->sg[1];
708 	if (is_decode(ses)) {
709 		/* need to extend the input to a compound frame */
710 		sg->extension = 1;
711 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
712 		sg->length = sym->auth.data.length + ses->digest_length;
713 		sg->final = 1;
714 		cpu_to_hw_sg(sg);
715 
716 		sg = &cf->sg[2];
717 		/* hash result or digest, save digest first */
718 		rte_memcpy(old_digest, sym->auth.digest.data,
719 			   ses->digest_length);
720 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
721 		sg->length = sym->auth.data.length;
722 		cpu_to_hw_sg(sg);
723 
724 		/* let's check digest by hw */
725 		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
726 		sg++;
727 		qm_sg_entry_set64(sg, start_addr);
728 		sg->length = ses->digest_length;
729 		sg->final = 1;
730 		cpu_to_hw_sg(sg);
731 	} else {
732 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
733 		sg->length = sym->auth.data.length;
734 		sg->final = 1;
735 		cpu_to_hw_sg(sg);
736 	}
737 
738 	return cf;
739 }
740 
741 static inline struct dpaa_sec_job *
742 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
743 {
744 	struct rte_crypto_sym_op *sym = op->sym;
745 	struct dpaa_sec_job *cf;
746 	struct dpaa_sec_op_ctx *ctx;
747 	struct qm_sg_entry *sg, *out_sg, *in_sg;
748 	struct rte_mbuf *mbuf;
749 	uint8_t req_segs;
750 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
751 			ses->iv.offset);
752 
753 	if (sym->m_dst) {
754 		mbuf = sym->m_dst;
755 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
756 	} else {
757 		mbuf = sym->m_src;
758 		req_segs = mbuf->nb_segs * 2 + 3;
759 	}
760 
761 	if (req_segs > MAX_SG_ENTRIES) {
762 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
763 				MAX_SG_ENTRIES);
764 		return NULL;
765 	}
766 
767 	ctx = dpaa_sec_alloc_ctx(ses);
768 	if (!ctx)
769 		return NULL;
770 
771 	cf = &ctx->job;
772 	ctx->op = op;
773 
774 	/* output */
775 	out_sg = &cf->sg[0];
776 	out_sg->extension = 1;
777 	out_sg->length = sym->cipher.data.length;
778 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
779 	cpu_to_hw_sg(out_sg);
780 
781 	/* 1st seg */
782 	sg = &cf->sg[2];
783 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
784 	sg->length = mbuf->data_len - sym->cipher.data.offset;
785 	sg->offset = sym->cipher.data.offset;
786 
787 	/* Successive segs */
788 	mbuf = mbuf->next;
789 	while (mbuf) {
790 		cpu_to_hw_sg(sg);
791 		sg++;
792 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
793 		sg->length = mbuf->data_len;
794 		mbuf = mbuf->next;
795 	}
796 	sg->final = 1;
797 	cpu_to_hw_sg(sg);
798 
799 	/* input */
800 	mbuf = sym->m_src;
801 	in_sg = &cf->sg[1];
802 	in_sg->extension = 1;
803 	in_sg->final = 1;
804 	in_sg->length = sym->cipher.data.length + ses->iv.length;
805 
806 	sg++;
807 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
808 	cpu_to_hw_sg(in_sg);
809 
810 	/* IV */
811 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
812 	sg->length = ses->iv.length;
813 	cpu_to_hw_sg(sg);
814 
815 	/* 1st seg */
816 	sg++;
817 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
818 	sg->length = mbuf->data_len - sym->cipher.data.offset;
819 	sg->offset = sym->cipher.data.offset;
820 
821 	/* Successive segs */
822 	mbuf = mbuf->next;
823 	while (mbuf) {
824 		cpu_to_hw_sg(sg);
825 		sg++;
826 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
827 		sg->length = mbuf->data_len;
828 		mbuf = mbuf->next;
829 	}
830 	sg->final = 1;
831 	cpu_to_hw_sg(sg);
832 
833 	return cf;
834 }
835 
836 static inline struct dpaa_sec_job *
837 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
838 {
839 	struct rte_crypto_sym_op *sym = op->sym;
840 	struct dpaa_sec_job *cf;
841 	struct dpaa_sec_op_ctx *ctx;
842 	struct qm_sg_entry *sg;
843 	rte_iova_t src_start_addr, dst_start_addr;
844 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
845 			ses->iv.offset);
846 
847 	ctx = dpaa_sec_alloc_ctx(ses);
848 	if (!ctx)
849 		return NULL;
850 
851 	cf = &ctx->job;
852 	ctx->op = op;
853 
854 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
855 
856 	if (sym->m_dst)
857 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
858 	else
859 		dst_start_addr = src_start_addr;
860 
861 	/* output */
862 	sg = &cf->sg[0];
863 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
864 	sg->length = sym->cipher.data.length + ses->iv.length;
865 	cpu_to_hw_sg(sg);
866 
867 	/* input */
868 	sg = &cf->sg[1];
869 
870 	/* need to extend the input to a compound frame */
871 	sg->extension = 1;
872 	sg->final = 1;
873 	sg->length = sym->cipher.data.length + ses->iv.length;
874 	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
875 	cpu_to_hw_sg(sg);
876 
877 	sg = &cf->sg[2];
878 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
879 	sg->length = ses->iv.length;
880 	cpu_to_hw_sg(sg);
881 
882 	sg++;
883 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
884 	sg->length = sym->cipher.data.length;
885 	sg->final = 1;
886 	cpu_to_hw_sg(sg);
887 
888 	return cf;
889 }
890 
891 static inline struct dpaa_sec_job *
892 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
893 {
894 	struct rte_crypto_sym_op *sym = op->sym;
895 	struct dpaa_sec_job *cf;
896 	struct dpaa_sec_op_ctx *ctx;
897 	struct qm_sg_entry *sg, *out_sg, *in_sg;
898 	struct rte_mbuf *mbuf;
899 	uint8_t req_segs;
900 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
901 			ses->iv.offset);
902 
903 	if (sym->m_dst) {
904 		mbuf = sym->m_dst;
905 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
906 	} else {
907 		mbuf = sym->m_src;
908 		req_segs = mbuf->nb_segs * 2 + 4;
909 	}
910 
911 	if (ses->auth_only_len)
912 		req_segs++;
913 
914 	if (req_segs > MAX_SG_ENTRIES) {
915 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
916 				MAX_SG_ENTRIES);
917 		return NULL;
918 	}
919 
920 	ctx = dpaa_sec_alloc_ctx(ses);
921 	if (!ctx)
922 		return NULL;
923 
924 	cf = &ctx->job;
925 	ctx->op = op;
926 
927 	rte_prefetch0(cf->sg);
928 
929 	/* output */
930 	out_sg = &cf->sg[0];
931 	out_sg->extension = 1;
932 	if (is_encode(ses))
933 		out_sg->length = sym->aead.data.length + ses->auth_only_len
934 						+ ses->digest_length;
935 	else
936 		out_sg->length = sym->aead.data.length + ses->auth_only_len;
937 
938 	/* output sg entries */
939 	sg = &cf->sg[2];
940 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
941 	cpu_to_hw_sg(out_sg);
942 
943 	/* 1st seg */
944 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
945 	sg->length = mbuf->data_len - sym->aead.data.offset +
946 					ses->auth_only_len;
947 	sg->offset = sym->aead.data.offset - ses->auth_only_len;
948 
949 	/* Successive segs */
950 	mbuf = mbuf->next;
951 	while (mbuf) {
952 		cpu_to_hw_sg(sg);
953 		sg++;
954 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
955 		sg->length = mbuf->data_len;
956 		mbuf = mbuf->next;
957 	}
958 	sg->length -= ses->digest_length;
959 
960 	if (is_encode(ses)) {
961 		cpu_to_hw_sg(sg);
962 		/* set auth output */
963 		sg++;
964 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
965 		sg->length = ses->digest_length;
966 	}
967 	sg->final = 1;
968 	cpu_to_hw_sg(sg);
969 
970 	/* input */
971 	mbuf = sym->m_src;
972 	in_sg = &cf->sg[1];
973 	in_sg->extension = 1;
974 	in_sg->final = 1;
975 	if (is_encode(ses))
976 		in_sg->length = ses->iv.length + sym->aead.data.length
977 							+ ses->auth_only_len;
978 	else
979 		in_sg->length = ses->iv.length + sym->aead.data.length
980 				+ ses->auth_only_len + ses->digest_length;
981 
982 	/* input sg entries */
983 	sg++;
984 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
985 	cpu_to_hw_sg(in_sg);
986 
987 	/* 1st seg IV */
988 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
989 	sg->length = ses->iv.length;
990 	cpu_to_hw_sg(sg);
991 
992 	/* 2nd seg auth only */
993 	if (ses->auth_only_len) {
994 		sg++;
995 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
996 		sg->length = ses->auth_only_len;
997 		cpu_to_hw_sg(sg);
998 	}
999 
1000 	/* 3rd seg */
1001 	sg++;
1002 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1003 	sg->length = mbuf->data_len - sym->aead.data.offset;
1004 	sg->offset = sym->aead.data.offset;
1005 
1006 	/* Successive segs */
1007 	mbuf = mbuf->next;
1008 	while (mbuf) {
1009 		cpu_to_hw_sg(sg);
1010 		sg++;
1011 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1012 		sg->length = mbuf->data_len;
1013 		mbuf = mbuf->next;
1014 	}
1015 
1016 	if (is_decode(ses)) {
1017 		cpu_to_hw_sg(sg);
1018 		sg++;
1019 		memcpy(ctx->digest, sym->aead.digest.data,
1020 			ses->digest_length);
1021 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1022 		sg->length = ses->digest_length;
1023 	}
1024 	sg->final = 1;
1025 	cpu_to_hw_sg(sg);
1026 
1027 	return cf;
1028 }
1029 
1030 static inline struct dpaa_sec_job *
1031 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1032 {
1033 	struct rte_crypto_sym_op *sym = op->sym;
1034 	struct dpaa_sec_job *cf;
1035 	struct dpaa_sec_op_ctx *ctx;
1036 	struct qm_sg_entry *sg;
1037 	uint32_t length = 0;
1038 	rte_iova_t src_start_addr, dst_start_addr;
1039 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1040 			ses->iv.offset);
1041 
1042 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1043 
1044 	if (sym->m_dst)
1045 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1046 	else
1047 		dst_start_addr = src_start_addr;
1048 
1049 	ctx = dpaa_sec_alloc_ctx(ses);
1050 	if (!ctx)
1051 		return NULL;
1052 
1053 	cf = &ctx->job;
1054 	ctx->op = op;
1055 
1056 	/* input */
1057 	rte_prefetch0(cf->sg);
1058 	sg = &cf->sg[2];
1059 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1060 	if (is_encode(ses)) {
1061 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1062 		sg->length = ses->iv.length;
1063 		length += sg->length;
1064 		cpu_to_hw_sg(sg);
1065 
1066 		sg++;
1067 		if (ses->auth_only_len) {
1068 			qm_sg_entry_set64(sg,
1069 					  dpaa_mem_vtop(sym->aead.aad.data));
1070 			sg->length = ses->auth_only_len;
1071 			length += sg->length;
1072 			cpu_to_hw_sg(sg);
1073 			sg++;
1074 		}
1075 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1076 		sg->length = sym->aead.data.length;
1077 		length += sg->length;
1078 		sg->final = 1;
1079 		cpu_to_hw_sg(sg);
1080 	} else {
1081 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1082 		sg->length = ses->iv.length;
1083 		length += sg->length;
1084 		cpu_to_hw_sg(sg);
1085 
1086 		sg++;
1087 		if (ses->auth_only_len) {
1088 			qm_sg_entry_set64(sg,
1089 					  dpaa_mem_vtop(sym->aead.aad.data));
1090 			sg->length = ses->auth_only_len;
1091 			length += sg->length;
1092 			cpu_to_hw_sg(sg);
1093 			sg++;
1094 		}
1095 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1096 		sg->length = sym->aead.data.length;
1097 		length += sg->length;
1098 		cpu_to_hw_sg(sg);
1099 
1100 		memcpy(ctx->digest, sym->aead.digest.data,
1101 		       ses->digest_length);
1102 		sg++;
1103 
1104 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1105 		sg->length = ses->digest_length;
1106 		length += sg->length;
1107 		sg->final = 1;
1108 		cpu_to_hw_sg(sg);
1109 	}
1110 	/* input compound frame */
1111 	cf->sg[1].length = length;
1112 	cf->sg[1].extension = 1;
1113 	cf->sg[1].final = 1;
1114 	cpu_to_hw_sg(&cf->sg[1]);
1115 
1116 	/* output */
1117 	sg++;
1118 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1119 	qm_sg_entry_set64(sg,
1120 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1121 	sg->length = sym->aead.data.length + ses->auth_only_len;
1122 	length = sg->length;
1123 	if (is_encode(ses)) {
1124 		cpu_to_hw_sg(sg);
1125 		/* set auth output */
1126 		sg++;
1127 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1128 		sg->length = ses->digest_length;
1129 		length += sg->length;
1130 	}
1131 	sg->final = 1;
1132 	cpu_to_hw_sg(sg);
1133 
1134 	/* output compound frame */
1135 	cf->sg[0].length = length;
1136 	cf->sg[0].extension = 1;
1137 	cpu_to_hw_sg(&cf->sg[0]);
1138 
1139 	return cf;
1140 }
1141 
1142 static inline struct dpaa_sec_job *
1143 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1144 {
1145 	struct rte_crypto_sym_op *sym = op->sym;
1146 	struct dpaa_sec_job *cf;
1147 	struct dpaa_sec_op_ctx *ctx;
1148 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1149 	struct rte_mbuf *mbuf;
1150 	uint8_t req_segs;
1151 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1152 			ses->iv.offset);
1153 
1154 	if (sym->m_dst) {
1155 		mbuf = sym->m_dst;
1156 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1157 	} else {
1158 		mbuf = sym->m_src;
1159 		req_segs = mbuf->nb_segs * 2 + 4;
1160 	}
1161 
1162 	if (req_segs > MAX_SG_ENTRIES) {
1163 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1164 				MAX_SG_ENTRIES);
1165 		return NULL;
1166 	}
1167 
1168 	ctx = dpaa_sec_alloc_ctx(ses);
1169 	if (!ctx)
1170 		return NULL;
1171 
1172 	cf = &ctx->job;
1173 	ctx->op = op;
1174 
1175 	rte_prefetch0(cf->sg);
1176 
1177 	/* output */
1178 	out_sg = &cf->sg[0];
1179 	out_sg->extension = 1;
1180 	if (is_encode(ses))
1181 		out_sg->length = sym->auth.data.length + ses->digest_length;
1182 	else
1183 		out_sg->length = sym->auth.data.length;
1184 
1185 	/* output sg entries */
1186 	sg = &cf->sg[2];
1187 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
1188 	cpu_to_hw_sg(out_sg);
1189 
1190 	/* 1st seg */
1191 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1192 	sg->length = mbuf->data_len - sym->auth.data.offset;
1193 	sg->offset = sym->auth.data.offset;
1194 
1195 	/* Successive segs */
1196 	mbuf = mbuf->next;
1197 	while (mbuf) {
1198 		cpu_to_hw_sg(sg);
1199 		sg++;
1200 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1201 		sg->length = mbuf->data_len;
1202 		mbuf = mbuf->next;
1203 	}
1204 	sg->length -= ses->digest_length;
1205 
1206 	if (is_encode(ses)) {
1207 		cpu_to_hw_sg(sg);
1208 		/* set auth output */
1209 		sg++;
1210 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1211 		sg->length = ses->digest_length;
1212 	}
1213 	sg->final = 1;
1214 	cpu_to_hw_sg(sg);
1215 
1216 	/* input */
1217 	mbuf = sym->m_src;
1218 	in_sg = &cf->sg[1];
1219 	in_sg->extension = 1;
1220 	in_sg->final = 1;
1221 	if (is_encode(ses))
1222 		in_sg->length = ses->iv.length + sym->auth.data.length;
1223 	else
1224 		in_sg->length = ses->iv.length + sym->auth.data.length
1225 						+ ses->digest_length;
1226 
1227 	/* input sg entries */
1228 	sg++;
1229 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
1230 	cpu_to_hw_sg(in_sg);
1231 
1232 	/* 1st seg IV */
1233 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1234 	sg->length = ses->iv.length;
1235 	cpu_to_hw_sg(sg);
1236 
1237 	/* 2nd seg */
1238 	sg++;
1239 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1240 	sg->length = mbuf->data_len - sym->auth.data.offset;
1241 	sg->offset = sym->auth.data.offset;
1242 
1243 	/* Successive segs */
1244 	mbuf = mbuf->next;
1245 	while (mbuf) {
1246 		cpu_to_hw_sg(sg);
1247 		sg++;
1248 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1249 		sg->length = mbuf->data_len;
1250 		mbuf = mbuf->next;
1251 	}
1252 
1253 	sg->length -= ses->digest_length;
1254 	if (is_decode(ses)) {
1255 		cpu_to_hw_sg(sg);
1256 		sg++;
1257 		memcpy(ctx->digest, sym->auth.digest.data,
1258 			ses->digest_length);
1259 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1260 		sg->length = ses->digest_length;
1261 	}
1262 	sg->final = 1;
1263 	cpu_to_hw_sg(sg);
1264 
1265 	return cf;
1266 }
1267 
1268 static inline struct dpaa_sec_job *
1269 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1270 {
1271 	struct rte_crypto_sym_op *sym = op->sym;
1272 	struct dpaa_sec_job *cf;
1273 	struct dpaa_sec_op_ctx *ctx;
1274 	struct qm_sg_entry *sg;
1275 	rte_iova_t src_start_addr, dst_start_addr;
1276 	uint32_t length = 0;
1277 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1278 			ses->iv.offset);
1279 
1280 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1281 	if (sym->m_dst)
1282 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1283 	else
1284 		dst_start_addr = src_start_addr;
1285 
1286 	ctx = dpaa_sec_alloc_ctx(ses);
1287 	if (!ctx)
1288 		return NULL;
1289 
1290 	cf = &ctx->job;
1291 	ctx->op = op;
1292 
1293 	/* input */
1294 	rte_prefetch0(cf->sg);
1295 	sg = &cf->sg[2];
1296 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1297 	if (is_encode(ses)) {
1298 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1299 		sg->length = ses->iv.length;
1300 		length += sg->length;
1301 		cpu_to_hw_sg(sg);
1302 
1303 		sg++;
1304 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1305 		sg->length = sym->auth.data.length;
1306 		length += sg->length;
1307 		sg->final = 1;
1308 		cpu_to_hw_sg(sg);
1309 	} else {
1310 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1311 		sg->length = ses->iv.length;
1312 		length += sg->length;
1313 		cpu_to_hw_sg(sg);
1314 
1315 		sg++;
1316 
1317 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1318 		sg->length = sym->auth.data.length;
1319 		length += sg->length;
1320 		cpu_to_hw_sg(sg);
1321 
1322 		memcpy(ctx->digest, sym->auth.digest.data,
1323 		       ses->digest_length);
1324 		sg++;
1325 
1326 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1327 		sg->length = ses->digest_length;
1328 		length += sg->length;
1329 		sg->final = 1;
1330 		cpu_to_hw_sg(sg);
1331 	}
1332 	/* input compound frame */
1333 	cf->sg[1].length = length;
1334 	cf->sg[1].extension = 1;
1335 	cf->sg[1].final = 1;
1336 	cpu_to_hw_sg(&cf->sg[1]);
1337 
1338 	/* output */
1339 	sg++;
1340 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1341 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1342 	sg->length = sym->cipher.data.length;
1343 	length = sg->length;
1344 	if (is_encode(ses)) {
1345 		cpu_to_hw_sg(sg);
1346 		/* set auth output */
1347 		sg++;
1348 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1349 		sg->length = ses->digest_length;
1350 		length += sg->length;
1351 	}
1352 	sg->final = 1;
1353 	cpu_to_hw_sg(sg);
1354 
1355 	/* output compound frame */
1356 	cf->sg[0].length = length;
1357 	cf->sg[0].extension = 1;
1358 	cpu_to_hw_sg(&cf->sg[0]);
1359 
1360 	return cf;
1361 }
1362 
1363 static inline struct dpaa_sec_job *
1364 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1365 {
1366 	struct rte_crypto_sym_op *sym = op->sym;
1367 	struct dpaa_sec_job *cf;
1368 	struct dpaa_sec_op_ctx *ctx;
1369 	struct qm_sg_entry *sg;
1370 	phys_addr_t src_start_addr, dst_start_addr;
1371 
1372 	ctx = dpaa_sec_alloc_ctx(ses);
1373 	if (!ctx)
1374 		return NULL;
1375 	cf = &ctx->job;
1376 	ctx->op = op;
1377 
1378 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1379 
1380 	if (sym->m_dst)
1381 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1382 	else
1383 		dst_start_addr = src_start_addr;
1384 
1385 	/* input */
1386 	sg = &cf->sg[1];
1387 	qm_sg_entry_set64(sg, src_start_addr);
1388 	sg->length = sym->m_src->pkt_len;
1389 	sg->final = 1;
1390 	cpu_to_hw_sg(sg);
1391 
1392 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1393 	/* output */
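	/*
	 * Protocol offload may grow or shrink the packet, so the whole
	 * remaining buffer space is offered as output rather than data_len.
	 */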
1394 	sg = &cf->sg[0];
1395 	qm_sg_entry_set64(sg, dst_start_addr);
1396 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1397 	cpu_to_hw_sg(sg);
1398 
1399 	return cf;
1400 }
1401 
1402 static uint16_t
1403 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1404 		       uint16_t nb_ops)
1405 {
1406 	/* Function to transmit the frames to the given device and queue pair */
1407 	uint32_t loop;
1408 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1409 	uint16_t num_tx = 0;
1410 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1411 	uint32_t frames_to_send;
1412 	struct rte_crypto_op *op;
1413 	struct dpaa_sec_job *cf;
1414 	dpaa_sec_session *ses;
1415 	struct dpaa_sec_op_ctx *ctx;
1416 	uint32_t auth_only_len;
1417 	struct qman_fq *inq[DPAA_SEC_BURST];
1418 
1419 	while (nb_ops) {
1420 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1421 				DPAA_SEC_BURST : nb_ops;
1422 		for (loop = 0; loop < frames_to_send; loop++) {
1423 			op = *(ops++);
1424 			switch (op->sess_type) {
1425 			case RTE_CRYPTO_OP_WITH_SESSION:
1426 				ses = (dpaa_sec_session *)
1427 					get_session_private_data(
1428 							op->sym->session,
1429 							cryptodev_driver_id);
1430 				break;
1431 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1432 				ses = (dpaa_sec_session *)
1433 					get_sec_session_private_data(
1434 							op->sym->sec_session);
1435 				break;
1436 			default:
1437 				DPAA_SEC_DP_ERR(
1438 					"sessionless crypto op not supported");
1439 				frames_to_send = loop;
1440 				nb_ops = loop;
1441 				goto send_pkts;
1442 			}
1443 			if (unlikely(!ses->qp || ses->qp != qp)) {
1444 				DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
1445 					     ses->qp, qp);
1446 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1447 					frames_to_send = loop;
1448 					nb_ops = loop;
1449 					goto send_pkts;
1450 				}
1451 			}
1452 
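			/*
			 * Length of data that is authenticated but not
			 * encrypted; for AEAD it is replaced below by the
			 * session's AAD length.
			 */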
1453 			auth_only_len = op->sym->auth.data.length -
1454 						op->sym->cipher.data.length;
1455 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1456 				if (is_auth_only(ses)) {
1457 					cf = build_auth_only(op, ses);
1458 				} else if (is_cipher_only(ses)) {
1459 					cf = build_cipher_only(op, ses);
1460 				} else if (is_aead(ses)) {
1461 					cf = build_cipher_auth_gcm(op, ses);
1462 					auth_only_len = ses->auth_only_len;
1463 				} else if (is_auth_cipher(ses)) {
1464 					cf = build_cipher_auth(op, ses);
1465 				} else if (is_proto_ipsec(ses)) {
1466 					cf = build_proto(op, ses);
1467 				} else {
1468 					DPAA_SEC_DP_ERR("not supported ops");
1469 					frames_to_send = loop;
1470 					nb_ops = loop;
1471 					goto send_pkts;
1472 				}
1473 			} else {
1474 				if (is_auth_only(ses)) {
1475 					cf = build_auth_only_sg(op, ses);
1476 				} else if (is_cipher_only(ses)) {
1477 					cf = build_cipher_only_sg(op, ses);
1478 				} else if (is_aead(ses)) {
1479 					cf = build_cipher_auth_gcm_sg(op, ses);
1480 					auth_only_len = ses->auth_only_len;
1481 				} else if (is_auth_cipher(ses)) {
1482 					cf = build_cipher_auth_sg(op, ses);
1483 				} else {
1484 					DPAA_SEC_DP_ERR("not supported ops");
1485 					frames_to_send = loop;
1486 					nb_ops = loop;
1487 					goto send_pkts;
1488 				}
1489 			}
1490 			if (unlikely(!cf)) {
1491 				frames_to_send = loop;
1492 				nb_ops = loop;
1493 				goto send_pkts;
1494 			}
1495 
1496 			fd = &fds[loop];
1497 			inq[loop] = ses->inq;
1498 			fd->opaque_addr = 0;
1499 			fd->cmd = 0;
1500 			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1501 			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1502 			fd->_format1 = qm_fd_compound;
1503 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1504 			/* auth_only_len is set as 0 in the descriptor and is
1505 			 * overwritten here in fd.cmd, which updates the
1506 			 * DPOVRD register.
1507 			 */
1508 			if (auth_only_len)
1509 				fd->cmd = 0x80000000 | auth_only_len;
1510 
1511 		}
1512 send_pkts:
1513 		loop = 0;
1514 		while (loop < frames_to_send) {
1515 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1516 					frames_to_send - loop);
1517 		}
1518 		nb_ops -= frames_to_send;
1519 		num_tx += frames_to_send;
1520 	}
1521 
1522 	dpaa_qp->tx_pkts += num_tx;
1523 	dpaa_qp->tx_errs += nb_ops - num_tx;
1524 
1525 	return num_tx;
1526 }
1527 
1528 static uint16_t
1529 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1530 		       uint16_t nb_ops)
1531 {
1532 	uint16_t num_rx;
1533 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1534 
1535 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1536 
1537 	dpaa_qp->rx_pkts += num_rx;
1538 	dpaa_qp->rx_errs += nb_ops - num_rx;
1539 
1540 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1541 
1542 	return num_rx;
1543 }
1544 
1545 /** Release queue pair */
1546 static int
1547 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1548 			    uint16_t qp_id)
1549 {
1550 	struct dpaa_sec_dev_private *internals;
1551 	struct dpaa_sec_qp *qp = NULL;
1552 
1553 	PMD_INIT_FUNC_TRACE();
1554 
1555 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1556 
1557 	internals = dev->data->dev_private;
1558 	if (qp_id >= internals->max_nb_queue_pairs) {
1559 		DPAA_SEC_ERR("Max supported qpid %d",
1560 			     internals->max_nb_queue_pairs);
1561 		return -EINVAL;
1562 	}
1563 
1564 	qp = &internals->qps[qp_id];
1565 	qp->internals = NULL;
1566 	dev->data->queue_pairs[qp_id] = NULL;
1567 
1568 	return 0;
1569 }
1570 
1571 /** Setup a queue pair */
1572 static int
1573 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1574 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1575 		__rte_unused int socket_id,
1576 		__rte_unused struct rte_mempool *session_pool)
1577 {
1578 	struct dpaa_sec_dev_private *internals;
1579 	struct dpaa_sec_qp *qp = NULL;
1580 
1581 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1582 
1583 	internals = dev->data->dev_private;
1584 	if (qp_id >= internals->max_nb_queue_pairs) {
1585 		DPAA_SEC_ERR("Max supported qpid %d",
1586 			     internals->max_nb_queue_pairs);
1587 		return -EINVAL;
1588 	}
1589 
1590 	qp = &internals->qps[qp_id];
1591 	qp->internals = internals;
1592 	dev->data->queue_pairs[qp_id] = qp;
1593 
1594 	return 0;
1595 }
1596 
1597 /** Start queue pair */
1598 static int
1599 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1600 			  __rte_unused uint16_t queue_pair_id)
1601 {
1602 	PMD_INIT_FUNC_TRACE();
1603 
1604 	return 0;
1605 }
1606 
1607 /** Stop queue pair */
1608 static int
1609 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1610 			 __rte_unused uint16_t queue_pair_id)
1611 {
1612 	PMD_INIT_FUNC_TRACE();
1613 
1614 	return 0;
1615 }
1616 
1617 /** Return the number of allocated queue pairs */
1618 static uint32_t
1619 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1620 {
1621 	PMD_INIT_FUNC_TRACE();
1622 
1623 	return dev->data->nb_queue_pairs;
1624 }
1625 
1626 /** Returns the size of the session structure */
1627 static unsigned int
1628 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1629 {
1630 	PMD_INIT_FUNC_TRACE();
1631 
1632 	return sizeof(dpaa_sec_session);
1633 }
1634 
1635 static int
1636 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1637 		     struct rte_crypto_sym_xform *xform,
1638 		     dpaa_sec_session *session)
1639 {
1640 	session->cipher_alg = xform->cipher.algo;
1641 	session->iv.length = xform->cipher.iv.length;
1642 	session->iv.offset = xform->cipher.iv.offset;
1643 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1644 					       RTE_CACHE_LINE_SIZE);
1645 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1646 		DPAA_SEC_ERR("No Memory for cipher key");
1647 		return -ENOMEM;
1648 	}
1649 	session->cipher_key.length = xform->cipher.key.length;
1650 
1651 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1652 	       xform->cipher.key.length);
1653 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1654 			DIR_ENC : DIR_DEC;
1655 
1656 	return 0;
1657 }
1658 
1659 static int
1660 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1661 		   struct rte_crypto_sym_xform *xform,
1662 		   dpaa_sec_session *session)
1663 {
1664 	session->auth_alg = xform->auth.algo;
1665 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1666 					     RTE_CACHE_LINE_SIZE);
1667 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1668 		DPAA_SEC_ERR("No Memory for auth key");
1669 		return -ENOMEM;
1670 	}
1671 	session->auth_key.length = xform->auth.key.length;
1672 	session->digest_length = xform->auth.digest_length;
1673 
1674 	memcpy(session->auth_key.data, xform->auth.key.data,
1675 	       xform->auth.key.length);
1676 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1677 			DIR_ENC : DIR_DEC;
1678 
1679 	return 0;
1680 }
1681 
1682 static int
1683 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1684 		   struct rte_crypto_sym_xform *xform,
1685 		   dpaa_sec_session *session)
1686 {
1687 	session->aead_alg = xform->aead.algo;
1688 	session->iv.length = xform->aead.iv.length;
1689 	session->iv.offset = xform->aead.iv.offset;
1690 	session->auth_only_len = xform->aead.aad_length;
1691 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1692 					     RTE_CACHE_LINE_SIZE);
1693 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1694 		DPAA_SEC_ERR("No Memory for aead key\n");
1695 		return -ENOMEM;
1696 	}
1697 	session->aead_key.length = xform->aead.key.length;
1698 	session->digest_length = xform->aead.digest_length;
1699 
1700 	memcpy(session->aead_key.data, xform->aead.key.data,
1701 	       xform->aead.key.length);
1702 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1703 			DIR_ENC : DIR_DEC;
1704 
1705 	return 0;
1706 }
1707 
1708 static struct qman_fq *
1709 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1710 {
1711 	unsigned int i;
1712 
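	/* Hand out the first free per-session Rx (in) queue. */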
1713 	for (i = 0; i < qi->max_nb_sessions; i++) {
1714 		if (qi->inq_attach[i] == 0) {
1715 			qi->inq_attach[i] = 1;
1716 			return &qi->inq[i];
1717 		}
1718 	}
1719 	DPAA_SEC_WARN("All sessions in use %x", qi->max_nb_sessions);
1720 
1721 	return NULL;
1722 }
1723 
1724 static int
1725 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1726 {
1727 	unsigned int i;
1728 
1729 	for (i = 0; i < qi->max_nb_sessions; i++) {
1730 		if (&qi->inq[i] == fq) {
1731 			qman_retire_fq(fq, NULL);
1732 			qman_oos_fq(fq);
1733 			qi->inq_attach[i] = 0;
1734 			return 0;
1735 		}
1736 	}
1737 	return -1;
1738 }
1739 
1740 static int
1741 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1742 {
1743 	int ret;
1744 
1745 	sess->qp = qp;
1746 	ret = dpaa_sec_prep_cdb(sess);
1747 	if (ret) {
1748 		DPAA_SEC_ERR("Unable to prepare sec cdb");
1749 		return -1;
1750 	}
1751 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1752 		ret = rte_dpaa_portal_init((void *)0);
1753 		if (ret) {
1754 			DPAA_SEC_ERR("Failure in affining portal");
1755 			return ret;
1756 		}
1757 	}
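	/*
	 * Program the session's in-queue: context A carries the CDB address
	 * and dequeued results are delivered to this qp's out-queue.
	 */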
1758 	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1759 			       qman_fq_fqid(&qp->outq));
1760 	if (ret)
1761 		DPAA_SEC_ERR("Unable to init sec queue");
1762 
1763 	return ret;
1764 }
1765 
1766 static int
1767 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1768 			uint16_t qp_id __rte_unused,
1769 			void *ses __rte_unused)
1770 {
1771 	PMD_INIT_FUNC_TRACE();
1772 	return 0;
1773 }
1774 
1775 static int
1776 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1777 			uint16_t qp_id  __rte_unused,
1778 			void *ses)
1779 {
1780 	dpaa_sec_session *sess = ses;
1781 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1782 
1783 	PMD_INIT_FUNC_TRACE();
1784 
1785 	if (sess->inq)
1786 		dpaa_sec_detach_rxq(qi, sess->inq);
1787 	sess->inq = NULL;
1788 
1789 	sess->qp = NULL;
1790 
1791 	return 0;
1792 }
1793 
1794 static int
1795 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1796 			    struct rte_crypto_sym_xform *xform,	void *sess)
1797 {
1798 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1799 	dpaa_sec_session *session = sess;
1800 
1801 	PMD_INIT_FUNC_TRACE();
1802 
1803 	if (unlikely(sess == NULL)) {
1804 		DPAA_SEC_ERR("invalid session struct");
1805 		return -EINVAL;
1806 	}
1807 
1808 	/* Default IV length = 0 */
1809 	session->iv.length = 0;
1810 
1811 	/* Cipher Only */
1812 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1813 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1814 		dpaa_sec_cipher_init(dev, xform, session);
1815 
1816 	/* Authentication Only */
1817 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1818 		   xform->next == NULL) {
1819 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1820 		dpaa_sec_auth_init(dev, xform, session);
1821 
1822 	/* Cipher then Authenticate */
1823 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1824 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1825 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1826 			dpaa_sec_cipher_init(dev, xform, session);
1827 			dpaa_sec_auth_init(dev, xform->next, session);
1828 		} else {
1829 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
1830 			return -EINVAL;
1831 		}
1832 
1833 	/* Authenticate then Cipher */
1834 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1835 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1836 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1837 			dpaa_sec_auth_init(dev, xform, session);
1838 			dpaa_sec_cipher_init(dev, xform->next, session);
1839 		} else {
1840 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
1841 			return -EINVAL;
1842 		}
1843 
1844 	/* AEAD operation for AES-GCM kind of Algorithms */
1845 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1846 		   xform->next == NULL) {
1847 		dpaa_sec_aead_init(dev, xform, session);
1848 
1849 	} else {
1850 		DPAA_SEC_ERR("Invalid crypto type");
1851 		return -EINVAL;
1852 	}
1853 	session->ctx_pool = internals->ctx_pool;
1854 	session->inq = dpaa_sec_attach_rxq(internals);
1855 	if (session->inq == NULL) {
1856 		DPAA_SEC_ERR("unable to attach sec queue");
1857 		goto err1;
1858 	}
1859 
1860 	return 0;
1861 
1862 err1:
1863 	rte_free(session->cipher_key.data);
1864 	rte_free(session->auth_key.data);
1865 	memset(session, 0, sizeof(dpaa_sec_session));
1866 
1867 	return -EINVAL;
1868 }
1869 
1870 static int
1871 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1872 		struct rte_crypto_sym_xform *xform,
1873 		struct rte_cryptodev_sym_session *sess,
1874 		struct rte_mempool *mempool)
1875 {
1876 	void *sess_private_data;
1877 	int ret;
1878 
1879 	PMD_INIT_FUNC_TRACE();
1880 
1881 	if (rte_mempool_get(mempool, &sess_private_data)) {
1882 		DPAA_SEC_ERR("Couldn't get object from session mempool");
1883 		return -ENOMEM;
1884 	}
1885 
1886 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1887 	if (ret != 0) {
1888 		DPAA_SEC_ERR("failed to configure session parameters");
1889 
1890 		/* Return session to mempool */
1891 		rte_mempool_put(mempool, sess_private_data);
1892 		return ret;
1893 	}
1894 
1895 	set_session_private_data(sess, dev->driver_id,
1896 			sess_private_data);
1897 
1898 
1899 	return 0;
1900 }
1901 
1902 /** Clear the session memory so it doesn't leave key material behind */
1903 static void
1904 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1905 		struct rte_cryptodev_sym_session *sess)
1906 {
1907 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1908 	uint8_t index = dev->driver_id;
1909 	void *sess_priv = get_session_private_data(sess, index);
1910 
1911 	PMD_INIT_FUNC_TRACE();
1912 
1913 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1914 
1915 	if (sess_priv) {
1916 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1917 
1918 		if (s->inq)
1919 			dpaa_sec_detach_rxq(qi, s->inq);
1920 		rte_free(s->cipher_key.data);
1921 		rte_free(s->auth_key.data);
1922 		memset(s, 0, sizeof(dpaa_sec_session));
1923 		set_session_private_data(sess, index, NULL);
1924 		rte_mempool_put(sess_mp, sess_priv);
1925 	}
1926 }
1927 
1928 static int
1929 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1930 			   struct rte_security_session_conf *conf,
1931 			   void *sess)
1932 {
1933 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1934 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1935 	struct rte_crypto_auth_xform *auth_xform;
1936 	struct rte_crypto_cipher_xform *cipher_xform;
1937 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
1938 
1939 	PMD_INIT_FUNC_TRACE();
1940 
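	/*
	 * The crypto xform chain is cipher-then-auth for egress and
	 * auth-then-cipher for ingress; pick the xforms accordingly.
	 */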
1941 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1942 		cipher_xform = &conf->crypto_xform->cipher;
1943 		auth_xform = &conf->crypto_xform->next->auth;
1944 	} else {
1945 		auth_xform = &conf->crypto_xform->auth;
1946 		cipher_xform = &conf->crypto_xform->next->cipher;
1947 	}
1948 	session->proto_alg = conf->protocol;
1949 	session->cipher_key.data = rte_zmalloc(NULL,
1950 					       cipher_xform->key.length,
1951 					       RTE_CACHE_LINE_SIZE);
1952 	if (session->cipher_key.data == NULL &&
1953 			cipher_xform->key.length > 0) {
1954 		DPAA_SEC_ERR("No Memory for cipher key");
1955 		return -ENOMEM;
1956 	}
1957 
1958 	session->cipher_key.length = cipher_xform->key.length;
1959 	session->auth_key.data = rte_zmalloc(NULL,
1960 					auth_xform->key.length,
1961 					RTE_CACHE_LINE_SIZE);
1962 	if (session->auth_key.data == NULL &&
1963 			auth_xform->key.length > 0) {
1964 		DPAA_SEC_ERR("No Memory for auth key");
1965 		rte_free(session->cipher_key.data);
1966 		return -ENOMEM;
1967 	}
1968 	session->auth_key.length = auth_xform->key.length;
1969 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1970 			cipher_xform->key.length);
1971 	memcpy(session->auth_key.data, auth_xform->key.data,
1972 			auth_xform->key.length);
1973 
1974 	switch (auth_xform->algo) {
1975 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1976 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1977 		break;
1978 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1979 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1980 		break;
1981 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1982 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1983 		break;
1984 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1985 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1986 		break;
1987 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1988 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1989 		break;
1990 	case RTE_CRYPTO_AUTH_AES_CMAC:
1991 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1992 		break;
1993 	case RTE_CRYPTO_AUTH_NULL:
1994 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1995 		break;
1996 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1997 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1998 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1999 	case RTE_CRYPTO_AUTH_SHA1:
2000 	case RTE_CRYPTO_AUTH_SHA256:
2001 	case RTE_CRYPTO_AUTH_SHA512:
2002 	case RTE_CRYPTO_AUTH_SHA224:
2003 	case RTE_CRYPTO_AUTH_SHA384:
2004 	case RTE_CRYPTO_AUTH_MD5:
2005 	case RTE_CRYPTO_AUTH_AES_GMAC:
2006 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2007 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2008 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2009 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2010 			auth_xform->algo);
2011 		goto out;
2012 	default:
2013 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2014 			auth_xform->algo);
2015 		goto out;
2016 	}
2017 
2018 	switch (cipher_xform->algo) {
2019 	case RTE_CRYPTO_CIPHER_AES_CBC:
2020 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2021 		break;
2022 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2023 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2024 		break;
2025 	case RTE_CRYPTO_CIPHER_AES_CTR:
2026 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2027 		break;
2028 	case RTE_CRYPTO_CIPHER_NULL:
2029 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2030 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2031 	case RTE_CRYPTO_CIPHER_AES_ECB:
2032 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2033 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2034 			cipher_xform->algo);
2035 		goto out;
2036 	default:
2037 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2038 			cipher_xform->algo);
2039 		goto out;
2040 	}
2041 
2042 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
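		/* Clear the encap PDB along with the outer IPv4 header
		 * template that is built immediately after it below.
		 */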
2043 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2044 				sizeof(session->ip4_hdr));
2045 		session->ip4_hdr.ip_v = IPVERSION;
2046 		session->ip4_hdr.ip_hl = 5;
2047 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2048 						sizeof(session->ip4_hdr));
2049 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2050 		session->ip4_hdr.ip_id = 0;
2051 		session->ip4_hdr.ip_off = 0;
2052 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2053 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2054 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2055 				: IPPROTO_AH;
2056 		session->ip4_hdr.ip_sum = 0;
2057 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2058 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2059 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2060 						(void *)&session->ip4_hdr,
2061 						sizeof(struct ip));
2062 
2063 		session->encap_pdb.options =
2064 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2065 			PDBOPTS_ESP_OIHI_PDB_INL |
2066 			PDBOPTS_ESP_IVSRC |
2067 			PDBHMO_ESP_ENCAP_DTTL;
2068 		session->encap_pdb.spi = ipsec_xform->spi;
2069 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2070 
2071 		session->dir = DIR_ENC;
2072 	} else if (ipsec_xform->direction ==
2073 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2074 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2075 		session->decap_pdb.options = sizeof(struct ip) << 16;
2076 		session->dir = DIR_DEC;
2077 	} else
2078 		goto out;
2079 	session->ctx_pool = internals->ctx_pool;
2080 	session->inq = dpaa_sec_attach_rxq(internals);
2081 	if (session->inq == NULL) {
2082 		DPAA_SEC_ERR("unable to attach sec queue");
2083 		goto out;
2084 	}
2085 
2087 	return 0;
2088 out:
2089 	rte_free(session->auth_key.data);
2090 	rte_free(session->cipher_key.data);
2091 	memset(session, 0, sizeof(dpaa_sec_session));
2092 	return -1;
2093 }
2094 
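/* rte_security session create op: take the private session object from the
 * mempool and configure it for the requested protocol (only IPsec offload is
 * supported).
 */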
2095 static int
2096 dpaa_sec_security_session_create(void *dev,
2097 				 struct rte_security_session_conf *conf,
2098 				 struct rte_security_session *sess,
2099 				 struct rte_mempool *mempool)
2100 {
2101 	void *sess_private_data;
2102 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2103 	int ret;
2104 
2105 	if (rte_mempool_get(mempool, &sess_private_data)) {
2106 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2107 		return -ENOMEM;
2108 	}
2109 
2110 	switch (conf->protocol) {
2111 	case RTE_SECURITY_PROTOCOL_IPSEC:
2112 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2113 				sess_private_data);
2114 		break;
2115 	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* return the object taken from the session mempool */
		rte_mempool_put(mempool, sess_private_data);
2116 		return -ENOTSUP;
2117 	default:
		rte_mempool_put(mempool, sess_private_data);
2118 		return -EINVAL;
2119 	}
2120 	if (ret != 0) {
2121 		DPAA_SEC_ERR("failed to configure session parameters");
2122 		/* Return session to mempool */
2123 		rte_mempool_put(mempool, sess_private_data);
2124 		return ret;
2125 	}
2126 
2127 	set_sec_session_private_data(sess, sess_private_data);
2128 
2129 	return ret;
2130 }
2131 
2132 /** Clear the memory of session so it doesn't leave key material behind */
2133 static int
2134 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2135 		struct rte_security_session *sess)
2136 {
2137 	PMD_INIT_FUNC_TRACE();
2138 	void *sess_priv = get_sec_session_private_data(sess);
2139 
2140 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2141 
2142 	if (sess_priv) {
2143 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2144 
2145 		rte_free(s->cipher_key.data);
2146 		rte_free(s->auth_key.data);
2147 		memset(s, 0, sizeof(dpaa_sec_session));
2148 		set_sec_session_private_data(sess, NULL);
2149 		rte_mempool_put(sess_mp, sess_priv);
2150 	}
2151 	return 0;
2152 }
2154 
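/* Create the per-device crypto context pool, shared by all sessions of this
 * device, if it does not exist yet.
 */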
2155 static int
2156 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2157 		       struct rte_cryptodev_config *config __rte_unused)
2158 {
2160 	char str[20];
2161 	struct dpaa_sec_dev_private *internals;
2162 
2163 	PMD_INIT_FUNC_TRACE();
2164 
2165 	internals = dev->data->dev_private;
2166 	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2167 	if (!internals->ctx_pool) {
2168 		internals->ctx_pool = rte_mempool_create((const char *)str,
2169 							CTX_POOL_NUM_BUFS,
2170 							CTX_POOL_BUF_SIZE,
2171 							CTX_POOL_CACHE_SIZE, 0,
2172 							NULL, NULL, NULL, NULL,
2173 							SOCKET_ID_ANY, 0);
2174 		if (!internals->ctx_pool) {
2175 			DPAA_SEC_ERR("%s create failed\n", str);
2176 			return -ENOMEM;
2177 		}
2178 	} else
2179 		DPAA_SEC_INFO("mempool already created for dev_id : %d",
2180 				dev->data->dev_id);
2181 
2182 	return 0;
2183 }
2184 
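/* Start/stop are no-ops for this PMD: the SEC queues are created at device
 * init time and the context pool at configure time.
 */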
2185 static int
2186 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2187 {
2188 	PMD_INIT_FUNC_TRACE();
2189 	return 0;
2190 }
2191 
2192 static void
2193 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2194 {
2195 	PMD_INIT_FUNC_TRACE();
2196 }
2197 
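/* Close releases the context pool created in dpaa_sec_dev_configure(). */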
2198 static int
2199 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2200 {
2201 	struct dpaa_sec_dev_private *internals;
2202 
2203 	PMD_INIT_FUNC_TRACE();
2204 
2205 	if (dev == NULL)
2206 		return -ENODEV;
2207 
2208 	internals = dev->data->dev_private;
2209 	rte_mempool_free(internals->ctx_pool);
2210 	internals->ctx_pool = NULL;
2211 
2212 	return 0;
2213 }
2214 
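/* Report device capabilities, feature flags and queue/session limits. */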
2215 static void
2216 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2217 		       struct rte_cryptodev_info *info)
2218 {
2219 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2220 
2221 	PMD_INIT_FUNC_TRACE();
2222 	if (info != NULL) {
2223 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2224 		info->feature_flags = dev->feature_flags;
2225 		info->capabilities = dpaa_sec_capabilities;
2226 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2227 		info->sym.max_nb_sessions_per_qp =
2228 			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2229 			RTE_DPAA_MAX_NB_SEC_QPS;
2230 		info->driver_id = cryptodev_driver_id;
2231 	}
2232 }
2233 
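/* Cryptodev operations exposed to the crypto framework */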
2234 static struct rte_cryptodev_ops crypto_ops = {
2235 	.dev_configure	      = dpaa_sec_dev_configure,
2236 	.dev_start	      = dpaa_sec_dev_start,
2237 	.dev_stop	      = dpaa_sec_dev_stop,
2238 	.dev_close	      = dpaa_sec_dev_close,
2239 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2240 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2241 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2242 	.queue_pair_start     = dpaa_sec_queue_pair_start,
2243 	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
2244 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2245 	.session_get_size     = dpaa_sec_session_get_size,
2246 	.session_configure    = dpaa_sec_session_configure,
2247 	.session_clear        = dpaa_sec_session_clear,
2248 	.qp_attach_session    = dpaa_sec_qp_attach_sess,
2249 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
2250 };
2251 
2252 static const struct rte_security_capability *
2253 dpaa_sec_capabilities_get(void *device __rte_unused)
2254 {
2255 	return dpaa_sec_security_cap;
2256 }
2257 
2258 struct rte_security_ops dpaa_sec_security_ops = {
2259 	.session_create = dpaa_sec_security_session_create,
2260 	.session_update = NULL,
2261 	.session_stats_get = NULL,
2262 	.session_destroy = dpaa_sec_security_session_destroy,
2263 	.set_pkt_metadata = NULL,
2264 	.capabilities_get = dpaa_sec_capabilities_get
2265 };
2266 
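/* Undo dpaa_sec_dev_init(): free the security context, the context pool (if
 * not already freed by close) and the private device data.
 */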
2267 static int
2268 dpaa_sec_uninit(struct rte_cryptodev *dev)
2269 {
2270 	struct dpaa_sec_dev_private *internals;
2271 
2272 	if (dev == NULL)
2273 		return -ENODEV;
2274 
2275 	internals = dev->data->dev_private;
2276 	rte_free(dev->security_ctx);
2277 
2278 	/* In case close has been called, internals->ctx_pool would be NULL */
2279 	rte_mempool_free(internals->ctx_pool);
2280 	rte_free(internals);
2281 
2282 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2283 		      dev->data->name, rte_socket_id());
2284 
2285 	return 0;
2286 }
2287 
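/* Initialize the cryptodev: register ops and feature flags; in the primary
 * process also create the rte_security context, configure the SEC tx frame
 * queue of every queue pair and create the dynamic rx frame queues later
 * attached to sessions.
 */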
2288 static int
2289 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2290 {
2291 	struct dpaa_sec_dev_private *internals;
2292 	struct rte_security_ctx *security_instance;
2293 	struct dpaa_sec_qp *qp;
2294 	uint32_t i, flags;
2295 	int ret;
2296 
2297 	PMD_INIT_FUNC_TRACE();
2298 
2299 	cryptodev->driver_id = cryptodev_driver_id;
2300 	cryptodev->dev_ops = &crypto_ops;
2301 
2302 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2303 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2304 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2305 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2306 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2307 			RTE_CRYPTODEV_FF_SECURITY |
2308 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2309 
2310 	internals = cryptodev->data->dev_private;
2311 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2312 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2313 
2314 	/*
2315 	 * For secondary processes, we don't initialise any further as primary
2316 	 * has already done this work. Only check we don't need a different
2317 	 * RX function
2318 	 */
2319 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2320 		DPAA_SEC_WARN("Device already initialized by primary process");
2321 		return 0;
2322 	}
2323 
2324 	/* Initialize security_ctx only for primary process*/
2325 	security_instance = rte_malloc("rte_security_instances_ops",
2326 				sizeof(struct rte_security_ctx), 0);
2327 	if (security_instance == NULL)
2328 		return -ENOMEM;
2329 	security_instance->device = (void *)cryptodev;
2330 	security_instance->ops = &dpaa_sec_security_ops;
2331 	security_instance->sess_cnt = 0;
2332 	cryptodev->security_ctx = security_instance;
2333 
2334 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2335 		/* init qman fq for queue pair */
2336 		qp = &internals->qps[i];
2337 		ret = dpaa_sec_init_tx(&qp->outq);
2338 		if (ret) {
2339 			DPAA_SEC_ERR("failed to config tx of queue pair %d", i);
2340 			goto init_error;
2341 		}
2342 	}
2343 
2344 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2345 		QMAN_FQ_FLAG_TO_DCPORTAL;
2346 	for (i = 0; i < internals->max_nb_sessions; i++) {
2347 		/* create rx qman fq for sessions*/
2348 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2349 		if (unlikely(ret != 0)) {
2350 			DPAA_SEC_ERR("sec qman_create_fq failed");
2351 			goto init_error;
2352 		}
2353 	}
2354 
2355 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2356 	return 0;
2357 
2358 init_error:
2359 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2360 
2361 	dpaa_sec_uninit(cryptodev);
2362 	return -EFAULT;
2363 }
2364 
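/* DPAA bus probe: allocate and initialize a cryptodev for the SEC device,
 * reading the SEC era from the device tree when it is not already configured.
 */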
2365 static int
2366 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2367 				struct rte_dpaa_device *dpaa_dev)
2368 {
2369 	struct rte_cryptodev *cryptodev;
2370 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2371 
2372 	int retval;
2373 
2374 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
			dpaa_dev->id.dev_id);
2375 
2376 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2377 	if (cryptodev == NULL)
2378 		return -ENOMEM;
2379 
2380 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2381 		cryptodev->data->dev_private = rte_zmalloc_socket(
2382 					"cryptodev private structure",
2383 					sizeof(struct dpaa_sec_dev_private),
2384 					RTE_CACHE_LINE_SIZE,
2385 					rte_socket_id());
2386 
2387 		if (cryptodev->data->dev_private == NULL)
2388 			rte_panic("Cannot allocate memory for private "
2389 					"device data");
2390 	}
2391 
2392 	dpaa_dev->crypto_dev = cryptodev;
2393 	cryptodev->device = &dpaa_dev->device;
2394 	cryptodev->device->driver = &dpaa_drv->driver;
2395 
2396 	/* init user callbacks */
2397 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2398 
2399 	/* if sec device version is not configured */
2400 	if (!rta_get_sec_era()) {
2401 		const struct device_node *caam_node;
2402 
2403 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2404 			const uint32_t *prop = of_get_property(caam_node,
2405 					"fsl,sec-era",
2406 					NULL);
2407 			if (prop) {
2408 				rta_set_sec_era(
2409 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2410 				break;
2411 			}
2412 		}
2413 	}
2414 
2415 	/* Invoke PMD device initialization function */
2416 	retval = dpaa_sec_dev_init(cryptodev);
2417 	if (retval == 0)
2418 		return 0;
2419 
2420 	/* In case of error, cleanup is done */
2421 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2422 		rte_free(cryptodev->data->dev_private);
2423 
2424 	rte_cryptodev_pmd_release_device(cryptodev);
2425 
2426 	return -ENXIO;
2427 }
2428 
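/* DPAA bus remove: uninitialize and destroy the cryptodev bound to the
 * device.
 */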
2429 static int
2430 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2431 {
2432 	struct rte_cryptodev *cryptodev;
2433 	int ret;
2434 
2435 	cryptodev = dpaa_dev->crypto_dev;
2436 	if (cryptodev == NULL)
2437 		return -ENODEV;
2438 
2439 	ret = dpaa_sec_uninit(cryptodev);
2440 	if (ret)
2441 		return ret;
2442 
2443 	return rte_cryptodev_pmd_destroy(cryptodev);
2444 }
2445 
2446 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2447 	.drv_type = FSL_DPAA_CRYPTO,
2448 	.driver = {
2449 		.name = "DPAA SEC PMD"
2450 	},
2451 	.probe = cryptodev_dpaa_sec_probe,
2452 	.remove = cryptodev_dpaa_sec_remove,
2453 };
2454 
2455 static struct cryptodev_driver dpaa_sec_crypto_drv;
2456 
2457 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2458 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2459 		cryptodev_driver_id);
2460 
2461 RTE_INIT(dpaa_sec_init_log);
2462 static void
2463 dpaa_sec_init_log(void)
2464 {
2465 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2466 	if (dpaa_logtype_sec >= 0)
2467 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2468 }
2469