xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 250c9eb3ca895127f21a729caf4a928eb2f04d2c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2018 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30 
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35 
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39 
40 enum rta_sec_era rta_sec_era;
41 
42 int dpaa_logtype_sec;
43 
44 static uint8_t cryptodev_driver_id;
45 
46 static __thread struct rte_crypto_op **dpaa_sec_ops;
47 static __thread int dpaa_sec_op_nb;
48 
49 static int
50 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
51 
52 static inline void
53 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
54 {
55 	if (!ctx->fd_status) {
56 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
57 	} else {
58 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
59 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
60 	}
61 
62 	/* report op status to sym->op and then free the ctx memory */
63 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
64 }
65 
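/* Allocate a per-op context from the session's context pool and clear its
 * SG table so a fresh compound frame can be built for this operation.
 */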
66 static inline struct dpaa_sec_op_ctx *
67 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
68 {
69 	struct dpaa_sec_op_ctx *ctx;
70 	int retval;
71 
72 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
73 	if (retval || !ctx) {
74 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75 		return NULL;
76 	}
77 	/*
78 	 * Clear SG memory. There are 16 SG entries of 16 Bytes each.
79 	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
80 	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
81 	 * each packet, and memset() is costlier than dcbz_64().
82 	 */
83 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
84 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
85 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
86 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
87 
88 	ctx->ctx_pool = ses->ctx_pool;
89 	ctx->vtop_offset = (size_t) ctx
90 				- rte_mempool_virt2iova(ctx);
91 
92 	return ctx;
93 }
94 
95 static inline rte_iova_t
96 dpaa_mem_vtop(void *vaddr)
97 {
98 	const struct rte_memseg *ms;
99 
100 	ms = rte_mem_virt2memseg(vaddr, NULL);
101 	if (ms)
102 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
103 	return (size_t)NULL;
104 }
105 
106 static inline void *
107 dpaa_mem_ptov(rte_iova_t paddr)
108 {
109 	return rte_mem_iova2virt(paddr);
110 }
111 
112 static void
113 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
114 		   struct qman_fq *fq,
115 		   const struct qm_mr_entry *msg)
116 {
117 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
118 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
119 }
120 
121 /* Initialize the queue with the destination channel set to the CAAM
122  * channel so that all packets in this queue are dispatched to CAAM.
123  */
124 static int
125 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
126 		 uint32_t fqid_out)
127 {
128 	struct qm_mcc_initfq fq_opts;
129 	uint32_t flags;
130 	int ret = -1;
131 
132 	/* Clear FQ options */
133 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
134 
135 	flags = QMAN_INITFQ_FLAG_SCHED;
136 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
137 			  QM_INITFQ_WE_CONTEXTB;
138 
139 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
140 	fq_opts.fqd.context_b = fqid_out;
141 	fq_opts.fqd.dest.channel = qm_channel_caam;
142 	fq_opts.fqd.dest.wq = 0;
143 
144 	fq_in->cb.ern  = ern_sec_fq_handler;
145 
146 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
147 
148 	ret = qman_init_fq(fq_in, flags, &fq_opts);
149 	if (unlikely(ret != 0))
150 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
151 
152 	return ret;
153 }
154 
155 /* Frames enqueued on in_fq are processed by CAAM, which puts the crypto result into out_fq */
156 static enum qman_cb_dqrr_result
157 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
158 		  struct qman_fq *fq __always_unused,
159 		  const struct qm_dqrr_entry *dqrr)
160 {
161 	const struct qm_fd *fd;
162 	struct dpaa_sec_job *job;
163 	struct dpaa_sec_op_ctx *ctx;
164 
165 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
166 		return qman_cb_dqrr_defer;
167 
168 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
169 		return qman_cb_dqrr_consume;
170 
171 	fd = &dqrr->fd;
172 	/* sg is embedded in an op ctx,
173 	 * sg[0] is for output
174 	 * sg[1] is for input
175 	 */
176 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
177 
178 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
179 	ctx->fd_status = fd->status;
180 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
181 		struct qm_sg_entry *sg_out;
182 		uint32_t len;
183 
184 		sg_out = &job->sg[0];
185 		hw_sg_to_cpu(sg_out);
186 		len = sg_out->length;
187 		ctx->op->sym->m_src->pkt_len = len;
188 		ctx->op->sym->m_src->data_len = len;
189 	}
190 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
191 	dpaa_sec_op_ending(ctx);
192 
193 	return qman_cb_dqrr_consume;
194 }
195 
196 /* caam result is put into this queue */
197 static int
198 dpaa_sec_init_tx(struct qman_fq *fq)
199 {
200 	int ret;
201 	struct qm_mcc_initfq opts;
202 	uint32_t flags;
203 
204 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
205 		QMAN_FQ_FLAG_DYNAMIC_FQID;
206 
207 	ret = qman_create_fq(0, flags, fq);
208 	if (unlikely(ret)) {
209 		DPAA_SEC_ERR("qman_create_fq failed");
210 		return ret;
211 	}
212 
213 	memset(&opts, 0, sizeof(opts));
214 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
215 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
216 
217 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
218 
219 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
220 	fq->cb.ern  = ern_sec_fq_handler;
221 
222 	ret = qman_init_fq(fq, 0, &opts);
223 	if (unlikely(ret)) {
224 		DPAA_SEC_ERR("unable to init caam source fq!");
225 		return ret;
226 	}
227 
228 	return ret;
229 }
230 
231 static inline int is_cipher_only(dpaa_sec_session *ses)
232 {
233 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
234 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
235 }
236 
237 static inline int is_auth_only(dpaa_sec_session *ses)
238 {
239 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
240 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
241 }
242 
243 static inline int is_aead(dpaa_sec_session *ses)
244 {
245 	return ((ses->cipher_alg == 0) &&
246 		(ses->auth_alg == 0) &&
247 		(ses->aead_alg != 0));
248 }
249 
250 static inline int is_auth_cipher(dpaa_sec_session *ses)
251 {
252 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
253 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
254 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
255 }
256 
257 static inline int is_proto_ipsec(dpaa_sec_session *ses)
258 {
259 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
260 }
261 
262 static inline int is_encode(dpaa_sec_session *ses)
263 {
264 	return ses->dir == DIR_ENC;
265 }
266 
267 static inline int is_decode(dpaa_sec_session *ses)
268 {
269 	return ses->dir == DIR_DEC;
270 }
271 
272 static inline void
273 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
274 {
275 	switch (ses->auth_alg) {
276 	case RTE_CRYPTO_AUTH_NULL:
277 		ses->digest_length = 0;
278 		break;
279 	case RTE_CRYPTO_AUTH_MD5_HMAC:
280 		alginfo_a->algtype =
281 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
282 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
283 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
284 		break;
285 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
286 		alginfo_a->algtype =
287 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
288 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
289 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
290 		break;
291 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
292 		alginfo_a->algtype =
293 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
294 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
295 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
296 		break;
297 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
298 		alginfo_a->algtype =
299 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
300 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
301 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
302 		break;
303 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
304 		alginfo_a->algtype =
305 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
306 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
307 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
308 		break;
309 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
310 		alginfo_a->algtype =
311 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
312 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
313 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
314 		break;
315 	default:
316 		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
317 	}
318 }
319 
320 static inline void
321 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
322 {
323 	switch (ses->cipher_alg) {
324 	case RTE_CRYPTO_CIPHER_NULL:
325 		break;
326 	case RTE_CRYPTO_CIPHER_AES_CBC:
327 		alginfo_c->algtype =
328 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
329 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
330 		alginfo_c->algmode = OP_ALG_AAI_CBC;
331 		break;
332 	case RTE_CRYPTO_CIPHER_3DES_CBC:
333 		alginfo_c->algtype =
334 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
335 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
336 		alginfo_c->algmode = OP_ALG_AAI_CBC;
337 		break;
338 	case RTE_CRYPTO_CIPHER_AES_CTR:
339 		alginfo_c->algtype =
340 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
341 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
342 		alginfo_c->algmode = OP_ALG_AAI_CTR;
343 		break;
344 	default:
345 		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
346 	}
347 }
348 
349 static inline void
350 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
351 {
352 	switch (ses->aead_alg) {
353 	case RTE_CRYPTO_AEAD_AES_GCM:
354 		alginfo->algtype = OP_ALG_ALGSEL_AES;
355 		alginfo->algmode = OP_ALG_AAI_GCM;
356 		break;
357 	default:
358 		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
359 	}
360 }
361 
362 
363 /* prepare command block of the session */
364 static int
365 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
366 {
367 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
368 	int32_t shared_desc_len = 0;
369 	struct sec_cdb *cdb = &ses->cdb;
370 	int err;
371 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
372 	int swap = false;
373 #else
374 	int swap = true;
375 #endif
376 
377 	memset(cdb, 0, sizeof(struct sec_cdb));
378 
379 	if (is_cipher_only(ses)) {
380 		caam_cipher_alg(ses, &alginfo_c);
381 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
382 			DPAA_SEC_ERR("not supported cipher alg");
383 			return -ENOTSUP;
384 		}
385 
386 		alginfo_c.key = (size_t)ses->cipher_key.data;
387 		alginfo_c.keylen = ses->cipher_key.length;
388 		alginfo_c.key_enc_flags = 0;
389 		alginfo_c.key_type = RTA_DATA_IMM;
390 
391 		shared_desc_len = cnstr_shdsc_blkcipher(
392 						cdb->sh_desc, true,
393 						swap, &alginfo_c,
394 						NULL,
395 						ses->iv.length,
396 						ses->dir);
397 	} else if (is_auth_only(ses)) {
398 		caam_auth_alg(ses, &alginfo_a);
399 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
400 			DPAA_SEC_ERR("not supported auth alg");
401 			return -ENOTSUP;
402 		}
403 
404 		alginfo_a.key = (size_t)ses->auth_key.data;
405 		alginfo_a.keylen = ses->auth_key.length;
406 		alginfo_a.key_enc_flags = 0;
407 		alginfo_a.key_type = RTA_DATA_IMM;
408 
409 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
410 						   swap, &alginfo_a,
411 						   !ses->dir,
412 						   ses->digest_length);
413 	} else if (is_aead(ses)) {
414 		caam_aead_alg(ses, &alginfo);
415 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
416 			DPAA_SEC_ERR("not supported aead alg");
417 			return -ENOTSUP;
418 		}
419 		alginfo.key = (size_t)ses->aead_key.data;
420 		alginfo.keylen = ses->aead_key.length;
421 		alginfo.key_enc_flags = 0;
422 		alginfo.key_type = RTA_DATA_IMM;
423 
424 		if (ses->dir == DIR_ENC)
425 			shared_desc_len = cnstr_shdsc_gcm_encap(
426 					cdb->sh_desc, true, swap,
427 					&alginfo,
428 					ses->iv.length,
429 					ses->digest_length);
430 		else
431 			shared_desc_len = cnstr_shdsc_gcm_decap(
432 					cdb->sh_desc, true, swap,
433 					&alginfo,
434 					ses->iv.length,
435 					ses->digest_length);
436 	} else {
437 		caam_cipher_alg(ses, &alginfo_c);
438 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
439 			DPAA_SEC_ERR("not supported cipher alg");
440 			return -ENOTSUP;
441 		}
442 
443 		alginfo_c.key = (size_t)ses->cipher_key.data;
444 		alginfo_c.keylen = ses->cipher_key.length;
445 		alginfo_c.key_enc_flags = 0;
446 		alginfo_c.key_type = RTA_DATA_IMM;
447 
448 		caam_auth_alg(ses, &alginfo_a);
449 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
450 			DPAA_SEC_ERR("not supported auth alg");
451 			return -ENOTSUP;
452 		}
453 
454 		alginfo_a.key = (size_t)ses->auth_key.data;
455 		alginfo_a.keylen = ses->auth_key.length;
456 		alginfo_a.key_enc_flags = 0;
457 		alginfo_a.key_type = RTA_DATA_IMM;
458 
459 		cdb->sh_desc[0] = alginfo_c.keylen;
460 		cdb->sh_desc[1] = alginfo_a.keylen;
461 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
462 				       MIN_JOB_DESC_SIZE,
463 				       (unsigned int *)cdb->sh_desc,
464 				       &cdb->sh_desc[2], 2);
465 
466 		if (err < 0) {
467 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
468 			return err;
469 		}
470 		if (cdb->sh_desc[2] & 1)
471 			alginfo_c.key_type = RTA_DATA_IMM;
472 		else {
473 			alginfo_c.key = (size_t)dpaa_mem_vtop(
474 						(void *)(size_t)alginfo_c.key);
475 			alginfo_c.key_type = RTA_DATA_PTR;
476 		}
477 		if (cdb->sh_desc[2] & (1<<1))
478 			alginfo_a.key_type = RTA_DATA_IMM;
479 		else {
480 			alginfo_a.key = (size_t)dpaa_mem_vtop(
481 						(void *)(size_t)alginfo_a.key);
482 			alginfo_a.key_type = RTA_DATA_PTR;
483 		}
484 		cdb->sh_desc[0] = 0;
485 		cdb->sh_desc[1] = 0;
486 		cdb->sh_desc[2] = 0;
487 		if (is_proto_ipsec(ses)) {
488 			if (ses->dir == DIR_ENC) {
489 				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
490 						cdb->sh_desc,
491 						true, swap, &ses->encap_pdb,
492 						(uint8_t *)&ses->ip4_hdr,
493 						&alginfo_c, &alginfo_a);
494 			} else if (ses->dir == DIR_DEC) {
495 				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
496 						cdb->sh_desc,
497 						true, swap, &ses->decap_pdb,
498 						&alginfo_c, &alginfo_a);
499 			}
500 		} else {
501 		/* Auth_only_len is set to 0 here and will be
502 			 * overwritten in the fd for each packet.
503 			 */
504 			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
505 					true, swap, &alginfo_c, &alginfo_a,
506 					ses->iv.length, 0,
507 					ses->digest_length, ses->dir);
508 		}
509 	}
510 
511 	if (shared_desc_len < 0) {
512 		DPAA_SEC_ERR("error in preparing command block");
513 		return shared_desc_len;
514 	}
515 
516 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
517 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
518 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
519 
520 	return 0;
521 }
522 
523 /* qp is lockless, should be accessed by only one thread */
524 static int
525 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
526 {
527 	struct qman_fq *fq;
528 	unsigned int pkts = 0;
529 	int ret;
530 	struct qm_dqrr_entry *dq;
531 
532 	fq = &qp->outq;
533 	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
534 				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
535 	if (ret)
536 		return 0;
537 
538 	do {
539 		const struct qm_fd *fd;
540 		struct dpaa_sec_job *job;
541 		struct dpaa_sec_op_ctx *ctx;
542 		struct rte_crypto_op *op;
543 
544 		dq = qman_dequeue(fq);
545 		if (!dq)
546 			continue;
547 
548 		fd = &dq->fd;
549 		/* sg is embedded in an op ctx,
550 		 * sg[0] is for output
551 		 * sg[1] is for input
552 		 */
553 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
554 
555 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
556 		ctx->fd_status = fd->status;
557 		op = ctx->op;
558 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
559 			struct qm_sg_entry *sg_out;
560 			uint32_t len;
561 
562 			sg_out = &job->sg[0];
563 			hw_sg_to_cpu(sg_out);
564 			len = sg_out->length;
565 			op->sym->m_src->pkt_len = len;
566 			op->sym->m_src->data_len = len;
567 		}
568 		if (!ctx->fd_status) {
569 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
570 		} else {
571 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
572 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
573 		}
574 		ops[pkts++] = op;
575 
576 		/* report op status to sym->op and then free the ctx memory */
577 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
578 
579 		qman_dqrr_consume(fq, dq);
580 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
581 
582 	return pkts;
583 }
584 
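/* Build a compound frame for an auth-only operation on a scatter-gather
 * mbuf: sg[0] points to the digest output, sg[1] is an extension entry
 * chaining the packet segments (plus the saved digest when verifying).
 */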
585 static inline struct dpaa_sec_job *
586 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
587 {
588 	struct rte_crypto_sym_op *sym = op->sym;
589 	struct rte_mbuf *mbuf = sym->m_src;
590 	struct dpaa_sec_job *cf;
591 	struct dpaa_sec_op_ctx *ctx;
592 	struct qm_sg_entry *sg, *out_sg, *in_sg;
593 	phys_addr_t start_addr;
594 	uint8_t *old_digest, extra_segs;
595 
596 	if (is_decode(ses))
597 		extra_segs = 3;
598 	else
599 		extra_segs = 2;
600 
601 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
602 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
603 				MAX_SG_ENTRIES);
604 		return NULL;
605 	}
606 	ctx = dpaa_sec_alloc_ctx(ses);
607 	if (!ctx)
608 		return NULL;
609 
610 	cf = &ctx->job;
611 	ctx->op = op;
612 	old_digest = ctx->digest;
613 
614 	/* output */
615 	out_sg = &cf->sg[0];
616 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
617 	out_sg->length = ses->digest_length;
618 	cpu_to_hw_sg(out_sg);
619 
620 	/* input */
621 	in_sg = &cf->sg[1];
622 	/* need to extend the input to a compound frame */
623 	in_sg->extension = 1;
624 	in_sg->final = 1;
625 	in_sg->length = sym->auth.data.length;
626 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
627 
628 	/* 1st seg */
629 	sg = in_sg + 1;
630 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
631 	sg->length = mbuf->data_len - sym->auth.data.offset;
632 	sg->offset = sym->auth.data.offset;
633 
634 	/* Successive segs */
635 	mbuf = mbuf->next;
636 	while (mbuf) {
637 		cpu_to_hw_sg(sg);
638 		sg++;
639 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
640 		sg->length = mbuf->data_len;
641 		mbuf = mbuf->next;
642 	}
643 
644 	if (is_decode(ses)) {
645 		/* Digest verification case */
646 		cpu_to_hw_sg(sg);
647 		sg++;
648 		rte_memcpy(old_digest, sym->auth.digest.data,
649 				ses->digest_length);
650 		start_addr = dpaa_mem_vtop(old_digest);
651 		qm_sg_entry_set64(sg, start_addr);
652 		sg->length = ses->digest_length;
653 		in_sg->length += ses->digest_length;
654 	} else {
655 		/* Digest calculation case */
656 		sg->length -= ses->digest_length;
657 	}
658 	sg->final = 1;
659 	cpu_to_hw_sg(sg);
660 	cpu_to_hw_sg(in_sg);
661 
662 	return cf;
663 }
664 
665 /**
666  * packet looks like:
667  *		|<----data_len------->|
668  *    |ip_header|ah_header|icv|payload|
669  *              ^
670  *		|
671  *	   mbuf->pkt.data
672  */
673 static inline struct dpaa_sec_job *
674 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
675 {
676 	struct rte_crypto_sym_op *sym = op->sym;
677 	struct rte_mbuf *mbuf = sym->m_src;
678 	struct dpaa_sec_job *cf;
679 	struct dpaa_sec_op_ctx *ctx;
680 	struct qm_sg_entry *sg;
681 	rte_iova_t start_addr;
682 	uint8_t *old_digest;
683 
684 	ctx = dpaa_sec_alloc_ctx(ses);
685 	if (!ctx)
686 		return NULL;
687 
688 	cf = &ctx->job;
689 	ctx->op = op;
690 	old_digest = ctx->digest;
691 
692 	start_addr = rte_pktmbuf_iova(mbuf);
693 	/* output */
694 	sg = &cf->sg[0];
695 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
696 	sg->length = ses->digest_length;
697 	cpu_to_hw_sg(sg);
698 
699 	/* input */
700 	sg = &cf->sg[1];
701 	if (is_decode(ses)) {
702 		/* need to extend the input to a compound frame */
703 		sg->extension = 1;
704 		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
705 		sg->length = sym->auth.data.length + ses->digest_length;
706 		sg->final = 1;
707 		cpu_to_hw_sg(sg);
708 
709 		sg = &cf->sg[2];
710 		/* hash result or digest, save digest first */
711 		rte_memcpy(old_digest, sym->auth.digest.data,
712 			   ses->digest_length);
713 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
714 		sg->length = sym->auth.data.length;
715 		cpu_to_hw_sg(sg);
716 
717 		/* let's check digest by hw */
718 		start_addr = dpaa_mem_vtop(old_digest);
719 		sg++;
720 		qm_sg_entry_set64(sg, start_addr);
721 		sg->length = ses->digest_length;
722 		sg->final = 1;
723 		cpu_to_hw_sg(sg);
724 	} else {
725 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
726 		sg->length = sym->auth.data.length;
727 		sg->final = 1;
728 		cpu_to_hw_sg(sg);
729 	}
730 
731 	return cf;
732 }
733 
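/* Build a compound frame for a cipher-only operation on a scatter-gather
 * mbuf: sg[0] chains the output segments, sg[1] chains the IV followed by
 * the input segments.
 */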
734 static inline struct dpaa_sec_job *
735 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
736 {
737 	struct rte_crypto_sym_op *sym = op->sym;
738 	struct dpaa_sec_job *cf;
739 	struct dpaa_sec_op_ctx *ctx;
740 	struct qm_sg_entry *sg, *out_sg, *in_sg;
741 	struct rte_mbuf *mbuf;
742 	uint8_t req_segs;
743 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
744 			ses->iv.offset);
745 
746 	if (sym->m_dst) {
747 		mbuf = sym->m_dst;
748 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
749 	} else {
750 		mbuf = sym->m_src;
751 		req_segs = mbuf->nb_segs * 2 + 3;
752 	}
753 
754 	if (req_segs > MAX_SG_ENTRIES) {
755 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
756 				MAX_SG_ENTRIES);
757 		return NULL;
758 	}
759 
760 	ctx = dpaa_sec_alloc_ctx(ses);
761 	if (!ctx)
762 		return NULL;
763 
764 	cf = &ctx->job;
765 	ctx->op = op;
766 
767 	/* output */
768 	out_sg = &cf->sg[0];
769 	out_sg->extension = 1;
770 	out_sg->length = sym->cipher.data.length;
771 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
772 	cpu_to_hw_sg(out_sg);
773 
774 	/* 1st seg */
775 	sg = &cf->sg[2];
776 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
777 	sg->length = mbuf->data_len - sym->cipher.data.offset;
778 	sg->offset = sym->cipher.data.offset;
779 
780 	/* Successive segs */
781 	mbuf = mbuf->next;
782 	while (mbuf) {
783 		cpu_to_hw_sg(sg);
784 		sg++;
785 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
786 		sg->length = mbuf->data_len;
787 		mbuf = mbuf->next;
788 	}
789 	sg->final = 1;
790 	cpu_to_hw_sg(sg);
791 
792 	/* input */
793 	mbuf = sym->m_src;
794 	in_sg = &cf->sg[1];
795 	in_sg->extension = 1;
796 	in_sg->final = 1;
797 	in_sg->length = sym->cipher.data.length + ses->iv.length;
798 
799 	sg++;
800 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
801 	cpu_to_hw_sg(in_sg);
802 
803 	/* IV */
804 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
805 	sg->length = ses->iv.length;
806 	cpu_to_hw_sg(sg);
807 
808 	/* 1st seg */
809 	sg++;
810 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
811 	sg->length = mbuf->data_len - sym->cipher.data.offset;
812 	sg->offset = sym->cipher.data.offset;
813 
814 	/* Successive segs */
815 	mbuf = mbuf->next;
816 	while (mbuf) {
817 		cpu_to_hw_sg(sg);
818 		sg++;
819 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
820 		sg->length = mbuf->data_len;
821 		mbuf = mbuf->next;
822 	}
823 	sg->final = 1;
824 	cpu_to_hw_sg(sg);
825 
826 	return cf;
827 }
828 
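/* Build a compound frame for a cipher-only operation on a contiguous
 * mbuf: sg[0] is the output buffer, sg[1] is an extension chaining the IV
 * and the input data.
 */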
829 static inline struct dpaa_sec_job *
830 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
831 {
832 	struct rte_crypto_sym_op *sym = op->sym;
833 	struct dpaa_sec_job *cf;
834 	struct dpaa_sec_op_ctx *ctx;
835 	struct qm_sg_entry *sg;
836 	rte_iova_t src_start_addr, dst_start_addr;
837 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
838 			ses->iv.offset);
839 
840 	ctx = dpaa_sec_alloc_ctx(ses);
841 	if (!ctx)
842 		return NULL;
843 
844 	cf = &ctx->job;
845 	ctx->op = op;
846 
847 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
848 
849 	if (sym->m_dst)
850 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
851 	else
852 		dst_start_addr = src_start_addr;
853 
854 	/* output */
855 	sg = &cf->sg[0];
856 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
857 	sg->length = sym->cipher.data.length + ses->iv.length;
858 	cpu_to_hw_sg(sg);
859 
860 	/* input */
861 	sg = &cf->sg[1];
862 
863 	/* need to extend the input to a compound frame */
864 	sg->extension = 1;
865 	sg->final = 1;
866 	sg->length = sym->cipher.data.length + ses->iv.length;
867 	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
868 	cpu_to_hw_sg(sg);
869 
870 	sg = &cf->sg[2];
871 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
872 	sg->length = ses->iv.length;
873 	cpu_to_hw_sg(sg);
874 
875 	sg++;
876 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
877 	sg->length = sym->cipher.data.length;
878 	sg->final = 1;
879 	cpu_to_hw_sg(sg);
880 
881 	return cf;
882 }
883 
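/* Build a compound frame for an AEAD (GCM) operation on a scatter-gather
 * mbuf: the output chain holds the data (plus the digest when encrypting),
 * the input chain holds the IV, optional AAD and data (plus the digest
 * when decrypting).
 */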
884 static inline struct dpaa_sec_job *
885 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
886 {
887 	struct rte_crypto_sym_op *sym = op->sym;
888 	struct dpaa_sec_job *cf;
889 	struct dpaa_sec_op_ctx *ctx;
890 	struct qm_sg_entry *sg, *out_sg, *in_sg;
891 	struct rte_mbuf *mbuf;
892 	uint8_t req_segs;
893 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
894 			ses->iv.offset);
895 
896 	if (sym->m_dst) {
897 		mbuf = sym->m_dst;
898 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
899 	} else {
900 		mbuf = sym->m_src;
901 		req_segs = mbuf->nb_segs * 2 + 4;
902 	}
903 
904 	if (ses->auth_only_len)
905 		req_segs++;
906 
907 	if (req_segs > MAX_SG_ENTRIES) {
908 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
909 				MAX_SG_ENTRIES);
910 		return NULL;
911 	}
912 
913 	ctx = dpaa_sec_alloc_ctx(ses);
914 	if (!ctx)
915 		return NULL;
916 
917 	cf = &ctx->job;
918 	ctx->op = op;
919 
920 	rte_prefetch0(cf->sg);
921 
922 	/* output */
923 	out_sg = &cf->sg[0];
924 	out_sg->extension = 1;
925 	if (is_encode(ses))
926 		out_sg->length = sym->aead.data.length + ses->auth_only_len
927 						+ ses->digest_length;
928 	else
929 		out_sg->length = sym->aead.data.length + ses->auth_only_len;
930 
931 	/* output sg entries */
932 	sg = &cf->sg[2];
933 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
934 	cpu_to_hw_sg(out_sg);
935 
936 	/* 1st seg */
937 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
938 	sg->length = mbuf->data_len - sym->aead.data.offset +
939 					ses->auth_only_len;
940 	sg->offset = sym->aead.data.offset - ses->auth_only_len;
941 
942 	/* Successive segs */
943 	mbuf = mbuf->next;
944 	while (mbuf) {
945 		cpu_to_hw_sg(sg);
946 		sg++;
947 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
948 		sg->length = mbuf->data_len;
949 		mbuf = mbuf->next;
950 	}
951 	sg->length -= ses->digest_length;
952 
953 	if (is_encode(ses)) {
954 		cpu_to_hw_sg(sg);
955 		/* set auth output */
956 		sg++;
957 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
958 		sg->length = ses->digest_length;
959 	}
960 	sg->final = 1;
961 	cpu_to_hw_sg(sg);
962 
963 	/* input */
964 	mbuf = sym->m_src;
965 	in_sg = &cf->sg[1];
966 	in_sg->extension = 1;
967 	in_sg->final = 1;
968 	if (is_encode(ses))
969 		in_sg->length = ses->iv.length + sym->aead.data.length
970 							+ ses->auth_only_len;
971 	else
972 		in_sg->length = ses->iv.length + sym->aead.data.length
973 				+ ses->auth_only_len + ses->digest_length;
974 
975 	/* input sg entries */
976 	sg++;
977 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
978 	cpu_to_hw_sg(in_sg);
979 
980 	/* 1st seg IV */
981 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
982 	sg->length = ses->iv.length;
983 	cpu_to_hw_sg(sg);
984 
985 	/* 2nd seg auth only */
986 	if (ses->auth_only_len) {
987 		sg++;
988 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
989 		sg->length = ses->auth_only_len;
990 		cpu_to_hw_sg(sg);
991 	}
992 
993 	/* 3rd seg */
994 	sg++;
995 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
996 	sg->length = mbuf->data_len - sym->aead.data.offset;
997 	sg->offset = sym->aead.data.offset;
998 
999 	/* Successive segs */
1000 	mbuf = mbuf->next;
1001 	while (mbuf) {
1002 		cpu_to_hw_sg(sg);
1003 		sg++;
1004 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1005 		sg->length = mbuf->data_len;
1006 		mbuf = mbuf->next;
1007 	}
1008 
1009 	if (is_decode(ses)) {
1010 		cpu_to_hw_sg(sg);
1011 		sg++;
1012 		memcpy(ctx->digest, sym->aead.digest.data,
1013 			ses->digest_length);
1014 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1015 		sg->length = ses->digest_length;
1016 	}
1017 	sg->final = 1;
1018 	cpu_to_hw_sg(sg);
1019 
1020 	return cf;
1021 }
1022 
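/* Build a compound frame for an AEAD (GCM) operation on a contiguous
 * mbuf; same layout as the scatter-gather variant above, but with single
 * data segments.
 */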
1023 static inline struct dpaa_sec_job *
1024 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1025 {
1026 	struct rte_crypto_sym_op *sym = op->sym;
1027 	struct dpaa_sec_job *cf;
1028 	struct dpaa_sec_op_ctx *ctx;
1029 	struct qm_sg_entry *sg;
1030 	uint32_t length = 0;
1031 	rte_iova_t src_start_addr, dst_start_addr;
1032 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1033 			ses->iv.offset);
1034 
1035 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1036 
1037 	if (sym->m_dst)
1038 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1039 	else
1040 		dst_start_addr = src_start_addr;
1041 
1042 	ctx = dpaa_sec_alloc_ctx(ses);
1043 	if (!ctx)
1044 		return NULL;
1045 
1046 	cf = &ctx->job;
1047 	ctx->op = op;
1048 
1049 	/* input */
1050 	rte_prefetch0(cf->sg);
1051 	sg = &cf->sg[2];
1052 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1053 	if (is_encode(ses)) {
1054 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1055 		sg->length = ses->iv.length;
1056 		length += sg->length;
1057 		cpu_to_hw_sg(sg);
1058 
1059 		sg++;
1060 		if (ses->auth_only_len) {
1061 			qm_sg_entry_set64(sg,
1062 					  dpaa_mem_vtop(sym->aead.aad.data));
1063 			sg->length = ses->auth_only_len;
1064 			length += sg->length;
1065 			cpu_to_hw_sg(sg);
1066 			sg++;
1067 		}
1068 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1069 		sg->length = sym->aead.data.length;
1070 		length += sg->length;
1071 		sg->final = 1;
1072 		cpu_to_hw_sg(sg);
1073 	} else {
1074 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1075 		sg->length = ses->iv.length;
1076 		length += sg->length;
1077 		cpu_to_hw_sg(sg);
1078 
1079 		sg++;
1080 		if (ses->auth_only_len) {
1081 			qm_sg_entry_set64(sg,
1082 					  dpaa_mem_vtop(sym->aead.aad.data));
1083 			sg->length = ses->auth_only_len;
1084 			length += sg->length;
1085 			cpu_to_hw_sg(sg);
1086 			sg++;
1087 		}
1088 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1089 		sg->length = sym->aead.data.length;
1090 		length += sg->length;
1091 		cpu_to_hw_sg(sg);
1092 
1093 		memcpy(ctx->digest, sym->aead.digest.data,
1094 		       ses->digest_length);
1095 		sg++;
1096 
1097 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1098 		sg->length = ses->digest_length;
1099 		length += sg->length;
1100 		sg->final = 1;
1101 		cpu_to_hw_sg(sg);
1102 	}
1103 	/* input compound frame */
1104 	cf->sg[1].length = length;
1105 	cf->sg[1].extension = 1;
1106 	cf->sg[1].final = 1;
1107 	cpu_to_hw_sg(&cf->sg[1]);
1108 
1109 	/* output */
1110 	sg++;
1111 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1112 	qm_sg_entry_set64(sg,
1113 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1114 	sg->length = sym->aead.data.length + ses->auth_only_len;
1115 	length = sg->length;
1116 	if (is_encode(ses)) {
1117 		cpu_to_hw_sg(sg);
1118 		/* set auth output */
1119 		sg++;
1120 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1121 		sg->length = ses->digest_length;
1122 		length += sg->length;
1123 	}
1124 	sg->final = 1;
1125 	cpu_to_hw_sg(sg);
1126 
1127 	/* output compound frame */
1128 	cf->sg[0].length = length;
1129 	cf->sg[0].extension = 1;
1130 	cpu_to_hw_sg(&cf->sg[0]);
1131 
1132 	return cf;
1133 }
1134 
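/* Build a compound frame for a chained cipher+auth operation on a
 * scatter-gather mbuf: the output chain holds the processed data (plus the
 * digest when encrypting), the input chain holds the IV and the data (plus
 * the digest when verifying).
 */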
1135 static inline struct dpaa_sec_job *
1136 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1137 {
1138 	struct rte_crypto_sym_op *sym = op->sym;
1139 	struct dpaa_sec_job *cf;
1140 	struct dpaa_sec_op_ctx *ctx;
1141 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1142 	struct rte_mbuf *mbuf;
1143 	uint8_t req_segs;
1144 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1145 			ses->iv.offset);
1146 
1147 	if (sym->m_dst) {
1148 		mbuf = sym->m_dst;
1149 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1150 	} else {
1151 		mbuf = sym->m_src;
1152 		req_segs = mbuf->nb_segs * 2 + 4;
1153 	}
1154 
1155 	if (req_segs > MAX_SG_ENTRIES) {
1156 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1157 				MAX_SG_ENTRIES);
1158 		return NULL;
1159 	}
1160 
1161 	ctx = dpaa_sec_alloc_ctx(ses);
1162 	if (!ctx)
1163 		return NULL;
1164 
1165 	cf = &ctx->job;
1166 	ctx->op = op;
1167 
1168 	rte_prefetch0(cf->sg);
1169 
1170 	/* output */
1171 	out_sg = &cf->sg[0];
1172 	out_sg->extension = 1;
1173 	if (is_encode(ses))
1174 		out_sg->length = sym->auth.data.length + ses->digest_length;
1175 	else
1176 		out_sg->length = sym->auth.data.length;
1177 
1178 	/* output sg entries */
1179 	sg = &cf->sg[2];
1180 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1181 	cpu_to_hw_sg(out_sg);
1182 
1183 	/* 1st seg */
1184 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1185 	sg->length = mbuf->data_len - sym->auth.data.offset;
1186 	sg->offset = sym->auth.data.offset;
1187 
1188 	/* Successive segs */
1189 	mbuf = mbuf->next;
1190 	while (mbuf) {
1191 		cpu_to_hw_sg(sg);
1192 		sg++;
1193 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1194 		sg->length = mbuf->data_len;
1195 		mbuf = mbuf->next;
1196 	}
1197 	sg->length -= ses->digest_length;
1198 
1199 	if (is_encode(ses)) {
1200 		cpu_to_hw_sg(sg);
1201 		/* set auth output */
1202 		sg++;
1203 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1204 		sg->length = ses->digest_length;
1205 	}
1206 	sg->final = 1;
1207 	cpu_to_hw_sg(sg);
1208 
1209 	/* input */
1210 	mbuf = sym->m_src;
1211 	in_sg = &cf->sg[1];
1212 	in_sg->extension = 1;
1213 	in_sg->final = 1;
1214 	if (is_encode(ses))
1215 		in_sg->length = ses->iv.length + sym->auth.data.length;
1216 	else
1217 		in_sg->length = ses->iv.length + sym->auth.data.length
1218 						+ ses->digest_length;
1219 
1220 	/* input sg entries */
1221 	sg++;
1222 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1223 	cpu_to_hw_sg(in_sg);
1224 
1225 	/* 1st seg IV */
1226 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1227 	sg->length = ses->iv.length;
1228 	cpu_to_hw_sg(sg);
1229 
1230 	/* 2nd seg */
1231 	sg++;
1232 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1233 	sg->length = mbuf->data_len - sym->auth.data.offset;
1234 	sg->offset = sym->auth.data.offset;
1235 
1236 	/* Successive segs */
1237 	mbuf = mbuf->next;
1238 	while (mbuf) {
1239 		cpu_to_hw_sg(sg);
1240 		sg++;
1241 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1242 		sg->length = mbuf->data_len;
1243 		mbuf = mbuf->next;
1244 	}
1245 
1246 	sg->length -= ses->digest_length;
1247 	if (is_decode(ses)) {
1248 		cpu_to_hw_sg(sg);
1249 		sg++;
1250 		memcpy(ctx->digest, sym->auth.digest.data,
1251 			ses->digest_length);
1252 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1253 		sg->length = ses->digest_length;
1254 	}
1255 	sg->final = 1;
1256 	cpu_to_hw_sg(sg);
1257 
1258 	return cf;
1259 }
1260 
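/* Build a compound frame for a chained cipher+auth operation on a
 * contiguous mbuf; single-segment variant of build_cipher_auth_sg().
 */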
1261 static inline struct dpaa_sec_job *
1262 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1263 {
1264 	struct rte_crypto_sym_op *sym = op->sym;
1265 	struct dpaa_sec_job *cf;
1266 	struct dpaa_sec_op_ctx *ctx;
1267 	struct qm_sg_entry *sg;
1268 	rte_iova_t src_start_addr, dst_start_addr;
1269 	uint32_t length = 0;
1270 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1271 			ses->iv.offset);
1272 
1273 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1274 	if (sym->m_dst)
1275 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1276 	else
1277 		dst_start_addr = src_start_addr;
1278 
1279 	ctx = dpaa_sec_alloc_ctx(ses);
1280 	if (!ctx)
1281 		return NULL;
1282 
1283 	cf = &ctx->job;
1284 	ctx->op = op;
1285 
1286 	/* input */
1287 	rte_prefetch0(cf->sg);
1288 	sg = &cf->sg[2];
1289 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1290 	if (is_encode(ses)) {
1291 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1292 		sg->length = ses->iv.length;
1293 		length += sg->length;
1294 		cpu_to_hw_sg(sg);
1295 
1296 		sg++;
1297 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1298 		sg->length = sym->auth.data.length;
1299 		length += sg->length;
1300 		sg->final = 1;
1301 		cpu_to_hw_sg(sg);
1302 	} else {
1303 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1304 		sg->length = ses->iv.length;
1305 		length += sg->length;
1306 		cpu_to_hw_sg(sg);
1307 
1308 		sg++;
1309 
1310 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1311 		sg->length = sym->auth.data.length;
1312 		length += sg->length;
1313 		cpu_to_hw_sg(sg);
1314 
1315 		memcpy(ctx->digest, sym->auth.digest.data,
1316 		       ses->digest_length);
1317 		sg++;
1318 
1319 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1320 		sg->length = ses->digest_length;
1321 		length += sg->length;
1322 		sg->final = 1;
1323 		cpu_to_hw_sg(sg);
1324 	}
1325 	/* input compound frame */
1326 	cf->sg[1].length = length;
1327 	cf->sg[1].extension = 1;
1328 	cf->sg[1].final = 1;
1329 	cpu_to_hw_sg(&cf->sg[1]);
1330 
1331 	/* output */
1332 	sg++;
1333 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1334 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1335 	sg->length = sym->cipher.data.length;
1336 	length = sg->length;
1337 	if (is_encode(ses)) {
1338 		cpu_to_hw_sg(sg);
1339 		/* set auth output */
1340 		sg++;
1341 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1342 		sg->length = ses->digest_length;
1343 		length += sg->length;
1344 	}
1345 	sg->final = 1;
1346 	cpu_to_hw_sg(sg);
1347 
1348 	/* output compound frame */
1349 	cf->sg[0].length = length;
1350 	cf->sg[0].extension = 1;
1351 	cpu_to_hw_sg(&cf->sg[0]);
1352 
1353 	return cf;
1354 }
1355 
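/* Build a compound frame for IPsec protocol offload: sg[1] covers the
 * whole input packet and sg[0] covers the destination buffer, so SEC can
 * write the resulting (encapsulated or decapsulated) packet.
 */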
1356 static inline struct dpaa_sec_job *
1357 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1358 {
1359 	struct rte_crypto_sym_op *sym = op->sym;
1360 	struct dpaa_sec_job *cf;
1361 	struct dpaa_sec_op_ctx *ctx;
1362 	struct qm_sg_entry *sg;
1363 	phys_addr_t src_start_addr, dst_start_addr;
1364 
1365 	ctx = dpaa_sec_alloc_ctx(ses);
1366 	if (!ctx)
1367 		return NULL;
1368 	cf = &ctx->job;
1369 	ctx->op = op;
1370 
1371 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1372 
1373 	if (sym->m_dst)
1374 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1375 	else
1376 		dst_start_addr = src_start_addr;
1377 
1378 	/* input */
1379 	sg = &cf->sg[1];
1380 	qm_sg_entry_set64(sg, src_start_addr);
1381 	sg->length = sym->m_src->pkt_len;
1382 	sg->final = 1;
1383 	cpu_to_hw_sg(sg);
1384 
1385 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1386 	/* output */
1387 	sg = &cf->sg[0];
1388 	qm_sg_entry_set64(sg, dst_start_addr);
1389 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1390 	cpu_to_hw_sg(sg);
1391 
1392 	return cf;
1393 }
1394 
1395 static uint16_t
1396 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1397 		       uint16_t nb_ops)
1398 {
1399 	/* Function to transmit the frames to the given device and queue pair */
1400 	uint32_t loop;
1401 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1402 	uint16_t num_tx = 0;
1403 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1404 	uint32_t frames_to_send;
1405 	struct rte_crypto_op *op;
1406 	struct dpaa_sec_job *cf;
1407 	dpaa_sec_session *ses;
1408 	uint32_t auth_only_len;
1409 	struct qman_fq *inq[DPAA_SEC_BURST];
1410 
1411 	while (nb_ops) {
1412 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1413 				DPAA_SEC_BURST : nb_ops;
1414 		for (loop = 0; loop < frames_to_send; loop++) {
1415 			op = *(ops++);
1416 			switch (op->sess_type) {
1417 			case RTE_CRYPTO_OP_WITH_SESSION:
1418 				ses = (dpaa_sec_session *)
1419 					get_sym_session_private_data(
1420 							op->sym->session,
1421 							cryptodev_driver_id);
1422 				break;
1423 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1424 				ses = (dpaa_sec_session *)
1425 					get_sec_session_private_data(
1426 							op->sym->sec_session);
1427 				break;
1428 			default:
1429 				DPAA_SEC_DP_ERR(
1430 					"sessionless crypto op not supported");
1431 				frames_to_send = loop;
1432 				nb_ops = loop;
1433 				goto send_pkts;
1434 			}
1435 			if (unlikely(!ses->qp || ses->qp != qp)) {
1436 				DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
1437 					     ses->qp, qp);
1438 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1439 					frames_to_send = loop;
1440 					nb_ops = loop;
1441 					goto send_pkts;
1442 				}
1443 			}
1444 
1445 			auth_only_len = op->sym->auth.data.length -
1446 						op->sym->cipher.data.length;
1447 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1448 				if (is_auth_only(ses)) {
1449 					cf = build_auth_only(op, ses);
1450 				} else if (is_cipher_only(ses)) {
1451 					cf = build_cipher_only(op, ses);
1452 				} else if (is_aead(ses)) {
1453 					cf = build_cipher_auth_gcm(op, ses);
1454 					auth_only_len = ses->auth_only_len;
1455 				} else if (is_auth_cipher(ses)) {
1456 					cf = build_cipher_auth(op, ses);
1457 				} else if (is_proto_ipsec(ses)) {
1458 					cf = build_proto(op, ses);
1459 				} else {
1460 					DPAA_SEC_DP_ERR("not supported ops");
1461 					frames_to_send = loop;
1462 					nb_ops = loop;
1463 					goto send_pkts;
1464 				}
1465 			} else {
1466 				if (is_auth_only(ses)) {
1467 					cf = build_auth_only_sg(op, ses);
1468 				} else if (is_cipher_only(ses)) {
1469 					cf = build_cipher_only_sg(op, ses);
1470 				} else if (is_aead(ses)) {
1471 					cf = build_cipher_auth_gcm_sg(op, ses);
1472 					auth_only_len = ses->auth_only_len;
1473 				} else if (is_auth_cipher(ses)) {
1474 					cf = build_cipher_auth_sg(op, ses);
1475 				} else {
1476 					DPAA_SEC_DP_ERR("not supported ops");
1477 					frames_to_send = loop;
1478 					nb_ops = loop;
1479 					goto send_pkts;
1480 				}
1481 			}
1482 			if (unlikely(!cf)) {
1483 				frames_to_send = loop;
1484 				nb_ops = loop;
1485 				goto send_pkts;
1486 			}
1487 
1488 			fd = &fds[loop];
1489 			inq[loop] = ses->inq;
1490 			fd->opaque_addr = 0;
1491 			fd->cmd = 0;
1492 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1493 			fd->_format1 = qm_fd_compound;
1494 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1495 			/* Auth_only_len is set to 0 in the descriptor; it is
1496 			 * overwritten here in fd.cmd, which updates
1497 			 * the DPOVRD register.
1498 			 */
1499 			if (auth_only_len)
1500 				fd->cmd = 0x80000000 | auth_only_len;
1501 
1502 		}
1503 send_pkts:
1504 		loop = 0;
1505 		while (loop < frames_to_send) {
1506 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1507 					frames_to_send - loop);
1508 		}
1509 		nb_ops -= frames_to_send;
1510 		num_tx += frames_to_send;
1511 	}
1512 
1513 	dpaa_qp->tx_pkts += num_tx;
1514 	dpaa_qp->tx_errs += nb_ops - num_tx;
1515 
1516 	return num_tx;
1517 }
1518 
1519 static uint16_t
1520 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1521 		       uint16_t nb_ops)
1522 {
1523 	uint16_t num_rx;
1524 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1525 
1526 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1527 
1528 	dpaa_qp->rx_pkts += num_rx;
1529 	dpaa_qp->rx_errs += nb_ops - num_rx;
1530 
1531 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1532 
1533 	return num_rx;
1534 }
1535 
1536 /** Release queue pair */
1537 static int
1538 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1539 			    uint16_t qp_id)
1540 {
1541 	struct dpaa_sec_dev_private *internals;
1542 	struct dpaa_sec_qp *qp = NULL;
1543 
1544 	PMD_INIT_FUNC_TRACE();
1545 
1546 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1547 
1548 	internals = dev->data->dev_private;
1549 	if (qp_id >= internals->max_nb_queue_pairs) {
1550 		DPAA_SEC_ERR("Max supported qpid %d",
1551 			     internals->max_nb_queue_pairs);
1552 		return -EINVAL;
1553 	}
1554 
1555 	qp = &internals->qps[qp_id];
1556 	qp->internals = NULL;
1557 	dev->data->queue_pairs[qp_id] = NULL;
1558 
1559 	return 0;
1560 }
1561 
1562 /** Setup a queue pair */
1563 static int
1564 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1565 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1566 		__rte_unused int socket_id,
1567 		__rte_unused struct rte_mempool *session_pool)
1568 {
1569 	struct dpaa_sec_dev_private *internals;
1570 	struct dpaa_sec_qp *qp = NULL;
1571 
1572 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1573 
1574 	internals = dev->data->dev_private;
1575 	if (qp_id >= internals->max_nb_queue_pairs) {
1576 		DPAA_SEC_ERR("Max supported qpid %d",
1577 			     internals->max_nb_queue_pairs);
1578 		return -EINVAL;
1579 	}
1580 
1581 	qp = &internals->qps[qp_id];
1582 	qp->internals = internals;
1583 	dev->data->queue_pairs[qp_id] = qp;
1584 
1585 	return 0;
1586 }
1587 
1588 /** Return the number of allocated queue pairs */
1589 static uint32_t
1590 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1591 {
1592 	PMD_INIT_FUNC_TRACE();
1593 
1594 	return dev->data->nb_queue_pairs;
1595 }
1596 
1597 /** Returns the size of session structure */
1598 static unsigned int
1599 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1600 {
1601 	PMD_INIT_FUNC_TRACE();
1602 
1603 	return sizeof(dpaa_sec_session);
1604 }
1605 
1606 static int
1607 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1608 		     struct rte_crypto_sym_xform *xform,
1609 		     dpaa_sec_session *session)
1610 {
1611 	session->cipher_alg = xform->cipher.algo;
1612 	session->iv.length = xform->cipher.iv.length;
1613 	session->iv.offset = xform->cipher.iv.offset;
1614 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1615 					       RTE_CACHE_LINE_SIZE);
1616 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1617 		DPAA_SEC_ERR("No Memory for cipher key");
1618 		return -ENOMEM;
1619 	}
1620 	session->cipher_key.length = xform->cipher.key.length;
1621 
1622 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1623 	       xform->cipher.key.length);
1624 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1625 			DIR_ENC : DIR_DEC;
1626 
1627 	return 0;
1628 }
1629 
1630 static int
1631 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1632 		   struct rte_crypto_sym_xform *xform,
1633 		   dpaa_sec_session *session)
1634 {
1635 	session->auth_alg = xform->auth.algo;
1636 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1637 					     RTE_CACHE_LINE_SIZE);
1638 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1639 		DPAA_SEC_ERR("No Memory for auth key");
1640 		return -ENOMEM;
1641 	}
1642 	session->auth_key.length = xform->auth.key.length;
1643 	session->digest_length = xform->auth.digest_length;
1644 
1645 	memcpy(session->auth_key.data, xform->auth.key.data,
1646 	       xform->auth.key.length);
1647 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1648 			DIR_ENC : DIR_DEC;
1649 
1650 	return 0;
1651 }
1652 
1653 static int
1654 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1655 		   struct rte_crypto_sym_xform *xform,
1656 		   dpaa_sec_session *session)
1657 {
1658 	session->aead_alg = xform->aead.algo;
1659 	session->iv.length = xform->aead.iv.length;
1660 	session->iv.offset = xform->aead.iv.offset;
1661 	session->auth_only_len = xform->aead.aad_length;
1662 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1663 					     RTE_CACHE_LINE_SIZE);
1664 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1665 		DPAA_SEC_ERR("No Memory for aead key");
1666 		return -ENOMEM;
1667 	}
1668 	session->aead_key.length = xform->aead.key.length;
1669 	session->digest_length = xform->aead.digest_length;
1670 
1671 	memcpy(session->aead_key.data, xform->aead.key.data,
1672 	       xform->aead.key.length);
1673 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1674 			DIR_ENC : DIR_DEC;
1675 
1676 	return 0;
1677 }
1678 
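/* Pick a free SEC input queue from the device's pool and mark it as
 * attached; returns NULL when all queues are in use.
 */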
1679 static struct qman_fq *
1680 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1681 {
1682 	unsigned int i;
1683 
1684 	for (i = 0; i < qi->max_nb_sessions; i++) {
1685 		if (qi->inq_attach[i] == 0) {
1686 			qi->inq_attach[i] = 1;
1687 			return &qi->inq[i];
1688 		}
1689 	}
1690 	DPAA_SEC_WARN("All sessions in use %x", qi->max_nb_sessions);
1691 
1692 	return NULL;
1693 }
1694 
1695 static int
1696 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1697 {
1698 	unsigned int i;
1699 
1700 	for (i = 0; i < qi->max_nb_sessions; i++) {
1701 		if (&qi->inq[i] == fq) {
1702 			qman_retire_fq(fq, NULL);
1703 			qman_oos_fq(fq);
1704 			qi->inq_attach[i] = 0;
1705 			return 0;
1706 		}
1707 	}
1708 	return -1;
1709 }
1710 
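/* Bind a session to a queue pair: prepare the session CDB, make sure the
 * calling thread has an affined DPAA portal, and schedule the session's
 * input queue to deliver results into the queue pair's output queue.
 */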
1711 static int
1712 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1713 {
1714 	int ret;
1715 
1716 	sess->qp = qp;
1717 	ret = dpaa_sec_prep_cdb(sess);
1718 	if (ret) {
1719 		DPAA_SEC_ERR("Unable to prepare sec cdb");
1720 		return -1;
1721 	}
1722 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1723 		ret = rte_dpaa_portal_init((void *)0);
1724 		if (ret) {
1725 			DPAA_SEC_ERR("Failure in affining portal");
1726 			return ret;
1727 		}
1728 	}
1729 	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1730 			       qman_fq_fqid(&qp->outq));
1731 	if (ret)
1732 		DPAA_SEC_ERR("Unable to init sec queue");
1733 
1734 	return ret;
1735 }
1736 
1737 static int
1738 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1739 			    struct rte_crypto_sym_xform *xform,	void *sess)
1740 {
1741 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1742 	dpaa_sec_session *session = sess;
1743 
1744 	PMD_INIT_FUNC_TRACE();
1745 
1746 	if (unlikely(sess == NULL)) {
1747 		DPAA_SEC_ERR("invalid session struct");
1748 		return -EINVAL;
1749 	}
1750 
1751 	/* Default IV length = 0 */
1752 	session->iv.length = 0;
1753 
1754 	/* Cipher Only */
1755 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1756 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1757 		dpaa_sec_cipher_init(dev, xform, session);
1758 
1759 	/* Authentication Only */
1760 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1761 		   xform->next == NULL) {
1762 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1763 		dpaa_sec_auth_init(dev, xform, session);
1764 
1765 	/* Cipher then Authenticate */
1766 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1767 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1768 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1769 			dpaa_sec_cipher_init(dev, xform, session);
1770 			dpaa_sec_auth_init(dev, xform->next, session);
1771 		} else {
1772 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
1773 			return -EINVAL;
1774 		}
1775 
1776 	/* Authenticate then Cipher */
1777 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1778 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1779 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1780 			dpaa_sec_auth_init(dev, xform, session);
1781 			dpaa_sec_cipher_init(dev, xform->next, session);
1782 		} else {
1783 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
1784 			return -EINVAL;
1785 		}
1786 
1787 	/* AEAD operation for AES-GCM kind of Algorithms */
1788 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1789 		   xform->next == NULL) {
1790 		dpaa_sec_aead_init(dev, xform, session);
1791 
1792 	} else {
1793 		DPAA_SEC_ERR("Invalid crypto type");
1794 		return -EINVAL;
1795 	}
1796 	session->ctx_pool = internals->ctx_pool;
1797 	session->inq = dpaa_sec_attach_rxq(internals);
1798 	if (session->inq == NULL) {
1799 		DPAA_SEC_ERR("unable to attach sec queue");
1800 		goto err1;
1801 	}
1802 
1803 	return 0;
1804 
1805 err1:
1806 	rte_free(session->cipher_key.data);
1807 	rte_free(session->auth_key.data);
1808 	memset(session, 0, sizeof(dpaa_sec_session));
1809 
1810 	return -EINVAL;
1811 }
1812 
1813 static int
1814 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
1815 		struct rte_crypto_sym_xform *xform,
1816 		struct rte_cryptodev_sym_session *sess,
1817 		struct rte_mempool *mempool)
1818 {
1819 	void *sess_private_data;
1820 	int ret;
1821 
1822 	PMD_INIT_FUNC_TRACE();
1823 
1824 	if (rte_mempool_get(mempool, &sess_private_data)) {
1825 		DPAA_SEC_ERR("Couldn't get object from session mempool");
1826 		return -ENOMEM;
1827 	}
1828 
1829 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1830 	if (ret != 0) {
1831 		DPAA_SEC_ERR("failed to configure session parameters");
1832 
1833 		/* Return session to mempool */
1834 		rte_mempool_put(mempool, sess_private_data);
1835 		return ret;
1836 	}
1837 
1838 	set_sym_session_private_data(sess, dev->driver_id,
1839 			sess_private_data);
1840 
1841 
1842 	return 0;
1843 }
1844 
1845 /** Clear the memory of session so it doesn't leave key material behind */
1846 static void
1847 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
1848 		struct rte_cryptodev_sym_session *sess)
1849 {
1850 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1851 	uint8_t index = dev->driver_id;
1852 	void *sess_priv = get_sym_session_private_data(sess, index);
1853 
1854 	PMD_INIT_FUNC_TRACE();
1855 
1856 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1857 
1858 	if (sess_priv) {
1859 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1860 
1861 		if (s->inq)
1862 			dpaa_sec_detach_rxq(qi, s->inq);
1863 		rte_free(s->cipher_key.data);
1864 		rte_free(s->auth_key.data);
1865 		memset(s, 0, sizeof(dpaa_sec_session));
1866 		set_sym_session_private_data(sess, index, NULL);
1867 		rte_mempool_put(sess_mp, sess_priv);
1868 	}
1869 }
1870 
1871 static int
1872 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
1873 			   struct rte_security_session_conf *conf,
1874 			   void *sess)
1875 {
1876 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1877 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1878 	struct rte_crypto_auth_xform *auth_xform;
1879 	struct rte_crypto_cipher_xform *cipher_xform;
1880 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
1881 
1882 	PMD_INIT_FUNC_TRACE();
1883 
1884 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1885 		cipher_xform = &conf->crypto_xform->cipher;
1886 		auth_xform = &conf->crypto_xform->next->auth;
1887 	} else {
1888 		auth_xform = &conf->crypto_xform->auth;
1889 		cipher_xform = &conf->crypto_xform->next->cipher;
1890 	}
1891 	session->proto_alg = conf->protocol;
1892 	session->cipher_key.data = rte_zmalloc(NULL,
1893 					       cipher_xform->key.length,
1894 					       RTE_CACHE_LINE_SIZE);
1895 	if (session->cipher_key.data == NULL &&
1896 			cipher_xform->key.length > 0) {
1897 		DPAA_SEC_ERR("No Memory for cipher key");
1898 		return -ENOMEM;
1899 	}
1900 
1901 	session->cipher_key.length = cipher_xform->key.length;
1902 	session->auth_key.data = rte_zmalloc(NULL,
1903 					auth_xform->key.length,
1904 					RTE_CACHE_LINE_SIZE);
1905 	if (session->auth_key.data == NULL &&
1906 			auth_xform->key.length > 0) {
1907 		DPAA_SEC_ERR("No Memory for auth key");
1908 		rte_free(session->cipher_key.data);
1909 		return -ENOMEM;
1910 	}
1911 	session->auth_key.length = auth_xform->key.length;
1912 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1913 			cipher_xform->key.length);
1914 	memcpy(session->auth_key.data, auth_xform->key.data,
1915 			auth_xform->key.length);
1916 
1917 	switch (auth_xform->algo) {
1918 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1919 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1920 		break;
1921 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1922 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1923 		break;
1924 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1925 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1926 		break;
1927 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1928 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1929 		break;
1930 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1931 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1932 		break;
1933 	case RTE_CRYPTO_AUTH_AES_CMAC:
1934 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1935 		break;
1936 	case RTE_CRYPTO_AUTH_NULL:
1937 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1938 		break;
1939 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1940 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1941 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1942 	case RTE_CRYPTO_AUTH_SHA1:
1943 	case RTE_CRYPTO_AUTH_SHA256:
1944 	case RTE_CRYPTO_AUTH_SHA512:
1945 	case RTE_CRYPTO_AUTH_SHA224:
1946 	case RTE_CRYPTO_AUTH_SHA384:
1947 	case RTE_CRYPTO_AUTH_MD5:
1948 	case RTE_CRYPTO_AUTH_AES_GMAC:
1949 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1950 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1951 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
1952 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
1953 			auth_xform->algo);
1954 		goto out;
1955 	default:
1956 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
1957 			auth_xform->algo);
1958 		goto out;
1959 	}
1960 
1961 	switch (cipher_xform->algo) {
1962 	case RTE_CRYPTO_CIPHER_AES_CBC:
1963 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
1964 		break;
1965 	case RTE_CRYPTO_CIPHER_3DES_CBC:
1966 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
1967 		break;
1968 	case RTE_CRYPTO_CIPHER_AES_CTR:
1969 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
1970 		break;
1971 	case RTE_CRYPTO_CIPHER_NULL:
1972 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
1973 	case RTE_CRYPTO_CIPHER_3DES_ECB:
1974 	case RTE_CRYPTO_CIPHER_AES_ECB:
1975 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
1976 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
1977 			cipher_xform->algo);
1978 		goto out;
1979 	default:
1980 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
1981 			cipher_xform->algo);
1982 		goto out;
1983 	}
1984 
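	/*
	 * Egress (encapsulation): pre-build the outer IPv4 tunnel header and
	 * an encap PDB that carries it inline (PDBOPTS_ESP_OIHI_PDB_INL).
	 * Ingress (decapsulation): only a decap PDB holding the outer IP
	 * header length is required.
	 */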
1985 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1986 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
1987 				sizeof(session->ip4_hdr));
1988 		session->ip4_hdr.ip_v = IPVERSION;
1989 		session->ip4_hdr.ip_hl = 5;
1990 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
1991 						sizeof(session->ip4_hdr));
1992 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
1993 		session->ip4_hdr.ip_id = 0;
1994 		session->ip4_hdr.ip_off = 0;
1995 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
1996 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
1997 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
1998 				: IPPROTO_AH;
1999 		session->ip4_hdr.ip_sum = 0;
2000 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2001 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2002 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2003 						(void *)&session->ip4_hdr,
2004 						sizeof(struct ip));
2005 
2006 		session->encap_pdb.options =
2007 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2008 			PDBOPTS_ESP_OIHI_PDB_INL |
2009 			PDBOPTS_ESP_IVSRC |
2010 			PDBHMO_ESP_ENCAP_DTTL;
2011 		session->encap_pdb.spi = ipsec_xform->spi;
2012 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2013 
2014 		session->dir = DIR_ENC;
2015 	} else if (ipsec_xform->direction ==
2016 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2017 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2018 		session->decap_pdb.options = sizeof(struct ip) << 16;
2019 		session->dir = DIR_DEC;
2020 	} else
2021 		goto out;
2022 	session->ctx_pool = internals->ctx_pool;
2023 	session->inq = dpaa_sec_attach_rxq(internals);
2024 	if (session->inq == NULL) {
2025 		DPAA_SEC_ERR("unable to attach sec queue");
2026 		goto out;
2027 	}
2028 
2030 	return 0;
2031 out:
2032 	rte_free(session->auth_key.data);
2033 	rte_free(session->cipher_key.data);
2034 	memset(session, 0, sizeof(dpaa_sec_session));
2035 	return -1;
2036 }
2037 
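/*
 * rte_security session_create op: take a private-session object from the
 * application-provided mempool and configure it for the requested protocol.
 * Only IPsec lookaside protocol offload is supported.
 */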
2038 static int
2039 dpaa_sec_security_session_create(void *dev,
2040 				 struct rte_security_session_conf *conf,
2041 				 struct rte_security_session *sess,
2042 				 struct rte_mempool *mempool)
2043 {
2044 	void *sess_private_data;
2045 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2046 	int ret;
2047 
2048 	if (rte_mempool_get(mempool, &sess_private_data)) {
2049 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2050 		return -ENOMEM;
2051 	}
2052 
2053 	switch (conf->protocol) {
2054 	case RTE_SECURITY_PROTOCOL_IPSEC:
2055 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2056 				sess_private_data);
2057 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
	}
2063 	if (ret != 0) {
2064 		DPAA_SEC_ERR("failed to configure session parameters");
2065 		/* Return session to mempool */
2066 		rte_mempool_put(mempool, sess_private_data);
2067 		return ret;
2068 	}
2069 
2070 	set_sec_session_private_data(sess, sess_private_data);
2071 
2072 	return ret;
2073 }
2074 
2075 /** Clear the memory of session so it doesn't leave key material behind */
2076 static int
2077 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2078 		struct rte_security_session *sess)
2079 {
2080 	PMD_INIT_FUNC_TRACE();
2081 	void *sess_priv = get_sec_session_private_data(sess);
2082 
2083 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2084 
2085 	if (sess_priv) {
2086 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2087 
2088 		rte_free(s->cipher_key.data);
2089 		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
2091 		set_sec_session_private_data(sess, NULL);
2092 		rte_mempool_put(sess_mp, sess_priv);
2093 	}
2094 	return 0;
2095 }
2097 
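/*
 * Device configure: create the per-device crypto context pool that every
 * session references (session->ctx_pool) for per-packet SEC descriptor
 * contexts. The pool is created once; reconfiguration reuses it.
 */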
2098 static int
2099 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2100 		       struct rte_cryptodev_config *config __rte_unused)
2101 {
2102 
2103 	char str[20];
2104 	struct dpaa_sec_dev_private *internals;
2105 
2106 	PMD_INIT_FUNC_TRACE();
2107 
2108 	internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2110 	if (!internals->ctx_pool) {
2111 		internals->ctx_pool = rte_mempool_create((const char *)str,
2112 							CTX_POOL_NUM_BUFS,
2113 							CTX_POOL_BUF_SIZE,
2114 							CTX_POOL_CACHE_SIZE, 0,
2115 							NULL, NULL, NULL, NULL,
2116 							SOCKET_ID_ANY, 0);
2117 		if (!internals->ctx_pool) {
			DPAA_SEC_ERR("%s create failed", str);
2119 			return -ENOMEM;
2120 		}
2121 	} else
2122 		DPAA_SEC_INFO("mempool already created for dev_id : %d",
2123 				dev->data->dev_id);
2124 
2125 	return 0;
2126 }
2127 
2128 static int
2129 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2130 {
2131 	PMD_INIT_FUNC_TRACE();
2132 	return 0;
2133 }
2134 
2135 static void
2136 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2137 {
2138 	PMD_INIT_FUNC_TRACE();
2139 }
2140 
2141 static int
2142 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2143 {
2144 	struct dpaa_sec_dev_private *internals;
2145 
2146 	PMD_INIT_FUNC_TRACE();
2147 
2148 	if (dev == NULL)
		return -EINVAL;
2150 
2151 	internals = dev->data->dev_private;
2152 	rte_mempool_free(internals->ctx_pool);
2153 	internals->ctx_pool = NULL;
2154 
2155 	return 0;
2156 }
2157 
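/* Report driver id, capabilities, feature flags and queue/session limits. */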
2158 static void
2159 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2160 		       struct rte_cryptodev_info *info)
2161 {
2162 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2163 
2164 	PMD_INIT_FUNC_TRACE();
2165 	if (info != NULL) {
2166 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2167 		info->feature_flags = dev->feature_flags;
2168 		info->capabilities = dpaa_sec_capabilities;
2169 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2170 		info->driver_id = cryptodev_driver_id;
2171 	}
2172 }
2173 
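/* Symmetric crypto PMD ops exposed to the rte_cryptodev framework. */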
2174 static struct rte_cryptodev_ops crypto_ops = {
2175 	.dev_configure	      = dpaa_sec_dev_configure,
2176 	.dev_start	      = dpaa_sec_dev_start,
2177 	.dev_stop	      = dpaa_sec_dev_stop,
2178 	.dev_close	      = dpaa_sec_dev_close,
2179 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2180 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2181 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2182 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2183 	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
2184 	.sym_session_configure    = dpaa_sec_sym_session_configure,
2185 	.sym_session_clear        = dpaa_sec_sym_session_clear
2186 };
2187 
2188 static const struct rte_security_capability *
2189 dpaa_sec_capabilities_get(void *device __rte_unused)
2190 {
2191 	return dpaa_sec_security_cap;
2192 }
2193 
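/*
 * rte_security ops: only session create/destroy and capability query are
 * implemented; session update, statistics and inline packet metadata are
 * not supported.
 */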
2194 struct rte_security_ops dpaa_sec_security_ops = {
2195 	.session_create = dpaa_sec_security_session_create,
2196 	.session_update = NULL,
2197 	.session_stats_get = NULL,
2198 	.session_destroy = dpaa_sec_security_session_destroy,
2199 	.set_pkt_metadata = NULL,
2200 	.capabilities_get = dpaa_sec_capabilities_get
2201 };
2202 
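/* Release the security context and the context pool owned by the device. */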
2203 static int
2204 dpaa_sec_uninit(struct rte_cryptodev *dev)
2205 {
2206 	struct dpaa_sec_dev_private *internals;
2207 
2208 	if (dev == NULL)
2209 		return -ENODEV;
2210 
2211 	internals = dev->data->dev_private;
2212 	rte_free(dev->security_ctx);
2213 
2214 	/* In case close has been called, internals->ctx_pool would be NULL */
2215 	rte_mempool_free(internals->ctx_pool);
2216 	rte_free(internals);
2217 
2218 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2219 		      dev->data->name, rte_socket_id());
2220 
2221 	return 0;
2222 }
2223 
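/*
 * Per-device init: hook up burst functions and feature flags, allocate the
 * rte_security context, initialize one SEC TX frame queue per queue pair
 * and pre-create one RX frame queue per supported session.
 */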
2224 static int
2225 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2226 {
2227 	struct dpaa_sec_dev_private *internals;
2228 	struct rte_security_ctx *security_instance;
2229 	struct dpaa_sec_qp *qp;
2230 	uint32_t i, flags;
2231 	int ret;
2232 
2233 	PMD_INIT_FUNC_TRACE();
2234 
2235 	cryptodev->driver_id = cryptodev_driver_id;
2236 	cryptodev->dev_ops = &crypto_ops;
2237 
2238 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2239 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2240 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2241 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2242 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2243 			RTE_CRYPTODEV_FF_SECURITY |
2244 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2245 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2246 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2247 			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2248 			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2249 
2250 	internals = cryptodev->data->dev_private;
2251 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2252 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2253 
2254 	/*
2255 	 * For secondary processes, we don't initialise any further as primary
2256 	 * has already done this work. Only check we don't need a different
2257 	 * RX function
2258 	 */
2259 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already initialized by primary process");
2261 		return 0;
2262 	}
2263 
2264 	/* Initialize security_ctx only for primary process*/
2265 	security_instance = rte_malloc("rte_security_instances_ops",
2266 				sizeof(struct rte_security_ctx), 0);
2267 	if (security_instance == NULL)
2268 		return -ENOMEM;
2269 	security_instance->device = (void *)cryptodev;
2270 	security_instance->ops = &dpaa_sec_security_ops;
2271 	security_instance->sess_cnt = 0;
2272 	cryptodev->security_ctx = security_instance;
2273 
2274 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2275 		/* init qman fq for queue pair */
2276 		qp = &internals->qps[i];
2277 		ret = dpaa_sec_init_tx(&qp->outq);
2278 		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2280 			goto init_error;
2281 		}
2282 	}
2283 
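	/*
	 * RX frame queues are created up front with dynamic FQIDs and are
	 * targeted at the SEC DC portal; sessions later claim a free queue
	 * through dpaa_sec_attach_rxq().
	 */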
2284 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2285 		QMAN_FQ_FLAG_TO_DCPORTAL;
2286 	for (i = 0; i < internals->max_nb_sessions; i++) {
2287 		/* create rx qman fq for sessions*/
2288 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2289 		if (unlikely(ret != 0)) {
2290 			DPAA_SEC_ERR("sec qman_create_fq failed");
2291 			goto init_error;
2292 		}
2293 	}
2294 
2295 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2296 	return 0;
2297 
2298 init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
2300 
2301 	dpaa_sec_uninit(cryptodev);
2302 	return -EFAULT;
2303 }
2304 
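/*
 * DPAA bus probe: allocate the cryptodev, allocate the private data in the
 * primary process, read the SEC era from the device tree if it has not been
 * configured yet, then run the common device init.
 */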
2305 static int
2306 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2307 				struct rte_dpaa_device *dpaa_dev)
2308 {
2309 	struct rte_cryptodev *cryptodev;
2310 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2311 
2312 	int retval;
2313 
	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);
2315 
2316 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2317 	if (cryptodev == NULL)
2318 		return -ENOMEM;
2319 
2320 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2321 		cryptodev->data->dev_private = rte_zmalloc_socket(
2322 					"cryptodev private structure",
2323 					sizeof(struct dpaa_sec_dev_private),
2324 					RTE_CACHE_LINE_SIZE,
2325 					rte_socket_id());
2326 
2327 		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
					"device data");
2330 	}
2331 
2332 	dpaa_dev->crypto_dev = cryptodev;
2333 	cryptodev->device = &dpaa_dev->device;
2334 	cryptodev->device->driver = &dpaa_drv->driver;
2335 
2336 	/* init user callbacks */
2337 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2338 
2339 	/* if sec device version is not configured */
2340 	if (!rta_get_sec_era()) {
2341 		const struct device_node *caam_node;
2342 
2343 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2344 			const uint32_t *prop = of_get_property(caam_node,
2345 					"fsl,sec-era",
2346 					NULL);
2347 			if (prop) {
2348 				rta_set_sec_era(
2349 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2350 				break;
2351 			}
2352 		}
2353 	}
2354 
2355 	/* Invoke PMD device initialization function */
2356 	retval = dpaa_sec_dev_init(cryptodev);
2357 	if (retval == 0)
2358 		return 0;
2359 
2360 	/* In case of error, cleanup is done */
2361 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2362 		rte_free(cryptodev->data->dev_private);
2363 
2364 	rte_cryptodev_pmd_release_device(cryptodev);
2365 
2366 	return -ENXIO;
2367 }
2368 
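/* DPAA bus remove: undo device init and destroy the cryptodev. */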
2369 static int
2370 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2371 {
2372 	struct rte_cryptodev *cryptodev;
2373 	int ret;
2374 
2375 	cryptodev = dpaa_dev->crypto_dev;
2376 	if (cryptodev == NULL)
2377 		return -ENODEV;
2378 
2379 	ret = dpaa_sec_uninit(cryptodev);
2380 	if (ret)
2381 		return ret;
2382 
2383 	return rte_cryptodev_pmd_destroy(cryptodev);
2384 }
2385 
2386 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2387 	.drv_type = FSL_DPAA_CRYPTO,
2388 	.driver = {
2389 		.name = "DPAA SEC PMD"
2390 	},
2391 	.probe = cryptodev_dpaa_sec_probe,
2392 	.remove = cryptodev_dpaa_sec_remove,
2393 };
2394 
2395 static struct cryptodev_driver dpaa_sec_crypto_drv;
2396 
2397 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2398 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2399 		cryptodev_driver_id);
2400 
2401 RTE_INIT(dpaa_sec_init_log);
2402 static void
2403 dpaa_sec_init_log(void)
2404 {
2405 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2406 	if (dpaa_logtype_sec >= 0)
2407 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2408 }
2409