xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision 3b8bcfcd96e64d199392550928be7c7665571bcb)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2018 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30 
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35 
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39 
40 enum rta_sec_era rta_sec_era;
41 
42 int dpaa_logtype_sec;
43 
44 static uint8_t cryptodev_driver_id;
45 
46 static __thread struct rte_crypto_op **dpaa_sec_ops;
47 static __thread int dpaa_sec_op_nb;
48 
49 static int
50 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
51 
52 static inline void
53 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
54 {
55 	if (!ctx->fd_status) {
56 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
57 	} else {
58 		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
59 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
60 	}
61 
62 	/* report op status to sym->op and then free the ctx memory */
63 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
64 }
65 
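/* Allocate a per-operation context (which embeds the compound-frame SG
 * table) from the session's context pool.
 */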
66 static inline struct dpaa_sec_op_ctx *
67 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
68 {
69 	struct dpaa_sec_op_ctx *ctx;
70 	int retval;
71 
72 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
73 	if (!ctx || retval) {
74 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75 		return NULL;
76 	}
77 	/*
78 	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
79 	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times
80 	 * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
81 	 * each packet, and memset() is costlier than dcbz_64().
82 	 */
83 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
84 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
85 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
86 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
87 
88 	ctx->ctx_pool = ses->ctx_pool;
89 	ctx->vtop_offset = (size_t) ctx
90 				- rte_mempool_virt2iova(ctx);
91 
92 	return ctx;
93 }
94 
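/* Translate a virtual address to an IO virtual address using the memseg
 * the address belongs to.
 */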
95 static inline rte_iova_t
96 dpaa_mem_vtop(void *vaddr)
97 {
98 	const struct rte_memseg *ms;
99 
100 	ms = rte_mem_virt2memseg(vaddr, NULL);
101 	if (ms)
102 		return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
103 	return (size_t)NULL;
104 }
105 
106 static inline void *
107 dpaa_mem_ptov(rte_iova_t paddr)
108 {
109 	return rte_mem_iova2virt(paddr);
110 }
111 
112 static void
113 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
114 		   struct qman_fq *fq,
115 		   const struct qm_mr_entry *msg)
116 {
117 	DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
118 			fq->fqid, msg->ern.rc, msg->ern.seqnum);
119 }
120 
121 /* Initialize the queue with the CAAM channel as the destination channel
122  * so that all packets in this queue can be dispatched to CAAM.
123  */
124 static int
125 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
126 		 uint32_t fqid_out)
127 {
128 	struct qm_mcc_initfq fq_opts;
129 	uint32_t flags;
130 	int ret = -1;
131 
132 	/* Clear FQ options */
133 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
134 
135 	flags = QMAN_INITFQ_FLAG_SCHED;
136 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
137 			  QM_INITFQ_WE_CONTEXTB;
138 
139 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
140 	fq_opts.fqd.context_b = fqid_out;
141 	fq_opts.fqd.dest.channel = qm_channel_caam;
142 	fq_opts.fqd.dest.wq = 0;
143 
144 	fq_in->cb.ern  = ern_sec_fq_handler;
145 
146 	DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
147 
148 	ret = qman_init_fq(fq_in, flags, &fq_opts);
149 	if (unlikely(ret != 0))
150 		DPAA_SEC_ERR("qman_init_fq failed %d", ret);
151 
152 	return ret;
153 }
154 
155 /* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
156 static enum qman_cb_dqrr_result
157 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
158 		  struct qman_fq *fq __always_unused,
159 		  const struct qm_dqrr_entry *dqrr)
160 {
161 	const struct qm_fd *fd;
162 	struct dpaa_sec_job *job;
163 	struct dpaa_sec_op_ctx *ctx;
164 
165 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
166 		return qman_cb_dqrr_defer;
167 
168 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
169 		return qman_cb_dqrr_consume;
170 
171 	fd = &dqrr->fd;
172 	/* sg is embedded in an op ctx:
173 	 * sg[0] is for output,
174 	 * sg[1] is for input
175 	 */
176 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
177 
178 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
179 	ctx->fd_status = fd->status;
180 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
181 		struct qm_sg_entry *sg_out;
182 		uint32_t len;
183 
184 		sg_out = &job->sg[0];
185 		hw_sg_to_cpu(sg_out);
186 		len = sg_out->length;
187 		ctx->op->sym->m_src->pkt_len = len;
188 		ctx->op->sym->m_src->data_len = len;
189 	}
190 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
191 	dpaa_sec_op_ending(ctx);
192 
193 	return qman_cb_dqrr_consume;
194 }
195 
196 /* The CAAM result is put into this queue */
197 static int
198 dpaa_sec_init_tx(struct qman_fq *fq)
199 {
200 	int ret;
201 	struct qm_mcc_initfq opts;
202 	uint32_t flags;
203 
204 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
205 		QMAN_FQ_FLAG_DYNAMIC_FQID;
206 
207 	ret = qman_create_fq(0, flags, fq);
208 	if (unlikely(ret)) {
209 		DPAA_SEC_ERR("qman_create_fq failed");
210 		return ret;
211 	}
212 
213 	memset(&opts, 0, sizeof(opts));
214 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
215 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
216 
217 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
218 
219 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
220 	fq->cb.ern  = ern_sec_fq_handler;
221 
222 	ret = qman_init_fq(fq, 0, &opts);
223 	if (unlikely(ret)) {
224 		DPAA_SEC_ERR("unable to init caam source fq!");
225 		return ret;
226 	}
227 
228 	return ret;
229 }
230 
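/* Session classification helpers; they select which shared descriptor and
 * job layout is used for a session.
 */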
231 static inline int is_cipher_only(dpaa_sec_session *ses)
232 {
233 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
234 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
235 }
236 
237 static inline int is_auth_only(dpaa_sec_session *ses)
238 {
239 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
240 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
241 }
242 
243 static inline int is_aead(dpaa_sec_session *ses)
244 {
245 	return ((ses->cipher_alg == 0) &&
246 		(ses->auth_alg == 0) &&
247 		(ses->aead_alg != 0));
248 }
249 
250 static inline int is_auth_cipher(dpaa_sec_session *ses)
251 {
252 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
253 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
254 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
255 }
256 
257 static inline int is_proto_ipsec(dpaa_sec_session *ses)
258 {
259 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
260 }
261 
262 static inline int is_encode(dpaa_sec_session *ses)
263 {
264 	return ses->dir == DIR_ENC;
265 }
266 
267 static inline int is_decode(dpaa_sec_session *ses)
268 {
269 	return ses->dir == DIR_DEC;
270 }
271 
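/* The caam_*_alg() helpers below translate the session algorithms into CAAM
 * algorithm/mode selectors. For IPsec protocol descriptors the OP_PCL_IPSEC_*
 * identifiers are used instead of the OP_ALG_ALGSEL_* ones.
 */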
272 static inline void
273 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
274 {
275 	switch (ses->auth_alg) {
276 	case RTE_CRYPTO_AUTH_NULL:
277 		ses->digest_length = 0;
278 		break;
279 	case RTE_CRYPTO_AUTH_MD5_HMAC:
280 		alginfo_a->algtype =
281 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
282 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
283 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
284 		break;
285 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
286 		alginfo_a->algtype =
287 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
288 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
289 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
290 		break;
291 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
292 		alginfo_a->algtype =
293 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
294 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
295 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
296 		break;
297 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
298 		alginfo_a->algtype =
299 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
300 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
301 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
302 		break;
303 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
304 		alginfo_a->algtype =
305 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
306 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
307 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
308 		break;
309 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
310 		alginfo_a->algtype =
311 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
312 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
313 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
314 		break;
315 	default:
316 		DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
317 	}
318 }
319 
320 static inline void
321 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
322 {
323 	switch (ses->cipher_alg) {
324 	case RTE_CRYPTO_CIPHER_NULL:
325 		break;
326 	case RTE_CRYPTO_CIPHER_AES_CBC:
327 		alginfo_c->algtype =
328 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
329 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
330 		alginfo_c->algmode = OP_ALG_AAI_CBC;
331 		break;
332 	case RTE_CRYPTO_CIPHER_3DES_CBC:
333 		alginfo_c->algtype =
334 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
335 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
336 		alginfo_c->algmode = OP_ALG_AAI_CBC;
337 		break;
338 	case RTE_CRYPTO_CIPHER_AES_CTR:
339 		alginfo_c->algtype =
340 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
341 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
342 		alginfo_c->algmode = OP_ALG_AAI_CTR;
343 		break;
344 	default:
345 		DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
346 	}
347 }
348 
349 static inline void
350 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
351 {
352 	switch (ses->aead_alg) {
353 	case RTE_CRYPTO_AEAD_AES_GCM:
354 		alginfo->algtype = OP_ALG_ALGSEL_AES;
355 		alginfo->algmode = OP_ALG_AAI_GCM;
356 		break;
357 	default:
358 		DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
359 	}
360 }
361 
362 
363 /* prepare command block of the session */
364 static int
365 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
366 {
367 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
368 	int32_t shared_desc_len = 0;
369 	struct sec_cdb *cdb = &ses->cdb;
370 	int err;
371 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
372 	int swap = false;
373 #else
374 	int swap = true;
375 #endif
376 
377 	memset(cdb, 0, sizeof(struct sec_cdb));
378 
379 	if (is_cipher_only(ses)) {
380 		caam_cipher_alg(ses, &alginfo_c);
381 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
382 			DPAA_SEC_ERR("not supported cipher alg");
383 			return -ENOTSUP;
384 		}
385 
386 		alginfo_c.key = (size_t)ses->cipher_key.data;
387 		alginfo_c.keylen = ses->cipher_key.length;
388 		alginfo_c.key_enc_flags = 0;
389 		alginfo_c.key_type = RTA_DATA_IMM;
390 
391 		shared_desc_len = cnstr_shdsc_blkcipher(
392 						cdb->sh_desc, true,
393 						swap, &alginfo_c,
394 						NULL,
395 						ses->iv.length,
396 						ses->dir);
397 	} else if (is_auth_only(ses)) {
398 		caam_auth_alg(ses, &alginfo_a);
399 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
400 			DPAA_SEC_ERR("not supported auth alg");
401 			return -ENOTSUP;
402 		}
403 
404 		alginfo_a.key = (size_t)ses->auth_key.data;
405 		alginfo_a.keylen = ses->auth_key.length;
406 		alginfo_a.key_enc_flags = 0;
407 		alginfo_a.key_type = RTA_DATA_IMM;
408 
409 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
410 						   swap, &alginfo_a,
411 						   !ses->dir,
412 						   ses->digest_length);
413 	} else if (is_aead(ses)) {
414 		caam_aead_alg(ses, &alginfo);
415 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
416 			DPAA_SEC_ERR("not supported aead alg");
417 			return -ENOTSUP;
418 		}
419 		alginfo.key = (size_t)ses->aead_key.data;
420 		alginfo.keylen = ses->aead_key.length;
421 		alginfo.key_enc_flags = 0;
422 		alginfo.key_type = RTA_DATA_IMM;
423 
424 		if (ses->dir == DIR_ENC)
425 			shared_desc_len = cnstr_shdsc_gcm_encap(
426 					cdb->sh_desc, true, swap,
427 					&alginfo,
428 					ses->iv.length,
429 					ses->digest_length);
430 		else
431 			shared_desc_len = cnstr_shdsc_gcm_decap(
432 					cdb->sh_desc, true, swap,
433 					&alginfo,
434 					ses->iv.length,
435 					ses->digest_length);
436 	} else {
437 		caam_cipher_alg(ses, &alginfo_c);
438 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
439 			DPAA_SEC_ERR("not supported cipher alg");
440 			return -ENOTSUP;
441 		}
442 
443 		alginfo_c.key = (size_t)ses->cipher_key.data;
444 		alginfo_c.keylen = ses->cipher_key.length;
445 		alginfo_c.key_enc_flags = 0;
446 		alginfo_c.key_type = RTA_DATA_IMM;
447 
448 		caam_auth_alg(ses, &alginfo_a);
449 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
450 			DPAA_SEC_ERR("not supported auth alg");
451 			return -ENOTSUP;
452 		}
453 
454 		alginfo_a.key = (size_t)ses->auth_key.data;
455 		alginfo_a.keylen = ses->auth_key.length;
456 		alginfo_a.key_enc_flags = 0;
457 		alginfo_a.key_type = RTA_DATA_IMM;
458 
459 		cdb->sh_desc[0] = alginfo_c.keylen;
460 		cdb->sh_desc[1] = alginfo_a.keylen;
461 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
462 				       MIN_JOB_DESC_SIZE,
463 				       (unsigned int *)cdb->sh_desc,
464 				       &cdb->sh_desc[2], 2);
465 
466 		if (err < 0) {
467 			DPAA_SEC_ERR("Crypto: Incorrect key lengths");
468 			return err;
469 		}
470 		if (cdb->sh_desc[2] & 1)
471 			alginfo_c.key_type = RTA_DATA_IMM;
472 		else {
473 			alginfo_c.key = (size_t)dpaa_mem_vtop(
474 						(void *)(size_t)alginfo_c.key);
475 			alginfo_c.key_type = RTA_DATA_PTR;
476 		}
477 		if (cdb->sh_desc[2] & (1<<1))
478 			alginfo_a.key_type = RTA_DATA_IMM;
479 		else {
480 			alginfo_a.key = (size_t)dpaa_mem_vtop(
481 						(void *)(size_t)alginfo_a.key);
482 			alginfo_a.key_type = RTA_DATA_PTR;
483 		}
484 		cdb->sh_desc[0] = 0;
485 		cdb->sh_desc[1] = 0;
486 		cdb->sh_desc[2] = 0;
487 		if (is_proto_ipsec(ses)) {
488 			if (ses->dir == DIR_ENC) {
489 				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
490 						cdb->sh_desc,
491 						true, swap, &ses->encap_pdb,
492 						(uint8_t *)&ses->ip4_hdr,
493 						&alginfo_c, &alginfo_a);
494 			} else if (ses->dir == DIR_DEC) {
495 				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
496 						cdb->sh_desc,
497 						true, swap, &ses->decap_pdb,
498 						&alginfo_c, &alginfo_a);
499 			}
500 		} else {
501 			/* Auth_only_len is set to 0 here; it will be
502 			 * overwritten in the fd for each packet.
503 			 */
504 			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
505 					true, swap, &alginfo_c, &alginfo_a,
506 					ses->iv.length, 0,
507 					ses->digest_length, ses->dir);
508 		}
509 	}
510 
511 	if (shared_desc_len < 0) {
512 		DPAA_SEC_ERR("error in preparing command block");
513 		return shared_desc_len;
514 	}
515 
516 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
517 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
518 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
519 
520 	return 0;
521 }
522 
523 /* qp is lockless; it must be accessed by only one thread */
524 static int
525 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
526 {
527 	struct qman_fq *fq;
528 	unsigned int pkts = 0;
529 	int ret;
530 	struct qm_dqrr_entry *dq;
531 
532 	fq = &qp->outq;
533 	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
534 				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
535 	if (ret)
536 		return 0;
537 
538 	do {
539 		const struct qm_fd *fd;
540 		struct dpaa_sec_job *job;
541 		struct dpaa_sec_op_ctx *ctx;
542 		struct rte_crypto_op *op;
543 
544 		dq = qman_dequeue(fq);
545 		if (!dq)
546 			continue;
547 
548 		fd = &dq->fd;
549 		/* sg is embedded in an op ctx:
550 		 * sg[0] is for output,
551 		 * sg[1] is for input
552 		 */
553 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
554 
555 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
556 		ctx->fd_status = fd->status;
557 		op = ctx->op;
558 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
559 			struct qm_sg_entry *sg_out;
560 			uint32_t len;
561 
562 			sg_out = &job->sg[0];
563 			hw_sg_to_cpu(sg_out);
564 			len = sg_out->length;
565 			op->sym->m_src->pkt_len = len;
566 			op->sym->m_src->data_len = len;
567 		}
568 		if (!ctx->fd_status) {
569 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
570 		} else {
571 			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
572 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
573 		}
574 		ops[pkts++] = op;
575 
576 		/* report op status to sym->op and then free the ctx memory */
577 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
578 
579 		qman_dqrr_consume(fq, dq);
580 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
581 
582 	return pkts;
583 }
584 
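/* Build a compound frame descriptor for an auth-only operation on a
 * multi-segment (scatter-gather) mbuf.
 */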
585 static inline struct dpaa_sec_job *
586 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
587 {
588 	struct rte_crypto_sym_op *sym = op->sym;
589 	struct rte_mbuf *mbuf = sym->m_src;
590 	struct dpaa_sec_job *cf;
591 	struct dpaa_sec_op_ctx *ctx;
592 	struct qm_sg_entry *sg, *out_sg, *in_sg;
593 	phys_addr_t start_addr;
594 	uint8_t *old_digest, extra_segs;
595 
596 	if (is_decode(ses))
597 		extra_segs = 3;
598 	else
599 		extra_segs = 2;
600 
601 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
602 		DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
603 				MAX_SG_ENTRIES);
604 		return NULL;
605 	}
606 	ctx = dpaa_sec_alloc_ctx(ses);
607 	if (!ctx)
608 		return NULL;
609 
610 	cf = &ctx->job;
611 	ctx->op = op;
612 	old_digest = ctx->digest;
613 
614 	/* output */
615 	out_sg = &cf->sg[0];
616 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
617 	out_sg->length = ses->digest_length;
618 	cpu_to_hw_sg(out_sg);
619 
620 	/* input */
621 	in_sg = &cf->sg[1];
622 	/* need to extend the input to a compound frame */
623 	in_sg->extension = 1;
624 	in_sg->final = 1;
625 	in_sg->length = sym->auth.data.length;
626 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
627 
628 	/* 1st seg */
629 	sg = in_sg + 1;
630 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
631 	sg->length = mbuf->data_len - sym->auth.data.offset;
632 	sg->offset = sym->auth.data.offset;
633 
634 	/* Successive segs */
635 	mbuf = mbuf->next;
636 	while (mbuf) {
637 		cpu_to_hw_sg(sg);
638 		sg++;
639 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
640 		sg->length = mbuf->data_len;
641 		mbuf = mbuf->next;
642 	}
643 
644 	if (is_decode(ses)) {
645 		/* Digest verification case */
646 		cpu_to_hw_sg(sg);
647 		sg++;
648 		rte_memcpy(old_digest, sym->auth.digest.data,
649 				ses->digest_length);
650 		start_addr = dpaa_mem_vtop(old_digest);
651 		qm_sg_entry_set64(sg, start_addr);
652 		sg->length = ses->digest_length;
653 		in_sg->length += ses->digest_length;
654 	} else {
655 		/* Digest calculation case */
656 		sg->length -= ses->digest_length;
657 	}
658 	sg->final = 1;
659 	cpu_to_hw_sg(sg);
660 	cpu_to_hw_sg(in_sg);
661 
662 	return cf;
663 }
664 
665 /**
666  * packet looks like:
667  *		|<----data_len------->|
668  *    |ip_header|ah_header|icv|payload|
669  *              ^
670  *		|
671  *	   mbuf->pkt.data
672  */
673 static inline struct dpaa_sec_job *
674 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
675 {
676 	struct rte_crypto_sym_op *sym = op->sym;
677 	struct rte_mbuf *mbuf = sym->m_src;
678 	struct dpaa_sec_job *cf;
679 	struct dpaa_sec_op_ctx *ctx;
680 	struct qm_sg_entry *sg;
681 	rte_iova_t start_addr;
682 	uint8_t *old_digest;
683 
684 	ctx = dpaa_sec_alloc_ctx(ses);
685 	if (!ctx)
686 		return NULL;
687 
688 	cf = &ctx->job;
689 	ctx->op = op;
690 	old_digest = ctx->digest;
691 
692 	start_addr = rte_pktmbuf_iova(mbuf);
693 	/* output */
694 	sg = &cf->sg[0];
695 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
696 	sg->length = ses->digest_length;
697 	cpu_to_hw_sg(sg);
698 
699 	/* input */
700 	sg = &cf->sg[1];
701 	if (is_decode(ses)) {
702 		/* need to extend the input to a compound frame */
703 		sg->extension = 1;
704 		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
705 		sg->length = sym->auth.data.length + ses->digest_length;
706 		sg->final = 1;
707 		cpu_to_hw_sg(sg);
708 
709 		sg = &cf->sg[2];
710 		/* hash result or digest, save digest first */
711 		rte_memcpy(old_digest, sym->auth.digest.data,
712 			   ses->digest_length);
713 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
714 		sg->length = sym->auth.data.length;
715 		cpu_to_hw_sg(sg);
716 
717 		/* let's check digest by hw */
718 		start_addr = dpaa_mem_vtop(old_digest);
719 		sg++;
720 		qm_sg_entry_set64(sg, start_addr);
721 		sg->length = ses->digest_length;
722 		sg->final = 1;
723 		cpu_to_hw_sg(sg);
724 	} else {
725 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
726 		sg->length = sym->auth.data.length;
727 		sg->final = 1;
728 		cpu_to_hw_sg(sg);
729 	}
730 
731 	return cf;
732 }
733 
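/* Build a compound frame descriptor for a cipher-only operation on
 * multi-segment mbufs: sg[0] and sg[1] are extension entries pointing to
 * per-segment output and input SG tables (the input starts with the IV).
 */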
734 static inline struct dpaa_sec_job *
735 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
736 {
737 	struct rte_crypto_sym_op *sym = op->sym;
738 	struct dpaa_sec_job *cf;
739 	struct dpaa_sec_op_ctx *ctx;
740 	struct qm_sg_entry *sg, *out_sg, *in_sg;
741 	struct rte_mbuf *mbuf;
742 	uint8_t req_segs;
743 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
744 			ses->iv.offset);
745 
746 	if (sym->m_dst) {
747 		mbuf = sym->m_dst;
748 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
749 	} else {
750 		mbuf = sym->m_src;
751 		req_segs = mbuf->nb_segs * 2 + 3;
752 	}
753 
754 	if (req_segs > MAX_SG_ENTRIES) {
755 		DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
756 				MAX_SG_ENTRIES);
757 		return NULL;
758 	}
759 
760 	ctx = dpaa_sec_alloc_ctx(ses);
761 	if (!ctx)
762 		return NULL;
763 
764 	cf = &ctx->job;
765 	ctx->op = op;
766 
767 	/* output */
768 	out_sg = &cf->sg[0];
769 	out_sg->extension = 1;
770 	out_sg->length = sym->cipher.data.length;
771 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
772 	cpu_to_hw_sg(out_sg);
773 
774 	/* 1st seg */
775 	sg = &cf->sg[2];
776 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
777 	sg->length = mbuf->data_len - sym->cipher.data.offset;
778 	sg->offset = sym->cipher.data.offset;
779 
780 	/* Successive segs */
781 	mbuf = mbuf->next;
782 	while (mbuf) {
783 		cpu_to_hw_sg(sg);
784 		sg++;
785 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
786 		sg->length = mbuf->data_len;
787 		mbuf = mbuf->next;
788 	}
789 	sg->final = 1;
790 	cpu_to_hw_sg(sg);
791 
792 	/* input */
793 	mbuf = sym->m_src;
794 	in_sg = &cf->sg[1];
795 	in_sg->extension = 1;
796 	in_sg->final = 1;
797 	in_sg->length = sym->cipher.data.length + ses->iv.length;
798 
799 	sg++;
800 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
801 	cpu_to_hw_sg(in_sg);
802 
803 	/* IV */
804 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
805 	sg->length = ses->iv.length;
806 	cpu_to_hw_sg(sg);
807 
808 	/* 1st seg */
809 	sg++;
810 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
811 	sg->length = mbuf->data_len - sym->cipher.data.offset;
812 	sg->offset = sym->cipher.data.offset;
813 
814 	/* Successive segs */
815 	mbuf = mbuf->next;
816 	while (mbuf) {
817 		cpu_to_hw_sg(sg);
818 		sg++;
819 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
820 		sg->length = mbuf->data_len;
821 		mbuf = mbuf->next;
822 	}
823 	sg->final = 1;
824 	cpu_to_hw_sg(sg);
825 
826 	return cf;
827 }
828 
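/* Build a compound frame descriptor for a cipher-only operation on a
 * contiguous mbuf.
 */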
829 static inline struct dpaa_sec_job *
830 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
831 {
832 	struct rte_crypto_sym_op *sym = op->sym;
833 	struct dpaa_sec_job *cf;
834 	struct dpaa_sec_op_ctx *ctx;
835 	struct qm_sg_entry *sg;
836 	rte_iova_t src_start_addr, dst_start_addr;
837 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
838 			ses->iv.offset);
839 
840 	ctx = dpaa_sec_alloc_ctx(ses);
841 	if (!ctx)
842 		return NULL;
843 
844 	cf = &ctx->job;
845 	ctx->op = op;
846 
847 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
848 
849 	if (sym->m_dst)
850 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
851 	else
852 		dst_start_addr = src_start_addr;
853 
854 	/* output */
855 	sg = &cf->sg[0];
856 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
857 	sg->length = sym->cipher.data.length + ses->iv.length;
858 	cpu_to_hw_sg(sg);
859 
860 	/* input */
861 	sg = &cf->sg[1];
862 
863 	/* need to extend the input to a compound frame */
864 	sg->extension = 1;
865 	sg->final = 1;
866 	sg->length = sym->cipher.data.length + ses->iv.length;
867 	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
868 	cpu_to_hw_sg(sg);
869 
870 	sg = &cf->sg[2];
871 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
872 	sg->length = ses->iv.length;
873 	cpu_to_hw_sg(sg);
874 
875 	sg++;
876 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
877 	sg->length = sym->cipher.data.length;
878 	sg->final = 1;
879 	cpu_to_hw_sg(sg);
880 
881 	return cf;
882 }
883 
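/* Build a compound frame descriptor for an AEAD (GCM) operation on
 * multi-segment mbufs; the IV, optional AAD and digest are carried in
 * additional SG entries.
 */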
884 static inline struct dpaa_sec_job *
885 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
886 {
887 	struct rte_crypto_sym_op *sym = op->sym;
888 	struct dpaa_sec_job *cf;
889 	struct dpaa_sec_op_ctx *ctx;
890 	struct qm_sg_entry *sg, *out_sg, *in_sg;
891 	struct rte_mbuf *mbuf;
892 	uint8_t req_segs;
893 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
894 			ses->iv.offset);
895 
896 	if (sym->m_dst) {
897 		mbuf = sym->m_dst;
898 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
899 	} else {
900 		mbuf = sym->m_src;
901 		req_segs = mbuf->nb_segs * 2 + 4;
902 	}
903 
904 	if (ses->auth_only_len)
905 		req_segs++;
906 
907 	if (req_segs > MAX_SG_ENTRIES) {
908 		DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
909 				MAX_SG_ENTRIES);
910 		return NULL;
911 	}
912 
913 	ctx = dpaa_sec_alloc_ctx(ses);
914 	if (!ctx)
915 		return NULL;
916 
917 	cf = &ctx->job;
918 	ctx->op = op;
919 
920 	rte_prefetch0(cf->sg);
921 
922 	/* output */
923 	out_sg = &cf->sg[0];
924 	out_sg->extension = 1;
925 	if (is_encode(ses))
926 		out_sg->length = sym->aead.data.length + ses->auth_only_len
927 						+ ses->digest_length;
928 	else
929 		out_sg->length = sym->aead.data.length + ses->auth_only_len;
930 
931 	/* output sg entries */
932 	sg = &cf->sg[2];
933 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
934 	cpu_to_hw_sg(out_sg);
935 
936 	/* 1st seg */
937 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
938 	sg->length = mbuf->data_len - sym->aead.data.offset +
939 					ses->auth_only_len;
940 	sg->offset = sym->aead.data.offset - ses->auth_only_len;
941 
942 	/* Successive segs */
943 	mbuf = mbuf->next;
944 	while (mbuf) {
945 		cpu_to_hw_sg(sg);
946 		sg++;
947 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
948 		sg->length = mbuf->data_len;
949 		mbuf = mbuf->next;
950 	}
951 	sg->length -= ses->digest_length;
952 
953 	if (is_encode(ses)) {
954 		cpu_to_hw_sg(sg);
955 		/* set auth output */
956 		sg++;
957 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
958 		sg->length = ses->digest_length;
959 	}
960 	sg->final = 1;
961 	cpu_to_hw_sg(sg);
962 
963 	/* input */
964 	mbuf = sym->m_src;
965 	in_sg = &cf->sg[1];
966 	in_sg->extension = 1;
967 	in_sg->final = 1;
968 	if (is_encode(ses))
969 		in_sg->length = ses->iv.length + sym->aead.data.length
970 							+ ses->auth_only_len;
971 	else
972 		in_sg->length = ses->iv.length + sym->aead.data.length
973 				+ ses->auth_only_len + ses->digest_length;
974 
975 	/* input sg entries */
976 	sg++;
977 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
978 	cpu_to_hw_sg(in_sg);
979 
980 	/* 1st seg IV */
981 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
982 	sg->length = ses->iv.length;
983 	cpu_to_hw_sg(sg);
984 
985 	/* 2nd seg auth only */
986 	if (ses->auth_only_len) {
987 		sg++;
988 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
989 		sg->length = ses->auth_only_len;
990 		cpu_to_hw_sg(sg);
991 	}
992 
993 	/* 3rd seg */
994 	sg++;
995 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
996 	sg->length = mbuf->data_len - sym->aead.data.offset;
997 	sg->offset = sym->aead.data.offset;
998 
999 	/* Successive segs */
1000 	mbuf = mbuf->next;
1001 	while (mbuf) {
1002 		cpu_to_hw_sg(sg);
1003 		sg++;
1004 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1005 		sg->length = mbuf->data_len;
1006 		mbuf = mbuf->next;
1007 	}
1008 
1009 	if (is_decode(ses)) {
1010 		cpu_to_hw_sg(sg);
1011 		sg++;
1012 		memcpy(ctx->digest, sym->aead.digest.data,
1013 			ses->digest_length);
1014 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1015 		sg->length = ses->digest_length;
1016 	}
1017 	sg->final = 1;
1018 	cpu_to_hw_sg(sg);
1019 
1020 	return cf;
1021 }
1022 
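/* Build a compound frame descriptor for an AEAD (GCM) operation on a
 * contiguous mbuf.
 */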
1023 static inline struct dpaa_sec_job *
1024 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1025 {
1026 	struct rte_crypto_sym_op *sym = op->sym;
1027 	struct dpaa_sec_job *cf;
1028 	struct dpaa_sec_op_ctx *ctx;
1029 	struct qm_sg_entry *sg;
1030 	uint32_t length = 0;
1031 	rte_iova_t src_start_addr, dst_start_addr;
1032 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1033 			ses->iv.offset);
1034 
1035 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1036 
1037 	if (sym->m_dst)
1038 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1039 	else
1040 		dst_start_addr = src_start_addr;
1041 
1042 	ctx = dpaa_sec_alloc_ctx(ses);
1043 	if (!ctx)
1044 		return NULL;
1045 
1046 	cf = &ctx->job;
1047 	ctx->op = op;
1048 
1049 	/* input */
1050 	rte_prefetch0(cf->sg);
1051 	sg = &cf->sg[2];
1052 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1053 	if (is_encode(ses)) {
1054 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1055 		sg->length = ses->iv.length;
1056 		length += sg->length;
1057 		cpu_to_hw_sg(sg);
1058 
1059 		sg++;
1060 		if (ses->auth_only_len) {
1061 			qm_sg_entry_set64(sg,
1062 					  dpaa_mem_vtop(sym->aead.aad.data));
1063 			sg->length = ses->auth_only_len;
1064 			length += sg->length;
1065 			cpu_to_hw_sg(sg);
1066 			sg++;
1067 		}
1068 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1069 		sg->length = sym->aead.data.length;
1070 		length += sg->length;
1071 		sg->final = 1;
1072 		cpu_to_hw_sg(sg);
1073 	} else {
1074 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1075 		sg->length = ses->iv.length;
1076 		length += sg->length;
1077 		cpu_to_hw_sg(sg);
1078 
1079 		sg++;
1080 		if (ses->auth_only_len) {
1081 			qm_sg_entry_set64(sg,
1082 					  dpaa_mem_vtop(sym->aead.aad.data));
1083 			sg->length = ses->auth_only_len;
1084 			length += sg->length;
1085 			cpu_to_hw_sg(sg);
1086 			sg++;
1087 		}
1088 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1089 		sg->length = sym->aead.data.length;
1090 		length += sg->length;
1091 		cpu_to_hw_sg(sg);
1092 
1093 		memcpy(ctx->digest, sym->aead.digest.data,
1094 		       ses->digest_length);
1095 		sg++;
1096 
1097 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1098 		sg->length = ses->digest_length;
1099 		length += sg->length;
1100 		sg->final = 1;
1101 		cpu_to_hw_sg(sg);
1102 	}
1103 	/* input compound frame */
1104 	cf->sg[1].length = length;
1105 	cf->sg[1].extension = 1;
1106 	cf->sg[1].final = 1;
1107 	cpu_to_hw_sg(&cf->sg[1]);
1108 
1109 	/* output */
1110 	sg++;
1111 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1112 	qm_sg_entry_set64(sg,
1113 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1114 	sg->length = sym->aead.data.length + ses->auth_only_len;
1115 	length = sg->length;
1116 	if (is_encode(ses)) {
1117 		cpu_to_hw_sg(sg);
1118 		/* set auth output */
1119 		sg++;
1120 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1121 		sg->length = ses->digest_length;
1122 		length += sg->length;
1123 	}
1124 	sg->final = 1;
1125 	cpu_to_hw_sg(sg);
1126 
1127 	/* output compound frame */
1128 	cf->sg[0].length = length;
1129 	cf->sg[0].extension = 1;
1130 	cpu_to_hw_sg(&cf->sg[0]);
1131 
1132 	return cf;
1133 }
1134 
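/* Build a compound frame descriptor for a chained cipher + auth operation
 * on multi-segment mbufs.
 */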
1135 static inline struct dpaa_sec_job *
1136 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1137 {
1138 	struct rte_crypto_sym_op *sym = op->sym;
1139 	struct dpaa_sec_job *cf;
1140 	struct dpaa_sec_op_ctx *ctx;
1141 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1142 	struct rte_mbuf *mbuf;
1143 	uint8_t req_segs;
1144 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1145 			ses->iv.offset);
1146 
1147 	if (sym->m_dst) {
1148 		mbuf = sym->m_dst;
1149 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1150 	} else {
1151 		mbuf = sym->m_src;
1152 		req_segs = mbuf->nb_segs * 2 + 4;
1153 	}
1154 
1155 	if (req_segs > MAX_SG_ENTRIES) {
1156 		DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1157 				MAX_SG_ENTRIES);
1158 		return NULL;
1159 	}
1160 
1161 	ctx = dpaa_sec_alloc_ctx(ses);
1162 	if (!ctx)
1163 		return NULL;
1164 
1165 	cf = &ctx->job;
1166 	ctx->op = op;
1167 
1168 	rte_prefetch0(cf->sg);
1169 
1170 	/* output */
1171 	out_sg = &cf->sg[0];
1172 	out_sg->extension = 1;
1173 	if (is_encode(ses))
1174 		out_sg->length = sym->auth.data.length + ses->digest_length;
1175 	else
1176 		out_sg->length = sym->auth.data.length;
1177 
1178 	/* output sg entries */
1179 	sg = &cf->sg[2];
1180 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1181 	cpu_to_hw_sg(out_sg);
1182 
1183 	/* 1st seg */
1184 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1185 	sg->length = mbuf->data_len - sym->auth.data.offset;
1186 	sg->offset = sym->auth.data.offset;
1187 
1188 	/* Successive segs */
1189 	mbuf = mbuf->next;
1190 	while (mbuf) {
1191 		cpu_to_hw_sg(sg);
1192 		sg++;
1193 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1194 		sg->length = mbuf->data_len;
1195 		mbuf = mbuf->next;
1196 	}
1197 	sg->length -= ses->digest_length;
1198 
1199 	if (is_encode(ses)) {
1200 		cpu_to_hw_sg(sg);
1201 		/* set auth output */
1202 		sg++;
1203 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1204 		sg->length = ses->digest_length;
1205 	}
1206 	sg->final = 1;
1207 	cpu_to_hw_sg(sg);
1208 
1209 	/* input */
1210 	mbuf = sym->m_src;
1211 	in_sg = &cf->sg[1];
1212 	in_sg->extension = 1;
1213 	in_sg->final = 1;
1214 	if (is_encode(ses))
1215 		in_sg->length = ses->iv.length + sym->auth.data.length;
1216 	else
1217 		in_sg->length = ses->iv.length + sym->auth.data.length
1218 						+ ses->digest_length;
1219 
1220 	/* input sg entries */
1221 	sg++;
1222 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1223 	cpu_to_hw_sg(in_sg);
1224 
1225 	/* 1st seg IV */
1226 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1227 	sg->length = ses->iv.length;
1228 	cpu_to_hw_sg(sg);
1229 
1230 	/* 2nd seg */
1231 	sg++;
1232 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1233 	sg->length = mbuf->data_len - sym->auth.data.offset;
1234 	sg->offset = sym->auth.data.offset;
1235 
1236 	/* Successive segs */
1237 	mbuf = mbuf->next;
1238 	while (mbuf) {
1239 		cpu_to_hw_sg(sg);
1240 		sg++;
1241 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1242 		sg->length = mbuf->data_len;
1243 		mbuf = mbuf->next;
1244 	}
1245 
1246 	sg->length -= ses->digest_length;
1247 	if (is_decode(ses)) {
1248 		cpu_to_hw_sg(sg);
1249 		sg++;
1250 		memcpy(ctx->digest, sym->auth.digest.data,
1251 			ses->digest_length);
1252 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1253 		sg->length = ses->digest_length;
1254 	}
1255 	sg->final = 1;
1256 	cpu_to_hw_sg(sg);
1257 
1258 	return cf;
1259 }
1260 
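/* Build a compound frame descriptor for a chained cipher + auth operation
 * on a contiguous mbuf.
 */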
1261 static inline struct dpaa_sec_job *
1262 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1263 {
1264 	struct rte_crypto_sym_op *sym = op->sym;
1265 	struct dpaa_sec_job *cf;
1266 	struct dpaa_sec_op_ctx *ctx;
1267 	struct qm_sg_entry *sg;
1268 	rte_iova_t src_start_addr, dst_start_addr;
1269 	uint32_t length = 0;
1270 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1271 			ses->iv.offset);
1272 
1273 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1274 	if (sym->m_dst)
1275 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1276 	else
1277 		dst_start_addr = src_start_addr;
1278 
1279 	ctx = dpaa_sec_alloc_ctx(ses);
1280 	if (!ctx)
1281 		return NULL;
1282 
1283 	cf = &ctx->job;
1284 	ctx->op = op;
1285 
1286 	/* input */
1287 	rte_prefetch0(cf->sg);
1288 	sg = &cf->sg[2];
1289 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1290 	if (is_encode(ses)) {
1291 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1292 		sg->length = ses->iv.length;
1293 		length += sg->length;
1294 		cpu_to_hw_sg(sg);
1295 
1296 		sg++;
1297 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1298 		sg->length = sym->auth.data.length;
1299 		length += sg->length;
1300 		sg->final = 1;
1301 		cpu_to_hw_sg(sg);
1302 	} else {
1303 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1304 		sg->length = ses->iv.length;
1305 		length += sg->length;
1306 		cpu_to_hw_sg(sg);
1307 
1308 		sg++;
1309 
1310 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1311 		sg->length = sym->auth.data.length;
1312 		length += sg->length;
1313 		cpu_to_hw_sg(sg);
1314 
1315 		memcpy(ctx->digest, sym->auth.digest.data,
1316 		       ses->digest_length);
1317 		sg++;
1318 
1319 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1320 		sg->length = ses->digest_length;
1321 		length += sg->length;
1322 		sg->final = 1;
1323 		cpu_to_hw_sg(sg);
1324 	}
1325 	/* input compound frame */
1326 	cf->sg[1].length = length;
1327 	cf->sg[1].extension = 1;
1328 	cf->sg[1].final = 1;
1329 	cpu_to_hw_sg(&cf->sg[1]);
1330 
1331 	/* output */
1332 	sg++;
1333 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1334 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1335 	sg->length = sym->cipher.data.length;
1336 	length = sg->length;
1337 	if (is_encode(ses)) {
1338 		cpu_to_hw_sg(sg);
1339 		/* set auth output */
1340 		sg++;
1341 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1342 		sg->length = ses->digest_length;
1343 		length += sg->length;
1344 	}
1345 	sg->final = 1;
1346 	cpu_to_hw_sg(sg);
1347 
1348 	/* output compound frame */
1349 	cf->sg[0].length = length;
1350 	cf->sg[0].extension = 1;
1351 	cpu_to_hw_sg(&cf->sg[0]);
1352 
1353 	return cf;
1354 }
1355 
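/* Build a compound frame descriptor for protocol (IPsec) offload: the whole
 * packet is the input and the full mbuf buffer is available as output.
 */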
1356 static inline struct dpaa_sec_job *
1357 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1358 {
1359 	struct rte_crypto_sym_op *sym = op->sym;
1360 	struct dpaa_sec_job *cf;
1361 	struct dpaa_sec_op_ctx *ctx;
1362 	struct qm_sg_entry *sg;
1363 	phys_addr_t src_start_addr, dst_start_addr;
1364 
1365 	ctx = dpaa_sec_alloc_ctx(ses);
1366 	if (!ctx)
1367 		return NULL;
1368 	cf = &ctx->job;
1369 	ctx->op = op;
1370 
1371 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1372 
1373 	if (sym->m_dst)
1374 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1375 	else
1376 		dst_start_addr = src_start_addr;
1377 
1378 	/* input */
1379 	sg = &cf->sg[1];
1380 	qm_sg_entry_set64(sg, src_start_addr);
1381 	sg->length = sym->m_src->pkt_len;
1382 	sg->final = 1;
1383 	cpu_to_hw_sg(sg);
1384 
1385 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1386 	/* output */
1387 	sg = &cf->sg[0];
1388 	qm_sg_entry_set64(sg, dst_start_addr);
1389 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1390 	cpu_to_hw_sg(sg);
1391 
1392 	return cf;
1393 }
1394 
1395 static uint16_t
1396 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1397 		       uint16_t nb_ops)
1398 {
1399 	/* Function to transmit the frames to the given device and queue pair */
1400 	uint32_t loop;
1401 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1402 	uint16_t num_tx = 0;
1403 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1404 	uint32_t frames_to_send;
1405 	struct rte_crypto_op *op;
1406 	struct dpaa_sec_job *cf;
1407 	dpaa_sec_session *ses;
1408 	uint32_t auth_only_len;
1409 	struct qman_fq *inq[DPAA_SEC_BURST];
1410 
1411 	while (nb_ops) {
1412 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1413 				DPAA_SEC_BURST : nb_ops;
1414 		for (loop = 0; loop < frames_to_send; loop++) {
1415 			op = *(ops++);
1416 			switch (op->sess_type) {
1417 			case RTE_CRYPTO_OP_WITH_SESSION:
1418 				ses = (dpaa_sec_session *)
1419 					get_session_private_data(
1420 							op->sym->session,
1421 							cryptodev_driver_id);
1422 				break;
1423 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1424 				ses = (dpaa_sec_session *)
1425 					get_sec_session_private_data(
1426 							op->sym->sec_session);
1427 				break;
1428 			default:
1429 				DPAA_SEC_DP_ERR(
1430 					"sessionless crypto op not supported");
1431 				frames_to_send = loop;
1432 				nb_ops = loop;
1433 				goto send_pkts;
1434 			}
1435 			if (unlikely(!ses->qp || ses->qp != qp)) {
1436 				DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
1437 					     ses->qp, qp);
1438 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1439 					frames_to_send = loop;
1440 					nb_ops = loop;
1441 					goto send_pkts;
1442 				}
1443 			}
1444 
1445 			auth_only_len = op->sym->auth.data.length -
1446 						op->sym->cipher.data.length;
1447 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1448 				if (is_auth_only(ses)) {
1449 					cf = build_auth_only(op, ses);
1450 				} else if (is_cipher_only(ses)) {
1451 					cf = build_cipher_only(op, ses);
1452 				} else if (is_aead(ses)) {
1453 					cf = build_cipher_auth_gcm(op, ses);
1454 					auth_only_len = ses->auth_only_len;
1455 				} else if (is_auth_cipher(ses)) {
1456 					cf = build_cipher_auth(op, ses);
1457 				} else if (is_proto_ipsec(ses)) {
1458 					cf = build_proto(op, ses);
1459 				} else {
1460 					DPAA_SEC_DP_ERR("not supported ops");
1461 					frames_to_send = loop;
1462 					nb_ops = loop;
1463 					goto send_pkts;
1464 				}
1465 			} else {
1466 				if (is_auth_only(ses)) {
1467 					cf = build_auth_only_sg(op, ses);
1468 				} else if (is_cipher_only(ses)) {
1469 					cf = build_cipher_only_sg(op, ses);
1470 				} else if (is_aead(ses)) {
1471 					cf = build_cipher_auth_gcm_sg(op, ses);
1472 					auth_only_len = ses->auth_only_len;
1473 				} else if (is_auth_cipher(ses)) {
1474 					cf = build_cipher_auth_sg(op, ses);
1475 				} else {
1476 					DPAA_SEC_DP_ERR("not supported ops");
1477 					frames_to_send = loop;
1478 					nb_ops = loop;
1479 					goto send_pkts;
1480 				}
1481 			}
1482 			if (unlikely(!cf)) {
1483 				frames_to_send = loop;
1484 				nb_ops = loop;
1485 				goto send_pkts;
1486 			}
1487 
1488 			fd = &fds[loop];
1489 			inq[loop] = ses->inq;
1490 			fd->opaque_addr = 0;
1491 			fd->cmd = 0;
1492 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1493 			fd->_format1 = qm_fd_compound;
1494 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1495 			/* Auth_only_len is set to 0 in the descriptor and is
1496 			 * overwritten here in fd->cmd, which updates the
1497 			 * DPOVRD register.
1498 			 */
1499 			if (auth_only_len)
1500 				fd->cmd = 0x80000000 | auth_only_len;
1501 
1502 		}
1503 send_pkts:
1504 		loop = 0;
1505 		while (loop < frames_to_send) {
1506 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1507 					frames_to_send - loop);
1508 		}
1509 		nb_ops -= frames_to_send;
1510 		num_tx += frames_to_send;
1511 	}
1512 
1513 	dpaa_qp->tx_pkts += num_tx;
1514 	dpaa_qp->tx_errs += nb_ops - num_tx;
1515 
1516 	return num_tx;
1517 }
1518 
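/* Dequeue completed crypto operations from the queue pair's output queue. */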
1519 static uint16_t
1520 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1521 		       uint16_t nb_ops)
1522 {
1523 	uint16_t num_rx;
1524 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1525 
1526 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1527 
1528 	dpaa_qp->rx_pkts += num_rx;
1529 	dpaa_qp->rx_errs += nb_ops - num_rx;
1530 
1531 	DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1532 
1533 	return num_rx;
1534 }
1535 
1536 /** Release queue pair */
1537 static int
1538 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1539 			    uint16_t qp_id)
1540 {
1541 	struct dpaa_sec_dev_private *internals;
1542 	struct dpaa_sec_qp *qp = NULL;
1543 
1544 	PMD_INIT_FUNC_TRACE();
1545 
1546 	DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1547 
1548 	internals = dev->data->dev_private;
1549 	if (qp_id >= internals->max_nb_queue_pairs) {
1550 		DPAA_SEC_ERR("Max supported qpid %d",
1551 			     internals->max_nb_queue_pairs);
1552 		return -EINVAL;
1553 	}
1554 
1555 	qp = &internals->qps[qp_id];
1556 	qp->internals = NULL;
1557 	dev->data->queue_pairs[qp_id] = NULL;
1558 
1559 	return 0;
1560 }
1561 
1562 /** Setup a queue pair */
1563 static int
1564 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1565 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1566 		__rte_unused int socket_id,
1567 		__rte_unused struct rte_mempool *session_pool)
1568 {
1569 	struct dpaa_sec_dev_private *internals;
1570 	struct dpaa_sec_qp *qp = NULL;
1571 
1572 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1573 
1574 	internals = dev->data->dev_private;
1575 	if (qp_id >= internals->max_nb_queue_pairs) {
1576 		DPAA_SEC_ERR("Max supported qpid %d",
1577 			     internals->max_nb_queue_pairs);
1578 		return -EINVAL;
1579 	}
1580 
1581 	qp = &internals->qps[qp_id];
1582 	qp->internals = internals;
1583 	dev->data->queue_pairs[qp_id] = qp;
1584 
1585 	return 0;
1586 }
1587 
1588 /** Start queue pair */
1589 static int
1590 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1591 			  __rte_unused uint16_t queue_pair_id)
1592 {
1593 	PMD_INIT_FUNC_TRACE();
1594 
1595 	return 0;
1596 }
1597 
1598 /** Stop queue pair */
1599 static int
1600 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1601 			 __rte_unused uint16_t queue_pair_id)
1602 {
1603 	PMD_INIT_FUNC_TRACE();
1604 
1605 	return 0;
1606 }
1607 
1608 /** Return the number of allocated queue pairs */
1609 static uint32_t
1610 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1611 {
1612 	PMD_INIT_FUNC_TRACE();
1613 
1614 	return dev->data->nb_queue_pairs;
1615 }
1616 
1617 /** Returns the size of session structure */
1618 static unsigned int
1619 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1620 {
1621 	PMD_INIT_FUNC_TRACE();
1622 
1623 	return sizeof(dpaa_sec_session);
1624 }
1625 
1626 static int
1627 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1628 		     struct rte_crypto_sym_xform *xform,
1629 		     dpaa_sec_session *session)
1630 {
1631 	session->cipher_alg = xform->cipher.algo;
1632 	session->iv.length = xform->cipher.iv.length;
1633 	session->iv.offset = xform->cipher.iv.offset;
1634 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1635 					       RTE_CACHE_LINE_SIZE);
1636 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1637 		DPAA_SEC_ERR("No Memory for cipher key");
1638 		return -ENOMEM;
1639 	}
1640 	session->cipher_key.length = xform->cipher.key.length;
1641 
1642 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1643 	       xform->cipher.key.length);
1644 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1645 			DIR_ENC : DIR_DEC;
1646 
1647 	return 0;
1648 }
1649 
1650 static int
1651 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1652 		   struct rte_crypto_sym_xform *xform,
1653 		   dpaa_sec_session *session)
1654 {
1655 	session->auth_alg = xform->auth.algo;
1656 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1657 					     RTE_CACHE_LINE_SIZE);
1658 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1659 		DPAA_SEC_ERR("No Memory for auth key");
1660 		return -ENOMEM;
1661 	}
1662 	session->auth_key.length = xform->auth.key.length;
1663 	session->digest_length = xform->auth.digest_length;
1664 
1665 	memcpy(session->auth_key.data, xform->auth.key.data,
1666 	       xform->auth.key.length);
1667 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1668 			DIR_ENC : DIR_DEC;
1669 
1670 	return 0;
1671 }
1672 
1673 static int
1674 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1675 		   struct rte_crypto_sym_xform *xform,
1676 		   dpaa_sec_session *session)
1677 {
1678 	session->aead_alg = xform->aead.algo;
1679 	session->iv.length = xform->aead.iv.length;
1680 	session->iv.offset = xform->aead.iv.offset;
1681 	session->auth_only_len = xform->aead.aad_length;
1682 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1683 					     RTE_CACHE_LINE_SIZE);
1684 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1685 		DPAA_SEC_ERR("No Memory for aead key\n");
1686 		return -ENOMEM;
1687 	}
1688 	session->aead_key.length = xform->aead.key.length;
1689 	session->digest_length = xform->aead.digest_length;
1690 
1691 	memcpy(session->aead_key.data, xform->aead.key.data,
1692 	       xform->aead.key.length);
1693 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1694 			DIR_ENC : DIR_DEC;
1695 
1696 	return 0;
1697 }
1698 
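/* Reserve a free SEC input queue for a session; returns NULL when all
 * queues are already in use.
 */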
1699 static struct qman_fq *
1700 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1701 {
1702 	unsigned int i;
1703 
1704 	for (i = 0; i < qi->max_nb_sessions; i++) {
1705 		if (qi->inq_attach[i] == 0) {
1706 			qi->inq_attach[i] = 1;
1707 			return &qi->inq[i];
1708 		}
1709 	}
1710 	DPAA_SEC_WARN("All sessions in use (%x)", qi->max_nb_sessions);
1711 
1712 	return NULL;
1713 }
1714 
1715 static int
1716 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1717 {
1718 	unsigned int i;
1719 
1720 	for (i = 0; i < qi->max_nb_sessions; i++) {
1721 		if (&qi->inq[i] == fq) {
1722 			qman_retire_fq(fq, NULL);
1723 			qman_oos_fq(fq);
1724 			qi->inq_attach[i] = 0;
1725 			return 0;
1726 		}
1727 	}
1728 	return -1;
1729 }
1730 
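/* Bind a session to a queue pair: prepare the session CDB and schedule the
 * session's input queue so that results are delivered on the qp's output
 * queue.
 */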
1731 static int
1732 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1733 {
1734 	int ret;
1735 
1736 	sess->qp = qp;
1737 	ret = dpaa_sec_prep_cdb(sess);
1738 	if (ret) {
1739 		DPAA_SEC_ERR("Unable to prepare sec cdb");
1740 		return -1;
1741 	}
1742 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1743 		ret = rte_dpaa_portal_init((void *)0);
1744 		if (ret) {
1745 			DPAA_SEC_ERR("Failure in affining portal");
1746 			return ret;
1747 		}
1748 	}
1749 	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1750 			       qman_fq_fqid(&qp->outq));
1751 	if (ret)
1752 		DPAA_SEC_ERR("Unable to init sec queue");
1753 
1754 	return ret;
1755 }
1756 
1757 static int
1758 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1759 			uint16_t qp_id __rte_unused,
1760 			void *ses __rte_unused)
1761 {
1762 	PMD_INIT_FUNC_TRACE();
1763 	return 0;
1764 }
1765 
1766 static int
1767 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1768 			uint16_t qp_id  __rte_unused,
1769 			void *ses)
1770 {
1771 	dpaa_sec_session *sess = ses;
1772 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1773 
1774 	PMD_INIT_FUNC_TRACE();
1775 
1776 	if (sess->inq)
1777 		dpaa_sec_detach_rxq(qi, sess->inq);
1778 	sess->inq = NULL;
1779 
1780 	sess->qp = NULL;
1781 
1782 	return 0;
1783 }
1784 
1785 static int
1786 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1787 			    struct rte_crypto_sym_xform *xform,	void *sess)
1788 {
1789 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1790 	dpaa_sec_session *session = sess;
1791 
1792 	PMD_INIT_FUNC_TRACE();
1793 
1794 	if (unlikely(sess == NULL)) {
1795 		DPAA_SEC_ERR("invalid session struct");
1796 		return -EINVAL;
1797 	}
1798 
1799 	/* Default IV length = 0 */
1800 	session->iv.length = 0;
1801 
1802 	/* Cipher Only */
1803 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1804 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1805 		dpaa_sec_cipher_init(dev, xform, session);
1806 
1807 	/* Authentication Only */
1808 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1809 		   xform->next == NULL) {
1810 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1811 		dpaa_sec_auth_init(dev, xform, session);
1812 
1813 	/* Cipher then Authenticate */
1814 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1815 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1816 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1817 			dpaa_sec_cipher_init(dev, xform, session);
1818 			dpaa_sec_auth_init(dev, xform->next, session);
1819 		} else {
1820 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
1821 			return -EINVAL;
1822 		}
1823 
1824 	/* Authenticate then Cipher */
1825 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1826 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1827 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1828 			dpaa_sec_auth_init(dev, xform, session);
1829 			dpaa_sec_cipher_init(dev, xform->next, session);
1830 		} else {
1831 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
1832 			return -EINVAL;
1833 		}
1834 
1835 	/* AEAD operation for AES-GCM kind of Algorithms */
1836 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1837 		   xform->next == NULL) {
1838 		dpaa_sec_aead_init(dev, xform, session);
1839 
1840 	} else {
1841 		DPAA_SEC_ERR("Invalid crypto type");
1842 		return -EINVAL;
1843 	}
1844 	session->ctx_pool = internals->ctx_pool;
1845 	session->inq = dpaa_sec_attach_rxq(internals);
1846 	if (session->inq == NULL) {
1847 		DPAA_SEC_ERR("unable to attach sec queue");
1848 		goto err1;
1849 	}
1850 
1851 	return 0;
1852 
1853 err1:
1854 	rte_free(session->cipher_key.data);
1855 	rte_free(session->auth_key.data);
1856 	memset(session, 0, sizeof(dpaa_sec_session));
1857 
1858 	return -EINVAL;
1859 }
1860 
1861 static int
1862 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1863 		struct rte_crypto_sym_xform *xform,
1864 		struct rte_cryptodev_sym_session *sess,
1865 		struct rte_mempool *mempool)
1866 {
1867 	void *sess_private_data;
1868 	int ret;
1869 
1870 	PMD_INIT_FUNC_TRACE();
1871 
1872 	if (rte_mempool_get(mempool, &sess_private_data)) {
1873 		DPAA_SEC_ERR("Couldn't get object from session mempool");
1874 		return -ENOMEM;
1875 	}
1876 
1877 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1878 	if (ret != 0) {
1879 		DPAA_SEC_ERR("failed to configure session parameters");
1880 
1881 		/* Return session to mempool */
1882 		rte_mempool_put(mempool, sess_private_data);
1883 		return ret;
1884 	}
1885 
1886 	set_session_private_data(sess, dev->driver_id,
1887 			sess_private_data);
1888 
1889 
1890 	return 0;
1891 }
1892 
1893 /** Clear the memory of session so it doesn't leave key material behind */
1894 static void
1895 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1896 		struct rte_cryptodev_sym_session *sess)
1897 {
1898 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1899 	uint8_t index = dev->driver_id;
1900 	void *sess_priv = get_session_private_data(sess, index);
1901 
1902 	PMD_INIT_FUNC_TRACE();
1903 
1904 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1905 
1906 	if (sess_priv) {
1907 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1908 
1909 		if (s->inq)
1910 			dpaa_sec_detach_rxq(qi, s->inq);
1911 		rte_free(s->cipher_key.data);
1912 		rte_free(s->auth_key.data);
1913 		memset(s, 0, sizeof(dpaa_sec_session));
1914 		set_session_private_data(sess, index, NULL);
1915 		rte_mempool_put(sess_mp, sess_priv);
1916 	}
1917 }
1918 
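/* Configure a lookaside IPsec (protocol offload) session from an
 * rte_security session configuration.
 */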
1919 static int
1920 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1921 			   struct rte_security_session_conf *conf,
1922 			   void *sess)
1923 {
1924 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1925 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1926 	struct rte_crypto_auth_xform *auth_xform;
1927 	struct rte_crypto_cipher_xform *cipher_xform;
1928 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
1929 
1930 	PMD_INIT_FUNC_TRACE();
1931 
1932 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1933 		cipher_xform = &conf->crypto_xform->cipher;
1934 		auth_xform = &conf->crypto_xform->next->auth;
1935 	} else {
1936 		auth_xform = &conf->crypto_xform->auth;
1937 		cipher_xform = &conf->crypto_xform->next->cipher;
1938 	}
1939 	session->proto_alg = conf->protocol;
1940 	session->cipher_key.data = rte_zmalloc(NULL,
1941 					       cipher_xform->key.length,
1942 					       RTE_CACHE_LINE_SIZE);
1943 	if (session->cipher_key.data == NULL &&
1944 			cipher_xform->key.length > 0) {
1945 		DPAA_SEC_ERR("No Memory for cipher key");
1946 		return -ENOMEM;
1947 	}
1948 
1949 	session->cipher_key.length = cipher_xform->key.length;
1950 	session->auth_key.data = rte_zmalloc(NULL,
1951 					auth_xform->key.length,
1952 					RTE_CACHE_LINE_SIZE);
1953 	if (session->auth_key.data == NULL &&
1954 			auth_xform->key.length > 0) {
1955 		DPAA_SEC_ERR("No Memory for auth key");
1956 		rte_free(session->cipher_key.data);
1957 		return -ENOMEM;
1958 	}
1959 	session->auth_key.length = auth_xform->key.length;
1960 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1961 			cipher_xform->key.length);
1962 	memcpy(session->auth_key.data, auth_xform->key.data,
1963 			auth_xform->key.length);
1964 
1965 	switch (auth_xform->algo) {
1966 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1967 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1968 		break;
1969 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1970 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1971 		break;
1972 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1973 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1974 		break;
1975 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1976 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1977 		break;
1978 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1979 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1980 		break;
1981 	case RTE_CRYPTO_AUTH_AES_CMAC:
1982 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1983 		break;
1984 	case RTE_CRYPTO_AUTH_NULL:
1985 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1986 		break;
1987 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
1988 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1989 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1990 	case RTE_CRYPTO_AUTH_SHA1:
1991 	case RTE_CRYPTO_AUTH_SHA256:
1992 	case RTE_CRYPTO_AUTH_SHA512:
1993 	case RTE_CRYPTO_AUTH_SHA224:
1994 	case RTE_CRYPTO_AUTH_SHA384:
1995 	case RTE_CRYPTO_AUTH_MD5:
1996 	case RTE_CRYPTO_AUTH_AES_GMAC:
1997 	case RTE_CRYPTO_AUTH_KASUMI_F9:
1998 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
1999 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2000 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2001 			auth_xform->algo);
2002 		goto out;
2003 	default:
2004 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2005 			auth_xform->algo);
2006 		goto out;
2007 	}
2008 
2009 	switch (cipher_xform->algo) {
2010 	case RTE_CRYPTO_CIPHER_AES_CBC:
2011 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2012 		break;
2013 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2014 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2015 		break;
2016 	case RTE_CRYPTO_CIPHER_AES_CTR:
2017 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2018 		break;
2019 	case RTE_CRYPTO_CIPHER_NULL:
2020 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2021 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2022 	case RTE_CRYPTO_CIPHER_AES_ECB:
2023 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2024 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2025 			cipher_xform->algo);
2026 		goto out;
2027 	default:
2028 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2029 			cipher_xform->algo);
2030 		goto out;
2031 	}
2032 
2033 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
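		/*
		 * Clear the encap PDB together with the outer IPv4 header
		 * that is kept right after it in the session.
		 */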
2034 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2035 				sizeof(session->ip4_hdr));
2036 		session->ip4_hdr.ip_v = IPVERSION;
2037 		session->ip4_hdr.ip_hl = 5;
2038 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2039 						sizeof(session->ip4_hdr));
2040 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2041 		session->ip4_hdr.ip_id = 0;
2042 		session->ip4_hdr.ip_off = 0;
2043 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2044 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2045 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2046 				: IPPROTO_AH;
2047 		session->ip4_hdr.ip_sum = 0;
2048 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2049 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2050 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2051 						(void *)&session->ip4_hdr,
2052 						sizeof(struct ip));
2053 
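		/*
		 * Outer IP header is taken inline from the PDB, the IV is
		 * generated by the SEC block and the TTL is decremented by
		 * hardware during encapsulation.
		 */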
2054 		session->encap_pdb.options =
2055 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2056 			PDBOPTS_ESP_OIHI_PDB_INL |
2057 			PDBOPTS_ESP_IVSRC |
2058 			PDBHMO_ESP_ENCAP_DTTL;
2059 		session->encap_pdb.spi = ipsec_xform->spi;
2060 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2061 
2062 		session->dir = DIR_ENC;
2063 	} else if (ipsec_xform->direction ==
2064 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2065 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
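		/* Outer IP header length is carried in the upper half of the
		 * options word.
		 */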
2066 		session->decap_pdb.options = sizeof(struct ip) << 16;
2067 		session->dir = DIR_DEC;
2068 	} else
2069 		goto out;
2070 	session->ctx_pool = internals->ctx_pool;
2071 	session->inq = dpaa_sec_attach_rxq(internals);
2072 	if (session->inq == NULL) {
2073 		DPAA_SEC_ERR("unable to attach sec queue");
2074 		goto out;
2075 	}
2076 
2078 	return 0;
2079 out:
2080 	rte_free(session->auth_key.data);
2081 	rte_free(session->cipher_key.data);
2082 	memset(session, 0, sizeof(dpaa_sec_session));
2083 	return -EINVAL;
2084 }
2085 
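/*
 * rte_security session create op: allocate the private session object from
 * the supplied mempool and configure it for the requested protocol. Only
 * IPsec lookaside protocol offload is supported.
 */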
2086 static int
2087 dpaa_sec_security_session_create(void *dev,
2088 				 struct rte_security_session_conf *conf,
2089 				 struct rte_security_session *sess,
2090 				 struct rte_mempool *mempool)
2091 {
2092 	void *sess_private_data;
2093 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2094 	int ret;
2095 
2096 	if (rte_mempool_get(mempool, &sess_private_data)) {
2097 		DPAA_SEC_ERR("Couldn't get object from session mempool");
2098 		return -ENOMEM;
2099 	}
2100 
2101 	switch (conf->protocol) {
2102 	case RTE_SECURITY_PROTOCOL_IPSEC:
2103 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2104 				sess_private_data);
2105 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
2110 	}
2111 	if (ret != 0) {
2112 		DPAA_SEC_ERR("failed to configure session parameters");
2113 		/* Return session to mempool */
2114 		rte_mempool_put(mempool, sess_private_data);
2115 		return ret;
2116 	}
2117 
2118 	set_sec_session_private_data(sess, sess_private_data);
2119 
2120 	return ret;
2121 }
2122 
2123 /** Clear the memory of session so it doesn't leave key material behind */
2124 static int
2125 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2126 		struct rte_security_session *sess)
2127 {
2128 	PMD_INIT_FUNC_TRACE();
2129 	void *sess_priv = get_sec_session_private_data(sess);
2130 
2131 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2132 
2133 	if (sess_priv) {
2134 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2135 
2136 		rte_free(s->cipher_key.data);
2137 		rte_free(s->auth_key.data);
2138 		memset(s, 0, sizeof(dpaa_sec_session));
2139 		set_sec_session_private_data(sess, NULL);
2140 		rte_mempool_put(sess_mp, sess_priv);
2141 	}
2142 	return 0;
2143 }
2144 
2145 
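/*
 * Device configure op: create the per-device context pool backing the
 * per-packet SEC job contexts on the first configure; the pool is reused on
 * reconfiguration and only released in dev_close/uninit.
 */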
2146 static int
2147 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2148 		       struct rte_cryptodev_config *config __rte_unused)
2149 {
2150 
2151 	char str[20];
2152 	struct dpaa_sec_dev_private *internals;
2153 
2154 	PMD_INIT_FUNC_TRACE();
2155 
2156 	internals = dev->data->dev_private;
2157 	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2158 	if (!internals->ctx_pool) {
2159 		internals->ctx_pool = rte_mempool_create((const char *)str,
2160 							CTX_POOL_NUM_BUFS,
2161 							CTX_POOL_BUF_SIZE,
2162 							CTX_POOL_CACHE_SIZE, 0,
2163 							NULL, NULL, NULL, NULL,
2164 							SOCKET_ID_ANY, 0);
2165 		if (!internals->ctx_pool) {
2166 			DPAA_SEC_ERR("%s create failed\n", str);
2167 			return -ENOMEM;
2168 		}
2169 	} else
2170 		DPAA_SEC_INFO("mempool already created for dev_id : %d",
2171 				dev->data->dev_id);
2172 
2173 	return 0;
2174 }
2175 
2176 static int
2177 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2178 {
2179 	PMD_INIT_FUNC_TRACE();
2180 	return 0;
2181 }
2182 
2183 static void
2184 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2185 {
2186 	PMD_INIT_FUNC_TRACE();
2187 }
2188 
2189 static int
2190 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2191 {
2192 	struct dpaa_sec_dev_private *internals;
2193 
2194 	PMD_INIT_FUNC_TRACE();
2195 
2196 	if (dev == NULL)
2197 		return -EINVAL;
2198 
2199 	internals = dev->data->dev_private;
2200 	rte_mempool_free(internals->ctx_pool);
2201 	internals->ctx_pool = NULL;
2202 
2203 	return 0;
2204 }
2205 
2206 static void
2207 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2208 		       struct rte_cryptodev_info *info)
2209 {
2210 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2211 
2212 	PMD_INIT_FUNC_TRACE();
2213 	if (info != NULL) {
2214 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2215 		info->feature_flags = dev->feature_flags;
2216 		info->capabilities = dpaa_sec_capabilities;
2217 		info->sym.max_nb_sessions = internals->max_nb_sessions;
2218 		info->sym.max_nb_sessions_per_qp =
2219 			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2220 			RTE_DPAA_MAX_NB_SEC_QPS;
2221 		info->driver_id = cryptodev_driver_id;
2222 	}
2223 }
2224 
2225 static struct rte_cryptodev_ops crypto_ops = {
2226 	.dev_configure	      = dpaa_sec_dev_configure,
2227 	.dev_start	      = dpaa_sec_dev_start,
2228 	.dev_stop	      = dpaa_sec_dev_stop,
2229 	.dev_close	      = dpaa_sec_dev_close,
2230 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2231 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2232 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2233 	.queue_pair_start     = dpaa_sec_queue_pair_start,
2234 	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
2235 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2236 	.session_get_size     = dpaa_sec_session_get_size,
2237 	.session_configure    = dpaa_sec_session_configure,
2238 	.session_clear        = dpaa_sec_session_clear,
2239 	.qp_attach_session    = dpaa_sec_qp_attach_sess,
2240 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
2241 };
2242 
2243 static const struct rte_security_capability *
2244 dpaa_sec_capabilities_get(void *device __rte_unused)
2245 {
2246 	return dpaa_sec_security_cap;
2247 }
2248 
2249 struct rte_security_ops dpaa_sec_security_ops = {
2250 	.session_create = dpaa_sec_security_session_create,
2251 	.session_update = NULL,
2252 	.session_stats_get = NULL,
2253 	.session_destroy = dpaa_sec_security_session_destroy,
2254 	.set_pkt_metadata = NULL,
2255 	.capabilities_get = dpaa_sec_capabilities_get
2256 };
2257 
2258 static int
2259 dpaa_sec_uninit(struct rte_cryptodev *dev)
2260 {
2261 	struct dpaa_sec_dev_private *internals;
2262 
2263 	if (dev == NULL)
2264 		return -ENODEV;
2265 
2266 	internals = dev->data->dev_private;
2267 	rte_free(dev->security_ctx);
2268 
2269 	/* In case close has been called, internals->ctx_pool would be NULL */
2270 	rte_mempool_free(internals->ctx_pool);
2271 	rte_free(internals);
2272 
2273 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2274 		      dev->data->name, rte_socket_id());
2275 
2276 	return 0;
2277 }
2278 
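/*
 * Per-device init: hook up the ops and burst functions, advertise the
 * feature flags, create the security context and, in the primary process,
 * initialize one TX frame queue per queue pair and pre-create the RX frame
 * queues that are later attached to sessions.
 */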
2279 static int
2280 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2281 {
2282 	struct dpaa_sec_dev_private *internals;
2283 	struct rte_security_ctx *security_instance;
2284 	struct dpaa_sec_qp *qp;
2285 	uint32_t i, flags;
2286 	int ret;
2287 
2288 	PMD_INIT_FUNC_TRACE();
2289 
2290 	cryptodev->driver_id = cryptodev_driver_id;
2291 	cryptodev->dev_ops = &crypto_ops;
2292 
2293 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2294 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2295 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2296 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2297 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2298 			RTE_CRYPTODEV_FF_SECURITY |
2299 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2300 
2301 	internals = cryptodev->data->dev_private;
2302 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2303 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2304 
2305 	/*
2306 	 * For secondary processes, we don't initialise any further as primary
2307 	 * has already done this work. Only check we don't need a different
2308 	 * RX function
2309 	 */
2310 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2311 		DPAA_SEC_WARN("Device already init by primary process");
2312 		return 0;
2313 	}
2314 
2315 	/* Initialize security_ctx only for primary process*/
2316 	security_instance = rte_malloc("rte_security_instances_ops",
2317 				sizeof(struct rte_security_ctx), 0);
2318 	if (security_instance == NULL)
2319 		return -ENOMEM;
2320 	security_instance->device = (void *)cryptodev;
2321 	security_instance->ops = &dpaa_sec_security_ops;
2322 	security_instance->sess_cnt = 0;
2323 	cryptodev->security_ctx = security_instance;
2324 
2325 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2326 		/* init qman fq for queue pair */
2327 		qp = &internals->qps[i];
2328 		ret = dpaa_sec_init_tx(&qp->outq);
2329 		if (ret) {
2330 			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
2331 			goto init_error;
2332 		}
2333 	}
2334 
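	/* RX frame queues use dynamically allocated FQIDs and are consumed
	 * by the SEC direct-connect portal; one queue per possible session
	 * is created here and attached to a session on demand.
	 */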
2335 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2336 		QMAN_FQ_FLAG_TO_DCPORTAL;
2337 	for (i = 0; i < internals->max_nb_sessions; i++) {
2338 		/* create rx qman fq for sessions*/
2339 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2340 		if (unlikely(ret != 0)) {
2341 			DPAA_SEC_ERR("sec qman_create_fq failed");
2342 			goto init_error;
2343 		}
2344 	}
2345 
2346 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2347 	return 0;
2348 
2349 init_error:
2350 	DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2351 
2352 	dpaa_sec_uninit(cryptodev);
2353 	return -EFAULT;
2354 }
2355 
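/*
 * Bus probe: allocate a cryptodev named "dpaa_sec-<dev id>", allocate its
 * private data in the primary process, resolve the SEC era from the device
 * tree if it is not already set and run the common device init.
 */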
2356 static int
2357 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2358 				struct rte_dpaa_device *dpaa_dev)
2359 {
2360 	struct rte_cryptodev *cryptodev;
2361 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2362 
2363 	int retval;
2364 
2365 	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);
2366 
2367 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2368 	if (cryptodev == NULL)
2369 		return -ENOMEM;
2370 
2371 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2372 		cryptodev->data->dev_private = rte_zmalloc_socket(
2373 					"cryptodev private structure",
2374 					sizeof(struct dpaa_sec_dev_private),
2375 					RTE_CACHE_LINE_SIZE,
2376 					rte_socket_id());
2377 
2378 		if (cryptodev->data->dev_private == NULL)
2379 			rte_panic("Cannot allocate memzone for private "
2380 					"device data");
2381 	}
2382 
2383 	dpaa_dev->crypto_dev = cryptodev;
2384 	cryptodev->device = &dpaa_dev->device;
2385 	cryptodev->device->driver = &dpaa_drv->driver;
2386 
2387 	/* init user callbacks */
2388 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2389 
2390 	/* if sec device version is not configured */
2391 	if (!rta_get_sec_era()) {
2392 		const struct device_node *caam_node;
2393 
2394 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2395 			const uint32_t *prop = of_get_property(caam_node,
2396 					"fsl,sec-era",
2397 					NULL);
2398 			if (prop) {
2399 				rta_set_sec_era(
2400 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2401 				break;
2402 			}
2403 		}
2404 	}
2405 
2406 	/* Invoke PMD device initialization function */
2407 	retval = dpaa_sec_dev_init(cryptodev);
2408 	if (retval == 0)
2409 		return 0;
2410 
2411 	/* In case of error, cleanup is done */
2412 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2413 		rte_free(cryptodev->data->dev_private);
2414 
2415 	rte_cryptodev_pmd_release_device(cryptodev);
2416 
2417 	return -ENXIO;
2418 }
2419 
2420 static int
2421 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2422 {
2423 	struct rte_cryptodev *cryptodev;
2424 	int ret;
2425 
2426 	cryptodev = dpaa_dev->crypto_dev;
2427 	if (cryptodev == NULL)
2428 		return -ENODEV;
2429 
2430 	ret = dpaa_sec_uninit(cryptodev);
2431 	if (ret)
2432 		return ret;
2433 
2434 	return rte_cryptodev_pmd_destroy(cryptodev);
2435 }
2436 
2437 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2438 	.drv_type = FSL_DPAA_CRYPTO,
2439 	.driver = {
2440 		.name = "DPAA SEC PMD"
2441 	},
2442 	.probe = cryptodev_dpaa_sec_probe,
2443 	.remove = cryptodev_dpaa_sec_remove,
2444 };
2445 
2446 static struct cryptodev_driver dpaa_sec_crypto_drv;
2447 
2448 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2449 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2450 		cryptodev_driver_id);
2451 
2452 RTE_INIT(dpaa_sec_init_log);
2453 static void
2454 dpaa_sec_init_log(void)
2455 {
2456 	dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2457 	if (dpaa_logtype_sec >= 0)
2458 		rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2459 }
2460