xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision d81734caccade4dc17d24d2ffd8b71244d35a69f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_cycles.h>
19 #include <rte_dev.h>
20 #include <rte_kvargs.h>
21 #include <rte_malloc.h>
22 #include <rte_mbuf.h>
23 #include <rte_memcpy.h>
24 #include <rte_string_fns.h>
25 
26 #include <fsl_usd.h>
27 #include <fsl_qman.h>
28 #include <of.h>
29 
30 /* RTA header files */
31 #include <hw/desc/common.h>
32 #include <hw/desc/algo.h>
33 #include <hw/desc/ipsec.h>
34 
35 #include <rte_dpaa_bus.h>
36 #include <dpaa_sec.h>
37 #include <dpaa_sec_log.h>
38 
39 enum rta_sec_era rta_sec_era;
40 
41 static uint8_t cryptodev_driver_id;
42 
43 static __thread struct rte_crypto_op **dpaa_sec_ops;
44 static __thread int dpaa_sec_op_nb;
45 
46 static inline void
47 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
48 {
49 	if (!ctx->fd_status) {
50 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
51 	} else {
52 		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
53 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
54 	}
55 
56 	/* report op status to sym->op and then free the ctx memory */
57 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
58 }
59 
60 static inline struct dpaa_sec_op_ctx *
61 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
62 {
63 	struct dpaa_sec_op_ctx *ctx;
64 	int retval;
65 
66 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
67 	if (!ctx || retval) {
68 		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
69 		return NULL;
70 	}
71 	/*
72 	 * Clear the SG memory. There are 16 SG entries of 16 bytes each.
73 	 * One call to dcbz_64() clears 64 bytes, so it is called 4 times to
74 	 * clear all the SG entries. Since dpaa_sec_alloc_ctx() is called for
75 	 * every packet, memset() would be costlier than dcbz_64().
76 	 */
77 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
78 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
79 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
80 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
81 
82 	ctx->ctx_pool = ses->ctx_pool;
83 
84 	return ctx;
85 }
86 
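/* Translate a virtual address to an IO virtual (physical) address by a
 * linear walk of the EAL memseg table. Returns 0 if the address does not
 * belong to any registered memory segment.
 */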
87 static inline rte_iova_t
88 dpaa_mem_vtop(void *vaddr)
89 {
90 	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
91 	uint64_t vaddr_64, paddr;
92 	int i;
93 
94 	vaddr_64 = (uint64_t)vaddr;
95 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
96 		if (vaddr_64 >= memseg[i].addr_64 &&
97 		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
98 			paddr = memseg[i].phys_addr +
99 				(vaddr_64 - memseg[i].addr_64);
100 
101 			return (rte_iova_t)paddr;
102 		}
103 	}
104 	return (rte_iova_t)(NULL);
105 }
106 
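/* Reverse translation: find the memseg containing the given IO virtual
 * address and return the corresponding virtual address, or NULL if the
 * address is not covered by any segment.
 */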
107 static inline void *
108 dpaa_mem_ptov(rte_iova_t paddr)
109 {
110 	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
111 	int i;
112 
113 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
114 		if (paddr >= memseg[i].phys_addr &&
115 		    (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
116 			return (void *)(memseg[i].addr_64 +
117 					(paddr - memseg[i].phys_addr));
118 	}
119 	return NULL;
120 }
121 
122 static void
123 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
124 		   struct qman_fq *fq,
125 		   const struct qm_mr_entry *msg)
126 {
127 	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
128 		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
129 }
130 
131 /* Initialize the queue with the CAAM channel as the destination, so that
132  * all packets enqueued on this queue are dispatched to CAAM.
133  */
134 static int
135 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
136 		 uint32_t fqid_out)
137 {
138 	struct qm_mcc_initfq fq_opts;
139 	uint32_t flags;
140 	int ret = -1;
141 
142 	/* Clear FQ options */
143 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
144 
145 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
146 		QMAN_FQ_FLAG_TO_DCPORTAL;
147 
148 	ret = qman_create_fq(0, flags, fq_in);
149 	if (unlikely(ret != 0)) {
150 		PMD_INIT_LOG(ERR, "qman_create_fq failed");
151 		return ret;
152 	}
153 
154 	flags = QMAN_INITFQ_FLAG_SCHED;
155 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
156 			  QM_INITFQ_WE_CONTEXTB;
157 
158 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
159 	fq_opts.fqd.context_b = fqid_out;
160 	fq_opts.fqd.dest.channel = qm_channel_caam;
161 	fq_opts.fqd.dest.wq = 0;
162 
163 	fq_in->cb.ern  = ern_sec_fq_handler;
164 
165 	ret = qman_init_fq(fq_in, flags, &fq_opts);
166 	if (unlikely(ret != 0))
167 		PMD_INIT_LOG(ERR, "qman_init_fq failed");
168 
169 	return ret;
170 }
171 
172 /* Jobs are enqueued on the in_fq and CAAM puts the crypto result on the out_fq */
173 static enum qman_cb_dqrr_result
174 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
175 		  struct qman_fq *fq __always_unused,
176 		  const struct qm_dqrr_entry *dqrr)
177 {
178 	const struct qm_fd *fd;
179 	struct dpaa_sec_job *job;
180 	struct dpaa_sec_op_ctx *ctx;
181 
182 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
183 		return qman_cb_dqrr_defer;
184 
185 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
186 		return qman_cb_dqrr_consume;
187 
188 	fd = &dqrr->fd;
189 	/* The SG table is embedded in an op ctx:
190 	 * sg[0] is the output,
191 	 * sg[1] is the input.
192 	 */
193 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
194 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
195 	ctx->fd_status = fd->status;
196 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
197 	dpaa_sec_op_ending(ctx);
198 
199 	return qman_cb_dqrr_consume;
200 }
201 
202 /* CAAM results are put into this queue */
203 static int
204 dpaa_sec_init_tx(struct qman_fq *fq)
205 {
206 	int ret;
207 	struct qm_mcc_initfq opts;
208 	uint32_t flags;
209 
210 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
211 		QMAN_FQ_FLAG_DYNAMIC_FQID;
212 
213 	ret = qman_create_fq(0, flags, fq);
214 	if (unlikely(ret)) {
215 		PMD_INIT_LOG(ERR, "qman_create_fq failed");
216 		return ret;
217 	}
218 
219 	memset(&opts, 0, sizeof(opts));
220 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
221 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
222 
223 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
224 
225 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
226 	fq->cb.ern  = ern_sec_fq_handler;
227 
228 	ret = qman_init_fq(fq, 0, &opts);
229 	if (unlikely(ret)) {
230 		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
231 		return ret;
232 	}
233 
234 	return ret;
235 }
236 
237 static inline int is_cipher_only(dpaa_sec_session *ses)
238 {
239 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
240 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
241 }
242 
243 static inline int is_auth_only(dpaa_sec_session *ses)
244 {
245 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
246 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
247 }
248 
249 static inline int is_aead(dpaa_sec_session *ses)
250 {
251 	return ((ses->cipher_alg == 0) &&
252 		(ses->auth_alg == 0) &&
253 		(ses->aead_alg != 0));
254 }
255 
256 static inline int is_auth_cipher(dpaa_sec_session *ses)
257 {
258 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
259 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
260 }
261 
262 static inline int is_encode(dpaa_sec_session *ses)
263 {
264 	return ses->dir == DIR_ENC;
265 }
266 
267 static inline int is_decode(dpaa_sec_session *ses)
268 {
269 	return ses->dir == DIR_DEC;
270 }
271 
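/* Map the session's authentication algorithm to the CAAM algorithm
 * selection (algtype) and additional algorithm information (algmode).
 */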
272 static inline void
273 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
274 {
275 	switch (ses->auth_alg) {
276 	case RTE_CRYPTO_AUTH_NULL:
277 		ses->digest_length = 0;
278 		break;
279 	case RTE_CRYPTO_AUTH_MD5_HMAC:
280 		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
281 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
282 		break;
283 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
284 		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
285 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
286 		break;
287 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
288 		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
289 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
290 		break;
291 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
292 		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
293 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
294 		break;
295 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
296 		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
297 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
298 		break;
299 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
300 		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
301 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
302 		break;
303 	default:
304 		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
305 	}
306 }
307 
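/* Map the session's cipher algorithm to the CAAM algorithm selection and
 * mode of operation (CBC/CTR).
 */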
308 static inline void
309 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
310 {
311 	switch (ses->cipher_alg) {
312 	case RTE_CRYPTO_CIPHER_NULL:
313 		break;
314 	case RTE_CRYPTO_CIPHER_AES_CBC:
315 		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
316 		alginfo_c->algmode = OP_ALG_AAI_CBC;
317 		break;
318 	case RTE_CRYPTO_CIPHER_3DES_CBC:
319 		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
320 		alginfo_c->algmode = OP_ALG_AAI_CBC;
321 		break;
322 	case RTE_CRYPTO_CIPHER_AES_CTR:
323 		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
324 		alginfo_c->algmode = OP_ALG_AAI_CTR;
325 		break;
326 	default:
327 		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
328 	}
329 }
330 
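/* Map the session's AEAD algorithm to the CAAM algorithm selection and
 * mode; only AES-GCM is supported.
 */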
331 static inline void
332 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
333 {
334 	switch (ses->aead_alg) {
335 	case RTE_CRYPTO_AEAD_AES_GCM:
336 		alginfo->algtype = OP_ALG_ALGSEL_AES;
337 		alginfo->algmode = OP_ALG_AAI_GCM;
338 		break;
339 	default:
340 		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
341 	}
342 }
343 
344 
345 /* prepare the shared descriptor command block (CDB) of the session */
346 static int
347 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
348 {
349 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
350 	uint32_t shared_desc_len = 0;
351 	struct sec_cdb *cdb = &ses->qp->cdb;
352 	int err;
353 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
354 	int swap = false;
355 #else
356 	int swap = true;
357 #endif
358 
359 	memset(cdb, 0, sizeof(struct sec_cdb));
360 
361 	if (is_cipher_only(ses)) {
362 		caam_cipher_alg(ses, &alginfo_c);
363 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
364 			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
365 			return -ENOTSUP;
366 		}
367 
368 		alginfo_c.key = (uint64_t)ses->cipher_key.data;
369 		alginfo_c.keylen = ses->cipher_key.length;
370 		alginfo_c.key_enc_flags = 0;
371 		alginfo_c.key_type = RTA_DATA_IMM;
372 
373 		shared_desc_len = cnstr_shdsc_blkcipher(
374 						cdb->sh_desc, true,
375 						swap, &alginfo_c,
376 						NULL,
377 						ses->iv.length,
378 						ses->dir);
379 	} else if (is_auth_only(ses)) {
380 		caam_auth_alg(ses, &alginfo_a);
381 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
382 			PMD_TX_LOG(ERR, "unsupported auth alg\n");
383 			return -ENOTSUP;
384 		}
385 
386 		alginfo_a.key = (uint64_t)ses->auth_key.data;
387 		alginfo_a.keylen = ses->auth_key.length;
388 		alginfo_a.key_enc_flags = 0;
389 		alginfo_a.key_type = RTA_DATA_IMM;
390 
391 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
392 						   swap, &alginfo_a,
393 						   !ses->dir,
394 						   ses->digest_length);
395 	} else if (is_aead(ses)) {
396 		caam_aead_alg(ses, &alginfo);
397 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
398 			PMD_TX_LOG(ERR, "unsupported aead alg\n");
399 			return -ENOTSUP;
400 		}
401 		alginfo.key = (uint64_t)ses->aead_key.data;
402 		alginfo.keylen = ses->aead_key.length;
403 		alginfo.key_enc_flags = 0;
404 		alginfo.key_type = RTA_DATA_IMM;
405 
406 		if (ses->dir == DIR_ENC)
407 			shared_desc_len = cnstr_shdsc_gcm_encap(
408 					cdb->sh_desc, true, swap,
409 					&alginfo,
410 					ses->iv.length,
411 					ses->digest_length);
412 		else
413 			shared_desc_len = cnstr_shdsc_gcm_decap(
414 					cdb->sh_desc, true, swap,
415 					&alginfo,
416 					ses->iv.length,
417 					ses->digest_length);
418 	} else {
419 		caam_cipher_alg(ses, &alginfo_c);
420 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
421 			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
422 			return -ENOTSUP;
423 		}
424 
425 		alginfo_c.key = (uint64_t)ses->cipher_key.data;
426 		alginfo_c.keylen = ses->cipher_key.length;
427 		alginfo_c.key_enc_flags = 0;
428 		alginfo_c.key_type = RTA_DATA_IMM;
429 
430 		caam_auth_alg(ses, &alginfo_a);
431 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
432 			PMD_TX_LOG(ERR, "unsupported auth alg\n");
433 			return -ENOTSUP;
434 		}
435 
436 		alginfo_a.key = (uint64_t)ses->auth_key.data;
437 		alginfo_a.keylen = ses->auth_key.length;
438 		alginfo_a.key_enc_flags = 0;
439 		alginfo_a.key_type = RTA_DATA_IMM;
440 
441 		cdb->sh_desc[0] = alginfo_c.keylen;
442 		cdb->sh_desc[1] = alginfo_a.keylen;
443 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
444 				       MIN_JOB_DESC_SIZE,
445 				       (unsigned int *)cdb->sh_desc,
446 				       &cdb->sh_desc[2], 2);
447 
448 		if (err < 0) {
449 			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
450 			return err;
451 		}
452 		if (cdb->sh_desc[2] & 1)
453 			alginfo_c.key_type = RTA_DATA_IMM;
454 		else {
455 			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
456 							(void *)alginfo_c.key);
457 			alginfo_c.key_type = RTA_DATA_PTR;
458 		}
459 		if (cdb->sh_desc[2] & (1<<1))
460 			alginfo_a.key_type = RTA_DATA_IMM;
461 		else {
462 			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
463 							(void *)alginfo_a.key);
464 			alginfo_a.key_type = RTA_DATA_PTR;
465 		}
466 		cdb->sh_desc[0] = 0;
467 		cdb->sh_desc[1] = 0;
468 		cdb->sh_desc[2] = 0;
469 
470 		/* auth_only_len is set to 0 here; it is overwritten in the
471 		 * FD of each packet.
472 		 */
473 		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
474 				true, swap, &alginfo_c, &alginfo_a,
475 				ses->iv.length, 0,
476 				ses->digest_length, ses->dir);
477 	}
478 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
479 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
480 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
481 
482 	return 0;
483 }
484 
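/* Volatile-dequeue up to 'len' frames from the given frame queue: if the
 * FQ is non-empty, issue a VDQCR command and poll the portal DQRR until
 * the volatile dequeue completes. Returns the number of frames processed
 * by the DQRR callback.
 */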
485 static inline unsigned int
486 dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
487 {
488 	unsigned int pkts = 0;
489 	int ret;
490 	struct qm_mcr_queryfq_np np;
491 	enum qman_fq_state state;
492 	uint32_t flags;
493 	uint32_t vdqcr;
494 
495 	qman_query_fq_np(fq, &np);
496 	if (np.frm_cnt) {
497 		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
498 		if (exact)
499 			vdqcr |= QM_VDQCR_EXACT;
500 		ret = qman_volatile_dequeue(fq, 0, vdqcr);
501 		if (ret)
502 			return 0;
503 		do {
504 			pkts += qman_poll_dqrr(len);
505 			qman_fq_state(fq, &state, &flags);
506 		} while (flags & QMAN_FQ_STATE_VDQCR);
507 	}
508 	return pkts;
509 }
510 
511 /* The qp is lockless; it must be accessed by only one thread */
512 static int
513 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
514 {
515 	struct qman_fq *fq;
516 
517 	fq = &qp->outq;
518 	dpaa_sec_op_nb = 0;
519 	dpaa_sec_ops = ops;
520 
521 	if (unlikely(nb_ops > DPAA_SEC_BURST))
522 		nb_ops = DPAA_SEC_BURST;
523 
524 	return dpaa_volatile_deq(fq, nb_ops, 1);
525 }
526 
527 /**
528  * packet looks like:
529  *		|<----data_len------->|
530  *    |ip_header|ah_header|icv|payload|
531  *              ^
532  *		|
533  *	   mbuf data start (rte_pktmbuf_mtod(mbuf, void *))
534  */
535 static inline struct dpaa_sec_job *
536 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
537 {
538 	struct rte_crypto_sym_op *sym = op->sym;
539 	struct rte_mbuf *mbuf = sym->m_src;
540 	struct dpaa_sec_job *cf;
541 	struct dpaa_sec_op_ctx *ctx;
542 	struct qm_sg_entry *sg;
543 	rte_iova_t start_addr;
544 	uint8_t *old_digest;
545 
546 	ctx = dpaa_sec_alloc_ctx(ses);
547 	if (!ctx)
548 		return NULL;
549 
550 	cf = &ctx->job;
551 	ctx->op = op;
552 	old_digest = ctx->digest;
553 
554 	start_addr = rte_pktmbuf_iova(mbuf);
555 	/* output */
556 	sg = &cf->sg[0];
557 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
558 	sg->length = ses->digest_length;
559 	cpu_to_hw_sg(sg);
560 
561 	/* input */
562 	sg = &cf->sg[1];
563 	if (is_decode(ses)) {
564 		/* need to extend the input to a compound frame */
565 		sg->extension = 1;
566 		qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
567 		sg->length = sym->auth.data.length + ses->digest_length;
568 		sg->final = 1;
569 		cpu_to_hw_sg(sg);
570 
571 		sg = &cf->sg[2];
572 		/* save the received digest first, then point at the data to hash */
573 		rte_memcpy(old_digest, sym->auth.digest.data,
574 			   ses->digest_length);
575 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
576 		sg->length = sym->auth.data.length;
577 		cpu_to_hw_sg(sg);
578 
579 		/* let the hardware verify the digest */
580 		start_addr = dpaa_mem_vtop(old_digest);
581 		sg++;
582 		qm_sg_entry_set64(sg, start_addr);
583 		sg->length = ses->digest_length;
584 		sg->final = 1;
585 		cpu_to_hw_sg(sg);
586 	} else {
587 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
588 		sg->length = sym->auth.data.length;
589 		sg->final = 1;
590 		cpu_to_hw_sg(sg);
591 	}
592 
593 	return cf;
594 }
595 
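/* Build a cipher-only job: sg[0] points at the destination buffer, while
 * the input is a compound frame made of the IV followed by the source data.
 */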
596 static inline struct dpaa_sec_job *
597 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
598 {
599 	struct rte_crypto_sym_op *sym = op->sym;
600 	struct dpaa_sec_job *cf;
601 	struct dpaa_sec_op_ctx *ctx;
602 	struct qm_sg_entry *sg;
603 	rte_iova_t src_start_addr, dst_start_addr;
604 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
605 			ses->iv.offset);
606 
607 	ctx = dpaa_sec_alloc_ctx(ses);
608 	if (!ctx)
609 		return NULL;
610 
611 	cf = &ctx->job;
612 	ctx->op = op;
613 
614 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
615 
616 	if (sym->m_dst)
617 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
618 	else
619 		dst_start_addr = src_start_addr;
620 
621 	/* output */
622 	sg = &cf->sg[0];
623 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
624 	sg->length = sym->cipher.data.length + ses->iv.length;
625 	cpu_to_hw_sg(sg);
626 
627 	/* input */
628 	sg = &cf->sg[1];
629 
630 	/* need to extend the input to a compound frame */
631 	sg->extension = 1;
632 	sg->final = 1;
633 	sg->length = sym->cipher.data.length + ses->iv.length;
634 	qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
635 	cpu_to_hw_sg(sg);
636 
637 	sg = &cf->sg[2];
638 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
639 	sg->length = ses->iv.length;
640 	cpu_to_hw_sg(sg);
641 
642 	sg++;
643 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
644 	sg->length = sym->cipher.data.length;
645 	sg->final = 1;
646 	cpu_to_hw_sg(sg);
647 
648 	return cf;
649 }
650 
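/* Build an AEAD (AES-GCM) job. The input compound frame carries the IV,
 * the optional AAD and the source data; for decryption the received digest
 * is copied into ctx->digest and appended so the hardware can verify it.
 * The output frame carries the destination data and, for encryption, the
 * generated digest.
 */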
651 static inline struct dpaa_sec_job *
652 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
653 {
654 	struct rte_crypto_sym_op *sym = op->sym;
655 	struct dpaa_sec_job *cf;
656 	struct dpaa_sec_op_ctx *ctx;
657 	struct qm_sg_entry *sg;
658 	uint32_t length = 0;
659 	rte_iova_t src_start_addr, dst_start_addr;
660 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
661 			ses->iv.offset);
662 
663 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
664 
665 	if (sym->m_dst)
666 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
667 	else
668 		dst_start_addr = src_start_addr;
669 
670 	ctx = dpaa_sec_alloc_ctx(ses);
671 	if (!ctx)
672 		return NULL;
673 
674 	cf = &ctx->job;
675 	ctx->op = op;
676 
677 	/* input */
678 	rte_prefetch0(cf->sg);
679 	sg = &cf->sg[2];
680 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
681 	if (is_encode(ses)) {
682 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
683 		sg->length = ses->iv.length;
684 		length += sg->length;
685 		cpu_to_hw_sg(sg);
686 
687 		sg++;
688 		if (ses->auth_only_len) {
689 			qm_sg_entry_set64(sg,
690 					  dpaa_mem_vtop(sym->aead.aad.data));
691 			sg->length = ses->auth_only_len;
692 			length += sg->length;
693 			cpu_to_hw_sg(sg);
694 			sg++;
695 		}
696 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
697 		sg->length = sym->aead.data.length;
698 		length += sg->length;
699 		sg->final = 1;
700 		cpu_to_hw_sg(sg);
701 	} else {
702 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
703 		sg->length = ses->iv.length;
704 		length += sg->length;
705 		cpu_to_hw_sg(sg);
706 
707 		sg++;
708 		if (ses->auth_only_len) {
709 			qm_sg_entry_set64(sg,
710 					  dpaa_mem_vtop(sym->aead.aad.data));
711 			sg->length = ses->auth_only_len;
712 			length += sg->length;
713 			cpu_to_hw_sg(sg);
714 			sg++;
715 		}
716 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
717 		sg->length = sym->aead.data.length;
718 		length += sg->length;
719 		cpu_to_hw_sg(sg);
720 
721 		memcpy(ctx->digest, sym->aead.digest.data,
722 		       ses->digest_length);
723 		sg++;
724 
725 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
726 		sg->length = ses->digest_length;
727 		length += sg->length;
728 		sg->final = 1;
729 		cpu_to_hw_sg(sg);
730 	}
731 	/* input compound frame */
732 	cf->sg[1].length = length;
733 	cf->sg[1].extension = 1;
734 	cf->sg[1].final = 1;
735 	cpu_to_hw_sg(&cf->sg[1]);
736 
737 	/* output */
738 	sg++;
739 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
740 	qm_sg_entry_set64(sg,
741 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
742 	sg->length = sym->aead.data.length + ses->auth_only_len;
743 	length = sg->length;
744 	if (is_encode(ses)) {
745 		cpu_to_hw_sg(sg);
746 		/* set auth output */
747 		sg++;
748 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
749 		sg->length = ses->digest_length;
750 		length += sg->length;
751 	}
752 	sg->final = 1;
753 	cpu_to_hw_sg(sg);
754 
755 	/* output compound frame */
756 	cf->sg[0].length = length;
757 	cf->sg[0].extension = 1;
758 	cpu_to_hw_sg(&cf->sg[0]);
759 
760 	return cf;
761 }
762 
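/* Build a chained cipher+auth job. The input compound frame carries the IV
 * and the authentication range; for decryption the received digest is
 * copied into ctx->digest and appended for in-hardware verification. The
 * output frame carries the cipher range and, for encryption, the digest.
 */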
763 static inline struct dpaa_sec_job *
764 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
765 {
766 	struct rte_crypto_sym_op *sym = op->sym;
767 	struct dpaa_sec_job *cf;
768 	struct dpaa_sec_op_ctx *ctx;
769 	struct qm_sg_entry *sg;
770 	rte_iova_t src_start_addr, dst_start_addr;
771 	uint32_t length = 0;
772 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
773 			ses->iv.offset);
774 
775 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
776 	if (sym->m_dst)
777 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
778 	else
779 		dst_start_addr = src_start_addr;
780 
781 	ctx = dpaa_sec_alloc_ctx(ses);
782 	if (!ctx)
783 		return NULL;
784 
785 	cf = &ctx->job;
786 	ctx->op = op;
787 
788 	/* input */
789 	rte_prefetch0(cf->sg);
790 	sg = &cf->sg[2];
791 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
792 	if (is_encode(ses)) {
793 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
794 		sg->length = ses->iv.length;
795 		length += sg->length;
796 		cpu_to_hw_sg(sg);
797 
798 		sg++;
799 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
800 		sg->length = sym->auth.data.length;
801 		length += sg->length;
802 		sg->final = 1;
803 		cpu_to_hw_sg(sg);
804 	} else {
805 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
806 		sg->length = ses->iv.length;
807 		length += sg->length;
808 		cpu_to_hw_sg(sg);
809 
810 		sg++;
811 
812 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
813 		sg->length = sym->auth.data.length;
814 		length += sg->length;
815 		cpu_to_hw_sg(sg);
816 
817 		memcpy(ctx->digest, sym->auth.digest.data,
818 		       ses->digest_length);
819 		sg++;
820 
821 		qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
822 		sg->length = ses->digest_length;
823 		length += sg->length;
824 		sg->final = 1;
825 		cpu_to_hw_sg(sg);
826 	}
827 	/* input compound frame */
828 	cf->sg[1].length = length;
829 	cf->sg[1].extension = 1;
830 	cf->sg[1].final = 1;
831 	cpu_to_hw_sg(&cf->sg[1]);
832 
833 	/* output */
834 	sg++;
835 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
836 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
837 	sg->length = sym->cipher.data.length;
838 	length = sg->length;
839 	if (is_encode(ses)) {
840 		cpu_to_hw_sg(sg);
841 		/* set auth output */
842 		sg++;
843 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
844 		sg->length = ses->digest_length;
845 		length += sg->length;
846 	}
847 	sg->final = 1;
848 	cpu_to_hw_sg(sg);
849 
850 	/* output compound frame */
851 	cf->sg[0].length = length;
852 	cf->sg[0].extension = 1;
853 	cpu_to_hw_sg(&cf->sg[0]);
854 
855 	return cf;
856 }
857 
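/* Build the SEC job for a crypto op and enqueue it as a compound frame on
 * the queue pair's input frame queue. The CDB is (re)built if the session
 * is not yet bound to this queue pair. auth_only_len, when non-zero, is
 * passed through fd.cmd so that it lands in the DPOVRD register.
 */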
858 static int
859 dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
860 {
861 	struct dpaa_sec_job *cf;
862 	dpaa_sec_session *ses;
863 	struct qm_fd fd;
864 	int ret;
865 	uint32_t auth_only_len = op->sym->auth.data.length -
866 				op->sym->cipher.data.length;
867 
868 	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
869 					cryptodev_driver_id);
870 
871 	if (unlikely(!qp->ses || qp->ses != ses)) {
872 		qp->ses = ses;
873 		ses->qp = qp;
874 		ret = dpaa_sec_prep_cdb(ses);
875 		if (ret)
876 			return ret;
877 	}
878 
879 	/*
880 	 * Segmented buffer is not supported.
881 	 */
882 	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
883 		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
884 		return -ENOTSUP;
885 	}
886 	if (is_auth_only(ses)) {
887 		cf = build_auth_only(op, ses);
888 	} else if (is_cipher_only(ses)) {
889 		cf = build_cipher_only(op, ses);
890 	} else if (is_aead(ses)) {
891 		cf = build_cipher_auth_gcm(op, ses);
892 		auth_only_len = ses->auth_only_len;
893 	} else if (is_auth_cipher(ses)) {
894 		cf = build_cipher_auth(op, ses);
895 	} else {
896 		PMD_TX_LOG(ERR, "not supported sec op");
897 		return -ENOTSUP;
898 	}
899 	if (unlikely(!cf))
900 		return -ENOMEM;
901 
902 	memset(&fd, 0, sizeof(struct qm_fd));
903 	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
904 	fd._format1 = qm_fd_compound;
905 	fd.length29 = 2 * sizeof(struct qm_sg_entry);
906 	/* auth_only_len is set to 0 in the descriptor and is overwritten
907 	 * here in fd.cmd, which updates the DPOVRD register.
908 	 */
909 	if (auth_only_len)
910 		fd.cmd = 0x80000000 | auth_only_len;
911 	do {
912 		ret = qman_enqueue(&qp->inq, &fd, 0);
913 	} while (ret != 0);
914 
915 	return 0;
916 }
917 
918 static uint16_t
919 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
920 		       uint16_t nb_ops)
921 {
922 	/* Transmit the frames to the given device and queue pair */
923 	uint32_t loop;
924 	int32_t ret;
925 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
926 	uint16_t num_tx = 0;
927 
928 	if (unlikely(nb_ops == 0))
929 		return 0;
930 
931 	/* Prepare each packet to be sent */
932 	for (loop = 0; loop < nb_ops; loop++) {
933 		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
934 			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
935 			return 0;
936 		}
937 		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
938 		if (!ret)
939 			num_tx++;
940 	}
941 	dpaa_qp->tx_pkts += num_tx;
942 	dpaa_qp->tx_errs += nb_ops - num_tx;
943 
944 	return num_tx;
945 }
946 
947 static uint16_t
948 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
949 		       uint16_t nb_ops)
950 {
951 	uint16_t num_rx;
952 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
953 
954 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
955 
956 	dpaa_qp->rx_pkts += num_rx;
957 	dpaa_qp->rx_errs += nb_ops - num_rx;
958 
959 	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
960 
961 	return num_rx;
962 }
963 
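/*
 * Minimal usage sketch (hypothetical application code, not part of this
 * driver) showing how ops reach the burst functions above, assuming the
 * generic cryptodev API of this release:
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_queue_pairs = 1,
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 128 };
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *				       SOCKET_ID_ANY, NULL);
 *	rte_cryptodev_start(dev_id);
 *	...
 *	nb_enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	nb_deq = rte_cryptodev_dequeue_burst(dev_id, 0, ops, nb_ops);
 */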
964 /** Release queue pair */
965 static int
966 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
967 			    uint16_t qp_id)
968 {
969 	struct dpaa_sec_dev_private *internals;
970 	struct dpaa_sec_qp *qp = NULL;
971 
972 	PMD_INIT_FUNC_TRACE();
973 
974 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
975 
976 	internals = dev->data->dev_private;
977 	if (qp_id >= internals->max_nb_queue_pairs) {
978 		PMD_INIT_LOG(ERR, "Max supported qpid %d",
979 			     internals->max_nb_queue_pairs);
980 		return -EINVAL;
981 	}
982 
983 	qp = &internals->qps[qp_id];
984 	qp->internals = NULL;
985 	dev->data->queue_pairs[qp_id] = NULL;
986 
987 	return 0;
988 }
989 
990 /** Setup a queue pair */
991 static int
992 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
993 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
994 		__rte_unused int socket_id,
995 		__rte_unused struct rte_mempool *session_pool)
996 {
997 	struct dpaa_sec_dev_private *internals;
998 	struct dpaa_sec_qp *qp = NULL;
999 
1000 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
1001 		     dev, qp_id, qp_conf);
1002 
1003 	internals = dev->data->dev_private;
1004 	if (qp_id >= internals->max_nb_queue_pairs) {
1005 		PMD_INIT_LOG(ERR, "Max supported qpid %d",
1006 			     internals->max_nb_queue_pairs);
1007 		return -EINVAL;
1008 	}
1009 
1010 	qp = &internals->qps[qp_id];
1011 	qp->internals = internals;
1012 	dev->data->queue_pairs[qp_id] = qp;
1013 
1014 	return 0;
1015 }
1016 
1017 /** Start queue pair */
1018 static int
1019 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1020 			  __rte_unused uint16_t queue_pair_id)
1021 {
1022 	PMD_INIT_FUNC_TRACE();
1023 
1024 	return 0;
1025 }
1026 
1027 /** Stop queue pair */
1028 static int
1029 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1030 			 __rte_unused uint16_t queue_pair_id)
1031 {
1032 	PMD_INIT_FUNC_TRACE();
1033 
1034 	return 0;
1035 }
1036 
1037 /** Return the number of allocated queue pairs */
1038 static uint32_t
1039 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1040 {
1041 	PMD_INIT_FUNC_TRACE();
1042 
1043 	return dev->data->nb_queue_pairs;
1044 }
1045 
1046 /** Returns the size of session structure */
1047 static unsigned int
1048 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1049 {
1050 	PMD_INIT_FUNC_TRACE();
1051 
1052 	return sizeof(dpaa_sec_session);
1053 }
1054 
1055 static int
1056 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1057 		     struct rte_crypto_sym_xform *xform,
1058 		     dpaa_sec_session *session)
1059 {
1060 	session->cipher_alg = xform->cipher.algo;
1061 	session->iv.length = xform->cipher.iv.length;
1062 	session->iv.offset = xform->cipher.iv.offset;
1063 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1064 					       RTE_CACHE_LINE_SIZE);
1065 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1066 		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
1067 		return -ENOMEM;
1068 	}
1069 	session->cipher_key.length = xform->cipher.key.length;
1070 
1071 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1072 	       xform->cipher.key.length);
1073 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1074 			DIR_ENC : DIR_DEC;
1075 
1076 	return 0;
1077 }
1078 
1079 static int
1080 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1081 		   struct rte_crypto_sym_xform *xform,
1082 		   dpaa_sec_session *session)
1083 {
1084 	session->auth_alg = xform->auth.algo;
1085 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1086 					     RTE_CACHE_LINE_SIZE);
1087 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1088 		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
1089 		return -ENOMEM;
1090 	}
1091 	session->auth_key.length = xform->auth.key.length;
1092 	session->digest_length = xform->auth.digest_length;
1093 
1094 	memcpy(session->auth_key.data, xform->auth.key.data,
1095 	       xform->auth.key.length);
1096 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1097 			DIR_ENC : DIR_DEC;
1098 
1099 	return 0;
1100 }
1101 
1102 static int
1103 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1104 		   struct rte_crypto_sym_xform *xform,
1105 		   dpaa_sec_session *session)
1106 {
1107 	session->aead_alg = xform->aead.algo;
1108 	session->iv.length = xform->aead.iv.length;
1109 	session->iv.offset = xform->aead.iv.offset;
1110 	session->auth_only_len = xform->aead.aad_length;
1111 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1112 					     RTE_CACHE_LINE_SIZE);
1113 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1114 		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
1115 		return -ENOMEM;
1116 	}
1117 	session->aead_key.length = xform->aead.key.length;
1118 	session->digest_length = xform->aead.digest_length;
1119 
1120 	memcpy(session->aead_key.data, xform->aead.key.data,
1121 	       xform->aead.key.length);
1122 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1123 			DIR_ENC : DIR_DEC;
1124 
1125 	return 0;
1126 }
1127 
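/** Attach a session to a queue pair and prepare its shared descriptor */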
1128 static int
1129 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
1130 {
1131 	dpaa_sec_session *sess = ses;
1132 	struct dpaa_sec_qp *qp;
1133 
1134 	PMD_INIT_FUNC_TRACE();
1135 
1136 	qp = dev->data->queue_pairs[qp_id];
1137 	if (qp->ses != NULL) {
1138 		PMD_INIT_LOG(ERR, "qp in-use by another session\n");
1139 		return -EBUSY;
1140 	}
1141 
1142 	qp->ses = sess;
1143 	sess->qp = qp;
1144 
1145 	return dpaa_sec_prep_cdb(sess);
1146 }
1147 
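/** Detach the session currently attached to a queue pair */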
1148 static int
1149 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
1150 {
1151 	dpaa_sec_session *sess = ses;
1152 	struct dpaa_sec_qp *qp;
1153 
1154 	PMD_INIT_FUNC_TRACE();
1155 
1156 	qp = dev->data->queue_pairs[qp_id];
1157 	if (qp->ses != NULL) {
1158 		qp->ses = NULL;
1159 		sess->qp = NULL;
1160 		return 0;
1161 	}
1162 
1163 	PMD_DRV_LOG(ERR, "No session attached to qp");
1164 	return -EINVAL;
1165 }
1166 
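/** Parse the transform chain and fill in the session parameters */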
1167 static int
1168 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1169 			    struct rte_crypto_sym_xform *xform,	void *sess)
1170 {
1171 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1172 	dpaa_sec_session *session = sess;
1173 
1174 	PMD_INIT_FUNC_TRACE();
1175 
1176 	if (unlikely(sess == NULL)) {
1177 		RTE_LOG(ERR, PMD, "invalid session struct\n");
1178 		return -EINVAL;
1179 	}
1180 
1181 	/* Default IV length = 0 */
1182 	session->iv.length = 0;
1183 
1184 	/* Cipher Only */
1185 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1186 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1187 		dpaa_sec_cipher_init(dev, xform, session);
1188 
1189 	/* Authentication Only */
1190 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1191 		   xform->next == NULL) {
1192 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1193 		dpaa_sec_auth_init(dev, xform, session);
1194 
1195 	/* Cipher then Authenticate */
1196 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1197 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1198 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1199 			dpaa_sec_cipher_init(dev, xform, session);
1200 			dpaa_sec_auth_init(dev, xform->next, session);
1201 		} else {
1202 			PMD_DRV_LOG(ERR, "Not supported: Cipher then Auth with decrypt op");
1203 			return -EINVAL;
1204 		}
1205 
1206 	/* Authenticate then Cipher */
1207 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1208 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1209 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1210 			dpaa_sec_auth_init(dev, xform, session);
1211 			dpaa_sec_cipher_init(dev, xform->next, session);
1212 		} else {
1213 			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher with encrypt op");
1214 			return -EINVAL;
1215 		}
1216 
1217 	/* AEAD operation for AES-GCM kind of Algorithms */
1218 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1219 		   xform->next == NULL) {
1220 		dpaa_sec_aead_init(dev, xform, session);
1221 
1222 	} else {
1223 		PMD_DRV_LOG(ERR, "Invalid crypto type");
1224 		return -EINVAL;
1225 	}
1226 	session->ctx_pool = internals->ctx_pool;
1227 
1228 	return 0;
1229 }
1230 
1231 static int
1232 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1233 		struct rte_crypto_sym_xform *xform,
1234 		struct rte_cryptodev_sym_session *sess,
1235 		struct rte_mempool *mempool)
1236 {
1237 	void *sess_private_data;
1238 	int ret;
1239 
1240 	PMD_INIT_FUNC_TRACE();
1241 
1242 	if (rte_mempool_get(mempool, &sess_private_data)) {
1243 		CDEV_LOG_ERR(
1244 			"Couldn't get object from session mempool");
1245 		return -ENOMEM;
1246 	}
1247 
1248 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1249 	if (ret != 0) {
1250 		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1251 				"session parameters");
1252 
1253 		/* Return session to mempool */
1254 		rte_mempool_put(mempool, sess_private_data);
1255 		return ret;
1256 	}
1257 
1258 	set_session_private_data(sess, dev->driver_id,
1259 			sess_private_data);
1260 
1261 	return 0;
1262 }
1263 
1264 /** Clear the memory of session so it doesn't leave key material behind */
1265 static void
1266 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1267 		struct rte_cryptodev_sym_session *sess)
1268 {
1269 	PMD_INIT_FUNC_TRACE();
1270 	uint8_t index = dev->driver_id;
1271 	void *sess_priv = get_session_private_data(sess, index);
1272 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1273 
1274 	if (sess_priv) {
1275 		rte_free(s->cipher_key.data);
1276 		rte_free(s->auth_key.data);
1277 		memset(s, 0, sizeof(dpaa_sec_session));
1278 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1279 		set_session_private_data(sess, index, NULL);
1280 		rte_mempool_put(sess_mp, sess_priv);
1281 	}
1282 }
1283 
1284 static int
1285 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
1286 		       struct rte_cryptodev_config *config __rte_unused)
1287 {
1288 	PMD_INIT_FUNC_TRACE();
1289 
1290 	return 0;
1291 }
1292 
1293 static int
1294 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
1295 {
1296 	PMD_INIT_FUNC_TRACE();
1297 	return 0;
1298 }
1299 
1300 static void
1301 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
1302 {
1303 	PMD_INIT_FUNC_TRACE();
1304 }
1305 
1306 static int
1307 dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
1308 {
1309 	PMD_INIT_FUNC_TRACE();
1310 	return 0;
1311 }
1312 
1313 static void
1314 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
1315 		       struct rte_cryptodev_info *info)
1316 {
1317 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1318 
1319 	PMD_INIT_FUNC_TRACE();
1320 	if (info != NULL) {
1321 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
1322 		info->feature_flags = dev->feature_flags;
1323 		info->capabilities = dpaa_sec_capabilities;
1324 		info->sym.max_nb_sessions = internals->max_nb_sessions;
1325 		info->sym.max_nb_sessions_per_qp =
1326 			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
1327 		info->driver_id = cryptodev_driver_id;
1328 	}
1329 }
1330 
1331 static struct rte_cryptodev_ops crypto_ops = {
1332 	.dev_configure	      = dpaa_sec_dev_configure,
1333 	.dev_start	      = dpaa_sec_dev_start,
1334 	.dev_stop	      = dpaa_sec_dev_stop,
1335 	.dev_close	      = dpaa_sec_dev_close,
1336 	.dev_infos_get        = dpaa_sec_dev_infos_get,
1337 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
1338 	.queue_pair_release   = dpaa_sec_queue_pair_release,
1339 	.queue_pair_start     = dpaa_sec_queue_pair_start,
1340 	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
1341 	.queue_pair_count     = dpaa_sec_queue_pair_count,
1342 	.session_get_size     = dpaa_sec_session_get_size,
1343 	.session_configure    = dpaa_sec_session_configure,
1344 	.session_clear        = dpaa_sec_session_clear,
1345 	.qp_attach_session    = dpaa_sec_qp_attach_sess,
1346 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
1347 };
1348 
1349 static int
1350 dpaa_sec_uninit(struct rte_cryptodev *dev)
1351 {
1352 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1353 
1354 	if (dev == NULL)
1355 		return -ENODEV;
1356 
1357 	rte_mempool_free(internals->ctx_pool);
1358 	rte_free(internals);
1359 
1360 	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
1361 		     dev->data->name, rte_socket_id());
1362 
1363 	return 0;
1364 }
1365 
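/* Per-device initialization: set the ops and feature flags, create the
 * input/output frame queues of every queue pair and the per-device op
 * context pool.
 */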
1366 static int
1367 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
1368 {
1369 	struct dpaa_sec_dev_private *internals;
1370 	struct dpaa_sec_qp *qp;
1371 	uint32_t i;
1372 	int ret;
1373 	char str[20];
1374 
1375 	PMD_INIT_FUNC_TRACE();
1376 
1377 	cryptodev->driver_id = cryptodev_driver_id;
1378 	cryptodev->dev_ops = &crypto_ops;
1379 
1380 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
1381 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
1382 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
1383 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
1384 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
1385 
1386 	internals = cryptodev->data->dev_private;
1387 	internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
1388 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
1389 
1390 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
1391 		/* init qman fq for queue pair */
1392 		qp = &internals->qps[i];
1393 		ret = dpaa_sec_init_tx(&qp->outq);
1394 		if (ret) {
1395 			PMD_INIT_LOG(ERR, "failed to config tx of queue pair %d", i);
1396 			goto init_error;
1397 		}
1398 		ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
1399 				       qman_fq_fqid(&qp->outq));
1400 		if (ret) {
1401 			PMD_INIT_LOG(ERR, "failed to config rx of queue pair %d", i);
1402 			goto init_error;
1403 		}
1404 	}
1405 
1406 	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
1407 	internals->ctx_pool = rte_mempool_create((const char *)str,
1408 			CTX_POOL_NUM_BUFS,
1409 			CTX_POOL_BUF_SIZE,
1410 			CTX_POOL_CACHE_SIZE, 0,
1411 			NULL, NULL, NULL, NULL,
1412 			SOCKET_ID_ANY, 0);
1413 	if (!internals->ctx_pool) {
1414 		RTE_LOG(ERR, PMD, "%s create failed\n", str);
1415 		goto init_error;
1416 	}
1417 
1418 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
1419 	return 0;
1420 
1421 init_error:
1422 	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
1423 
1424 	dpaa_sec_uninit(cryptodev);
1425 	return -EFAULT;
1426 }
1427 
1428 static int
1429 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
1430 				struct rte_dpaa_device *dpaa_dev)
1431 {
1432 	struct rte_cryptodev *cryptodev;
1433 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1434 
1435 	int retval;
1436 
1437 	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
1438 
1439 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
1440 	if (cryptodev == NULL)
1441 		return -ENOMEM;
1442 
1443 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1444 		cryptodev->data->dev_private = rte_zmalloc_socket(
1445 					"cryptodev private structure",
1446 					sizeof(struct dpaa_sec_dev_private),
1447 					RTE_CACHE_LINE_SIZE,
1448 					rte_socket_id());
1449 
1450 		if (cryptodev->data->dev_private == NULL)
1451 			rte_panic("Cannot allocate memzone for private "
1452 					"device data");
1453 	}
1454 
1455 	dpaa_dev->crypto_dev = cryptodev;
1456 	cryptodev->device = &dpaa_dev->device;
1457 	cryptodev->device->driver = &dpaa_drv->driver;
1458 
1459 	/* init user callbacks */
1460 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
1461 
1462 	/* if the SEC era is not configured, read it from the device tree */
1463 	if (!rta_get_sec_era()) {
1464 		const struct device_node *caam_node;
1465 
1466 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
1467 			const uint32_t *prop = of_get_property(caam_node,
1468 					"fsl,sec-era",
1469 					NULL);
1470 			if (prop) {
1471 				rta_set_sec_era(
1472 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
1473 				break;
1474 			}
1475 		}
1476 	}
1477 
1478 	/* Invoke PMD device initialization function */
1479 	retval = dpaa_sec_dev_init(cryptodev);
1480 	if (retval == 0)
1481 		return 0;
1482 
1483 	/* In case of error, cleanup is done */
1484 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1485 		rte_free(cryptodev->data->dev_private);
1486 
1487 	rte_cryptodev_pmd_release_device(cryptodev);
1488 
1489 	return -ENXIO;
1490 }
1491 
1492 static int
1493 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
1494 {
1495 	struct rte_cryptodev *cryptodev;
1496 	int ret;
1497 
1498 	cryptodev = dpaa_dev->crypto_dev;
1499 	if (cryptodev == NULL)
1500 		return -ENODEV;
1501 
1502 	ret = dpaa_sec_uninit(cryptodev);
1503 	if (ret)
1504 		return ret;
1505 
1506 	return rte_cryptodev_pmd_destroy(cryptodev);
1507 }
1508 
1509 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
1510 	.drv_type = FSL_DPAA_CRYPTO,
1511 	.driver = {
1512 		.name = "DPAA SEC PMD"
1513 	},
1514 	.probe = cryptodev_dpaa_sec_probe,
1515 	.remove = cryptodev_dpaa_sec_remove,
1516 };
1517 
1518 static struct cryptodev_driver dpaa_sec_crypto_drv;
1519 
1520 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
1521 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
1522 		cryptodev_driver_id);
1523