xref: /dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c (revision a74af788c6324d4b2284d55b282b4aa3522e3135)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7 
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12 
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30 
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35 
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39 
40 enum rta_sec_era rta_sec_era;
41 
42 static uint8_t cryptodev_driver_id;
43 
44 static __thread struct rte_crypto_op **dpaa_sec_ops;
45 static __thread int dpaa_sec_op_nb;
46 
47 static int
48 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
49 
50 static inline void
51 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
52 {
53 	if (!ctx->fd_status) {
54 		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
55 	} else {
56 		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
57 		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
58 	}
59 
60 	/* report op status to sym->op and then free the ctx memory */
61 	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
62 }
63 
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
66 {
67 	struct dpaa_sec_op_ctx *ctx;
68 	int retval;
69 
70 	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
71 	if (!ctx || retval) {
72 		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
73 		return NULL;
74 	}
75 	/*
76 	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
77 	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
78 	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
79 	 * for each packet, memset() would be costlier than dcbz_64().
80 	 */
81 	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
82 	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
83 	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
84 	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
85 
86 	ctx->ctx_pool = ses->ctx_pool;
87 	ctx->vtop_offset = (uint64_t) ctx
88 				- rte_mempool_virt2iova(ctx);
89 
90 	return ctx;
91 }
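
/*
 * Illustrative note (not upstream code): the four dcbz_64() calls above
 * rely on the job SG array spanning exactly four 64-byte cachelines,
 * i.e. 16 entries * 16 bytes each. A hypothetical compile-time guard,
 * with MAX_JOB_SG_ENTRIES standing in for the real entry count:
 */
#if 0
_Static_assert(MAX_JOB_SG_ENTRIES * sizeof(struct qm_sg_entry) == 4 * 64,
	       "job SG array must span exactly four 64B cachelines");
#endif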
92 
93 static inline rte_iova_t
94 dpaa_mem_vtop(void *vaddr)
95 {
96 	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
97 	uint64_t vaddr_64, paddr;
98 	int i;
99 
100 	vaddr_64 = (uint64_t)vaddr;
101 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
102 		if (vaddr_64 >= memseg[i].addr_64 &&
103 		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
104 			paddr = memseg[i].iova +
105 				(vaddr_64 - memseg[i].addr_64);
106 
107 			return (rte_iova_t)paddr;
108 		}
109 	}
110 	return (rte_iova_t)(NULL);
111 }
112 
113 /* virtual address conversion when mempool support is available for ctx */
114 static inline phys_addr_t
115 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
116 {
117 	return (uint64_t)vaddr - ctx->vtop_offset;
118 }
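
/*
 * Why the ctx-relative conversion works (illustrative note): vtop_offset
 * was computed at allocation time as (ctx virtual address - ctx IOVA),
 * and every address inside the same mempool object shares that offset:
 *
 *	iova(vaddr) = vaddr - vtop_offset
 *
 * This avoids the linear memseg walk that dpaa_mem_vtop() performs.
 */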
119 
120 static inline void *
121 dpaa_mem_ptov(rte_iova_t paddr)
122 {
123 	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
124 	int i;
125 
126 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
127 		if (paddr >= memseg[i].iova &&
128 		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
129 			return (void *)(memseg[i].addr_64 +
130 					(paddr - memseg[i].iova));
131 	}
132 	return NULL;
133 }
134 
135 static void
136 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
137 		   struct qman_fq *fq,
138 		   const struct qm_mr_entry *msg)
139 {
140 	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
141 		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
142 }
143 
144 /* Initialize the queue with the CAAM channel as destination so that
145  * all packets in this queue can be dispatched into CAAM.
146  */
147 static int
148 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
149 		 uint32_t fqid_out)
150 {
151 	struct qm_mcc_initfq fq_opts;
152 	uint32_t flags;
153 	int ret = -1;
154 
155 	/* Clear FQ options */
156 	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
157 
158 	flags = QMAN_INITFQ_FLAG_SCHED;
159 	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
160 			  QM_INITFQ_WE_CONTEXTB;
161 
162 	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
163 	fq_opts.fqd.context_b = fqid_out;
164 	fq_opts.fqd.dest.channel = qm_channel_caam;
165 	fq_opts.fqd.dest.wq = 0;
166 
167 	fq_in->cb.ern  = ern_sec_fq_handler;
168 
169 	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
170 
171 	ret = qman_init_fq(fq_in, flags, &fq_opts);
172 	if (unlikely(ret != 0))
173 		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
174 
175 	return ret;
176 }
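
/*
 * Summary of the mapping programmed above (illustrative note): context_a
 * carries the IOVA of the session's shared descriptor (the CDB),
 * context_b carries the FQID that SEC enqueues results to, and the
 * destination channel qm_channel_caam steers this frame queue into the
 * CAAM block.
 */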
177 
178 /* a frame is put into in_fq and CAAM puts the crypto result into out_fq */
179 static enum qman_cb_dqrr_result
180 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
181 		  struct qman_fq *fq __always_unused,
182 		  const struct qm_dqrr_entry *dqrr)
183 {
184 	const struct qm_fd *fd;
185 	struct dpaa_sec_job *job;
186 	struct dpaa_sec_op_ctx *ctx;
187 
188 	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
189 		return qman_cb_dqrr_defer;
190 
191 	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
192 		return qman_cb_dqrr_consume;
193 
194 	fd = &dqrr->fd;
195 	/* sg is embedded in an op ctx,
196 	 * sg[0] is for output
197 	 * sg[1] for input
198 	 */
199 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
200 
201 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
202 	ctx->fd_status = fd->status;
203 	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
204 		struct qm_sg_entry *sg_out;
205 		uint32_t len;
206 
207 		sg_out = &job->sg[0];
208 		hw_sg_to_cpu(sg_out);
209 		len = sg_out->length;
210 		ctx->op->sym->m_src->pkt_len = len;
211 		ctx->op->sym->m_src->data_len = len;
212 	}
213 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
214 	dpaa_sec_op_ending(ctx);
215 
216 	return qman_cb_dqrr_consume;
217 }
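
/*
 * Compound frame layout assumed by the callback above (illustrative note):
 *
 *	fd.addr ---> cf->sg[0]  output SG entry
 *	             cf->sg[1]  input SG entry
 *
 * container_of() recovers the enclosing dpaa_sec_op_ctx from the embedded
 * dpaa_sec_job, which is how the originating crypto op is found again.
 */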
218 
219 /* caam result is put into this queue */
220 static int
221 dpaa_sec_init_tx(struct qman_fq *fq)
222 {
223 	int ret;
224 	struct qm_mcc_initfq opts;
225 	uint32_t flags;
226 
227 	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
228 		QMAN_FQ_FLAG_DYNAMIC_FQID;
229 
230 	ret = qman_create_fq(0, flags, fq);
231 	if (unlikely(ret)) {
232 		PMD_INIT_LOG(ERR, "qman_create_fq failed");
233 		return ret;
234 	}
235 
236 	memset(&opts, 0, sizeof(opts));
237 	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
238 		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
239 
240 	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
241 
242 	fq->cb.dqrr = dqrr_out_fq_cb_rx;
243 	fq->cb.ern  = ern_sec_fq_handler;
244 
245 	ret = qman_init_fq(fq, 0, &opts);
246 	if (unlikely(ret)) {
247 		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
248 		return ret;
249 	}
250 
251 	return ret;
252 }
253 
254 static inline int is_cipher_only(dpaa_sec_session *ses)
255 {
256 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
257 		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
258 }
259 
260 static inline int is_auth_only(dpaa_sec_session *ses)
261 {
262 	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
263 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
264 }
265 
266 static inline int is_aead(dpaa_sec_session *ses)
267 {
268 	return ((ses->cipher_alg == 0) &&
269 		(ses->auth_alg == 0) &&
270 		(ses->aead_alg != 0));
271 }
272 
273 static inline int is_auth_cipher(dpaa_sec_session *ses)
274 {
275 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
276 		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
277 		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
278 }
279 
280 static inline int is_proto_ipsec(dpaa_sec_session *ses)
281 {
282 	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
283 }
284 
285 static inline int is_encode(dpaa_sec_session *ses)
286 {
287 	return ses->dir == DIR_ENC;
288 }
289 
290 static inline int is_decode(dpaa_sec_session *ses)
291 {
292 	return ses->dir == DIR_DEC;
293 }
294 
295 static inline void
296 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
297 {
298 	switch (ses->auth_alg) {
299 	case RTE_CRYPTO_AUTH_NULL:
300 		ses->digest_length = 0;
301 		break;
302 	case RTE_CRYPTO_AUTH_MD5_HMAC:
303 		alginfo_a->algtype =
304 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
305 			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
306 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
307 		break;
308 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
309 		alginfo_a->algtype =
310 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
311 			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
312 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
313 		break;
314 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
315 		alginfo_a->algtype =
316 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
317 			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
318 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
319 		break;
320 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
321 		alginfo_a->algtype =
322 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
323 			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
324 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
325 		break;
326 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
327 		alginfo_a->algtype =
328 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
329 			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
330 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
331 		break;
332 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
333 		alginfo_a->algtype =
334 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
335 			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
336 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
337 		break;
338 	default:
339 		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
340 	}
341 }
342 
343 static inline void
344 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
345 {
346 	switch (ses->cipher_alg) {
347 	case RTE_CRYPTO_CIPHER_NULL:
348 		break;
349 	case RTE_CRYPTO_CIPHER_AES_CBC:
350 		alginfo_c->algtype =
351 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
352 			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
353 		alginfo_c->algmode = OP_ALG_AAI_CBC;
354 		break;
355 	case RTE_CRYPTO_CIPHER_3DES_CBC:
356 		alginfo_c->algtype =
357 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
358 			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
359 		alginfo_c->algmode = OP_ALG_AAI_CBC;
360 		break;
361 	case RTE_CRYPTO_CIPHER_AES_CTR:
362 		alginfo_c->algtype =
363 			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
364 			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
365 		alginfo_c->algmode = OP_ALG_AAI_CTR;
366 		break;
367 	default:
368 		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
369 	}
370 }
371 
372 static inline void
373 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
374 {
375 	switch (ses->aead_alg) {
376 	case RTE_CRYPTO_AEAD_AES_GCM:
377 		alginfo->algtype = OP_ALG_ALGSEL_AES;
378 		alginfo->algmode = OP_ALG_AAI_GCM;
379 		break;
380 	default:
381 		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
382 	}
383 }
384 
385 
386 /* prepare command block of the session */
387 static int
388 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
389 {
390 	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
391 	uint32_t shared_desc_len = 0;
392 	struct sec_cdb *cdb = &ses->cdb;
393 	int err;
394 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
395 	int swap = false;
396 #else
397 	int swap = true;
398 #endif
399 
400 	memset(cdb, 0, sizeof(struct sec_cdb));
401 
402 	if (is_cipher_only(ses)) {
403 		caam_cipher_alg(ses, &alginfo_c);
404 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
405 			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
406 			return -ENOTSUP;
407 		}
408 
409 		alginfo_c.key = (uint64_t)ses->cipher_key.data;
410 		alginfo_c.keylen = ses->cipher_key.length;
411 		alginfo_c.key_enc_flags = 0;
412 		alginfo_c.key_type = RTA_DATA_IMM;
413 
414 		shared_desc_len = cnstr_shdsc_blkcipher(
415 						cdb->sh_desc, true,
416 						swap, &alginfo_c,
417 						NULL,
418 						ses->iv.length,
419 						ses->dir);
420 	} else if (is_auth_only(ses)) {
421 		caam_auth_alg(ses, &alginfo_a);
422 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
423 			PMD_TX_LOG(ERR, "unsupported auth alg\n");
424 			return -ENOTSUP;
425 		}
426 
427 		alginfo_a.key = (uint64_t)ses->auth_key.data;
428 		alginfo_a.keylen = ses->auth_key.length;
429 		alginfo_a.key_enc_flags = 0;
430 		alginfo_a.key_type = RTA_DATA_IMM;
431 
432 		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
433 						   swap, &alginfo_a,
434 						   !ses->dir,
435 						   ses->digest_length);
436 	} else if (is_aead(ses)) {
437 		caam_aead_alg(ses, &alginfo);
438 		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
439 			PMD_TX_LOG(ERR, "unsupported aead alg\n");
440 			return -ENOTSUP;
441 		}
442 		alginfo.key = (uint64_t)ses->aead_key.data;
443 		alginfo.keylen = ses->aead_key.length;
444 		alginfo.key_enc_flags = 0;
445 		alginfo.key_type = RTA_DATA_IMM;
446 
447 		if (ses->dir == DIR_ENC)
448 			shared_desc_len = cnstr_shdsc_gcm_encap(
449 					cdb->sh_desc, true, swap,
450 					&alginfo,
451 					ses->iv.length,
452 					ses->digest_length);
453 		else
454 			shared_desc_len = cnstr_shdsc_gcm_decap(
455 					cdb->sh_desc, true, swap,
456 					&alginfo,
457 					ses->iv.length,
458 					ses->digest_length);
459 	} else {
460 		caam_cipher_alg(ses, &alginfo_c);
461 		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
462 			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
463 			return -ENOTSUP;
464 		}
465 
466 		alginfo_c.key = (uint64_t)ses->cipher_key.data;
467 		alginfo_c.keylen = ses->cipher_key.length;
468 		alginfo_c.key_enc_flags = 0;
469 		alginfo_c.key_type = RTA_DATA_IMM;
470 
471 		caam_auth_alg(ses, &alginfo_a);
472 		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
473 			PMD_TX_LOG(ERR, "unsupported auth alg\n");
474 			return -ENOTSUP;
475 		}
476 
477 		alginfo_a.key = (uint64_t)ses->auth_key.data;
478 		alginfo_a.keylen = ses->auth_key.length;
479 		alginfo_a.key_enc_flags = 0;
480 		alginfo_a.key_type = RTA_DATA_IMM;
481 
482 		cdb->sh_desc[0] = alginfo_c.keylen;
483 		cdb->sh_desc[1] = alginfo_a.keylen;
484 		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
485 				       MIN_JOB_DESC_SIZE,
486 				       (unsigned int *)cdb->sh_desc,
487 				       &cdb->sh_desc[2], 2);
488 
489 		if (err < 0) {
490 			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
491 			return err;
492 		}
493 		if (cdb->sh_desc[2] & 1)
494 			alginfo_c.key_type = RTA_DATA_IMM;
495 		else {
496 			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
497 							(void *)alginfo_c.key);
498 			alginfo_c.key_type = RTA_DATA_PTR;
499 		}
500 		if (cdb->sh_desc[2] & (1<<1))
501 			alginfo_a.key_type = RTA_DATA_IMM;
502 		else {
503 			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
504 							(void *)alginfo_a.key);
505 			alginfo_a.key_type = RTA_DATA_PTR;
506 		}
507 		cdb->sh_desc[0] = 0;
508 		cdb->sh_desc[1] = 0;
509 		cdb->sh_desc[2] = 0;
510 		if (is_proto_ipsec(ses)) {
511 			if (ses->dir == DIR_ENC) {
512 				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
513 						cdb->sh_desc,
514 						true, swap, &ses->encap_pdb,
515 						(uint8_t *)&ses->ip4_hdr,
516 						&alginfo_c, &alginfo_a);
517 			} else if (ses->dir == DIR_DEC) {
518 				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
519 						cdb->sh_desc,
520 						true, swap, &ses->decap_pdb,
521 						&alginfo_c, &alginfo_a);
522 			}
523 		} else {
524 			/* auth_only_len is set to 0 here; it will be
525 			 * overwritten in the FD for each packet.
526 			 */
527 			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
528 					true, swap, &alginfo_c, &alginfo_a,
529 					ses->iv.length, 0,
530 					ses->digest_length, ses->dir);
531 		}
532 	}
533 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
534 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
535 	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
536 
537 	return 0;
538 }
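
#if 0
/*
 * Minimal sketch (hypothetical helper, not upstream code) of the
 * inline-vs-pointer key decision made above: bit N of the mask returned
 * by rta_inline_query() is set when key N still fits inline in the
 * shared descriptor.
 */
static void
example_key_placement(struct alginfo *alg, unsigned int inl_mask,
		      unsigned int key_idx)
{
	if (inl_mask & (1 << key_idx)) {
		alg->key_type = RTA_DATA_IMM;	/* key copied into desc */
	} else {
		alg->key = (uint64_t)dpaa_mem_vtop((void *)alg->key);
		alg->key_type = RTA_DATA_PTR;	/* desc references key */
	}
}
#endif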
539 
540 /* qp is lockless, should be accessed by only one thread */
541 static int
542 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
543 {
544 	struct qman_fq *fq;
545 	unsigned int pkts = 0;
546 	int ret;
547 	struct qm_dqrr_entry *dq;
548 
549 	fq = &qp->outq;
550 	ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
551 				DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
552 	if (ret)
553 		return 0;
554 
555 	do {
556 		const struct qm_fd *fd;
557 		struct dpaa_sec_job *job;
558 		struct dpaa_sec_op_ctx *ctx;
559 		struct rte_crypto_op *op;
560 
561 		dq = qman_dequeue(fq);
562 		if (!dq)
563 			continue;
564 
565 		fd = &dq->fd;
566 		/* sg is embedded in an op ctx,
567 		 * sg[0] is for output
568 		 * sg[1] for input
569 		 */
570 		job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
571 
572 		ctx = container_of(job, struct dpaa_sec_op_ctx, job);
573 		ctx->fd_status = fd->status;
574 		op = ctx->op;
575 		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
576 			struct qm_sg_entry *sg_out;
577 			uint32_t len;
578 
579 			sg_out = &job->sg[0];
580 			hw_sg_to_cpu(sg_out);
581 			len = sg_out->length;
582 			op->sym->m_src->pkt_len = len;
583 			op->sym->m_src->data_len = len;
584 		}
585 		if (!ctx->fd_status) {
586 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
587 		} else {
588 			PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
589 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
590 		}
591 		ops[pkts++] = op;
592 
593 		/* report op status to sym->op and then free the ctx memory */
594 		rte_mempool_put(ctx->ctx_pool, (void *)ctx);
595 
596 		qman_dqrr_consume(fq, dq);
597 	} while (fq->flags & QMAN_FQ_STATE_VDQCR);
598 
599 	return pkts;
600 }
601 
602 static inline struct dpaa_sec_job *
603 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
604 {
605 	struct rte_crypto_sym_op *sym = op->sym;
606 	struct rte_mbuf *mbuf = sym->m_src;
607 	struct dpaa_sec_job *cf;
608 	struct dpaa_sec_op_ctx *ctx;
609 	struct qm_sg_entry *sg, *out_sg, *in_sg;
610 	phys_addr_t start_addr;
611 	uint8_t *old_digest, extra_segs;
612 
613 	if (is_decode(ses))
614 		extra_segs = 3;
615 	else
616 		extra_segs = 2;
617 
618 	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
619 		PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
620 								MAX_SG_ENTRIES);
621 		return NULL;
622 	}
623 	ctx = dpaa_sec_alloc_ctx(ses);
624 	if (!ctx)
625 		return NULL;
626 
627 	cf = &ctx->job;
628 	ctx->op = op;
629 	old_digest = ctx->digest;
630 
631 	/* output */
632 	out_sg = &cf->sg[0];
633 	qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
634 	out_sg->length = ses->digest_length;
635 	cpu_to_hw_sg(out_sg);
636 
637 	/* input */
638 	in_sg = &cf->sg[1];
639 	/* need to extend the input to a compound frame */
640 	in_sg->extension = 1;
641 	in_sg->final = 1;
642 	in_sg->length = sym->auth.data.length;
643 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
644 
645 	/* 1st seg */
646 	sg = in_sg + 1;
647 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
648 	sg->length = mbuf->data_len - sym->auth.data.offset;
649 	sg->offset = sym->auth.data.offset;
650 
651 	/* Successive segs */
652 	mbuf = mbuf->next;
653 	while (mbuf) {
654 		cpu_to_hw_sg(sg);
655 		sg++;
656 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
657 		sg->length = mbuf->data_len;
658 		mbuf = mbuf->next;
659 	}
660 
661 	if (is_decode(ses)) {
662 		/* Digest verification case */
663 		cpu_to_hw_sg(sg);
664 		sg++;
665 		rte_memcpy(old_digest, sym->auth.digest.data,
666 				ses->digest_length);
667 		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
668 		qm_sg_entry_set64(sg, start_addr);
669 		sg->length = ses->digest_length;
670 		in_sg->length += ses->digest_length;
671 	} else {
672 		/* Digest calculation case */
673 		sg->length -= ses->digest_length;
674 	}
675 	sg->final = 1;
676 	cpu_to_hw_sg(sg);
677 	cpu_to_hw_sg(in_sg);
678 
679 	return cf;
680 }
681 
682 /**
683  * packet looks like:
684  *		|<----data_len------->|
685  *    |ip_header|ah_header|icv|payload|
686  *              ^
687  *		|
688  *	   mbuf->pkt.data
689  */
690 static inline struct dpaa_sec_job *
691 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
692 {
693 	struct rte_crypto_sym_op *sym = op->sym;
694 	struct rte_mbuf *mbuf = sym->m_src;
695 	struct dpaa_sec_job *cf;
696 	struct dpaa_sec_op_ctx *ctx;
697 	struct qm_sg_entry *sg;
698 	rte_iova_t start_addr;
699 	uint8_t *old_digest;
700 
701 	ctx = dpaa_sec_alloc_ctx(ses);
702 	if (!ctx)
703 		return NULL;
704 
705 	cf = &ctx->job;
706 	ctx->op = op;
707 	old_digest = ctx->digest;
708 
709 	start_addr = rte_pktmbuf_iova(mbuf);
710 	/* output */
711 	sg = &cf->sg[0];
712 	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
713 	sg->length = ses->digest_length;
714 	cpu_to_hw_sg(sg);
715 
716 	/* input */
717 	sg = &cf->sg[1];
718 	if (is_decode(ses)) {
719 		/* need to extend the input to a compound frame */
720 		sg->extension = 1;
721 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
722 		sg->length = sym->auth.data.length + ses->digest_length;
723 		sg->final = 1;
724 		cpu_to_hw_sg(sg);
725 
726 		sg = &cf->sg[2];
727 		/* hash result or digest, save digest first */
728 		rte_memcpy(old_digest, sym->auth.digest.data,
729 			   ses->digest_length);
730 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
731 		sg->length = sym->auth.data.length;
732 		cpu_to_hw_sg(sg);
733 
734 		/* let's check digest by hw */
735 		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
736 		sg++;
737 		qm_sg_entry_set64(sg, start_addr);
738 		sg->length = ses->digest_length;
739 		sg->final = 1;
740 		cpu_to_hw_sg(sg);
741 	} else {
742 		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
743 		sg->length = sym->auth.data.length;
744 		sg->final = 1;
745 		cpu_to_hw_sg(sg);
746 	}
747 
748 	return cf;
749 }
750 
751 static inline struct dpaa_sec_job *
752 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
753 {
754 	struct rte_crypto_sym_op *sym = op->sym;
755 	struct dpaa_sec_job *cf;
756 	struct dpaa_sec_op_ctx *ctx;
757 	struct qm_sg_entry *sg, *out_sg, *in_sg;
758 	struct rte_mbuf *mbuf;
759 	uint8_t req_segs;
760 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
761 			ses->iv.offset);
762 
763 	if (sym->m_dst) {
764 		mbuf = sym->m_dst;
765 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
766 	} else {
767 		mbuf = sym->m_src;
768 		req_segs = mbuf->nb_segs * 2 + 3;
769 	}
770 
771 	if (req_segs > MAX_SG_ENTRIES) {
772 		PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
773 								MAX_SG_ENTRIES);
774 		return NULL;
775 	}
776 
777 	ctx = dpaa_sec_alloc_ctx(ses);
778 	if (!ctx)
779 		return NULL;
780 
781 	cf = &ctx->job;
782 	ctx->op = op;
783 
784 	/* output */
785 	out_sg = &cf->sg[0];
786 	out_sg->extension = 1;
787 	out_sg->length = sym->cipher.data.length;
788 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
789 	cpu_to_hw_sg(out_sg);
790 
791 	/* 1st seg */
792 	sg = &cf->sg[2];
793 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
794 	sg->length = mbuf->data_len - sym->cipher.data.offset;
795 	sg->offset = sym->cipher.data.offset;
796 
797 	/* Successive segs */
798 	mbuf = mbuf->next;
799 	while (mbuf) {
800 		cpu_to_hw_sg(sg);
801 		sg++;
802 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
803 		sg->length = mbuf->data_len;
804 		mbuf = mbuf->next;
805 	}
806 	sg->final = 1;
807 	cpu_to_hw_sg(sg);
808 
809 	/* input */
810 	mbuf = sym->m_src;
811 	in_sg = &cf->sg[1];
812 	in_sg->extension = 1;
813 	in_sg->final = 1;
814 	in_sg->length = sym->cipher.data.length + ses->iv.length;
815 
816 	sg++;
817 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
818 	cpu_to_hw_sg(in_sg);
819 
820 	/* IV */
821 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
822 	sg->length = ses->iv.length;
823 	cpu_to_hw_sg(sg);
824 
825 	/* 1st seg */
826 	sg++;
827 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
828 	sg->length = mbuf->data_len - sym->cipher.data.offset;
829 	sg->offset = sym->cipher.data.offset;
830 
831 	/* Successive segs */
832 	mbuf = mbuf->next;
833 	while (mbuf) {
834 		cpu_to_hw_sg(sg);
835 		sg++;
836 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
837 		sg->length = mbuf->data_len;
838 		mbuf = mbuf->next;
839 	}
840 	sg->final = 1;
841 	cpu_to_hw_sg(sg);
842 
843 	return cf;
844 }
845 
846 static inline struct dpaa_sec_job *
847 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
848 {
849 	struct rte_crypto_sym_op *sym = op->sym;
850 	struct dpaa_sec_job *cf;
851 	struct dpaa_sec_op_ctx *ctx;
852 	struct qm_sg_entry *sg;
853 	rte_iova_t src_start_addr, dst_start_addr;
854 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
855 			ses->iv.offset);
856 
857 	ctx = dpaa_sec_alloc_ctx(ses);
858 	if (!ctx)
859 		return NULL;
860 
861 	cf = &ctx->job;
862 	ctx->op = op;
863 
864 	src_start_addr = rte_pktmbuf_iova(sym->m_src);
865 
866 	if (sym->m_dst)
867 		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
868 	else
869 		dst_start_addr = src_start_addr;
870 
871 	/* output */
872 	sg = &cf->sg[0];
873 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
874 	sg->length = sym->cipher.data.length + ses->iv.length;
875 	cpu_to_hw_sg(sg);
876 
877 	/* input */
878 	sg = &cf->sg[1];
879 
880 	/* need to extend the input to a compound frame */
881 	sg->extension = 1;
882 	sg->final = 1;
883 	sg->length = sym->cipher.data.length + ses->iv.length;
884 	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
885 	cpu_to_hw_sg(sg);
886 
887 	sg = &cf->sg[2];
888 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
889 	sg->length = ses->iv.length;
890 	cpu_to_hw_sg(sg);
891 
892 	sg++;
893 	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
894 	sg->length = sym->cipher.data.length;
895 	sg->final = 1;
896 	cpu_to_hw_sg(sg);
897 
898 	return cf;
899 }
900 
901 static inline struct dpaa_sec_job *
902 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
903 {
904 	struct rte_crypto_sym_op *sym = op->sym;
905 	struct dpaa_sec_job *cf;
906 	struct dpaa_sec_op_ctx *ctx;
907 	struct qm_sg_entry *sg, *out_sg, *in_sg;
908 	struct rte_mbuf *mbuf;
909 	uint8_t req_segs;
910 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
911 			ses->iv.offset);
912 
913 	if (sym->m_dst) {
914 		mbuf = sym->m_dst;
915 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
916 	} else {
917 		mbuf = sym->m_src;
918 		req_segs = mbuf->nb_segs * 2 + 4;
919 	}
920 
921 	if (ses->auth_only_len)
922 		req_segs++;
923 
924 	if (req_segs > MAX_SG_ENTRIES) {
925 		PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
926 				MAX_SG_ENTRIES);
927 		return NULL;
928 	}
929 
930 	ctx = dpaa_sec_alloc_ctx(ses);
931 	if (!ctx)
932 		return NULL;
933 
934 	cf = &ctx->job;
935 	ctx->op = op;
936 
937 	rte_prefetch0(cf->sg);
938 
939 	/* output */
940 	out_sg = &cf->sg[0];
941 	out_sg->extension = 1;
942 	if (is_encode(ses))
943 		out_sg->length = sym->aead.data.length + ses->auth_only_len
944 						+ ses->digest_length;
945 	else
946 		out_sg->length = sym->aead.data.length + ses->auth_only_len;
947 
948 	/* output sg entries */
949 	sg = &cf->sg[2];
950 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
951 	cpu_to_hw_sg(out_sg);
952 
953 	/* 1st seg */
954 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
955 	sg->length = mbuf->data_len - sym->aead.data.offset +
956 					ses->auth_only_len;
957 	sg->offset = sym->aead.data.offset - ses->auth_only_len;
958 
959 	/* Successive segs */
960 	mbuf = mbuf->next;
961 	while (mbuf) {
962 		cpu_to_hw_sg(sg);
963 		sg++;
964 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
965 		sg->length = mbuf->data_len;
966 		mbuf = mbuf->next;
967 	}
968 	sg->length -= ses->digest_length;
969 
970 	if (is_encode(ses)) {
971 		cpu_to_hw_sg(sg);
972 		/* set auth output */
973 		sg++;
974 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
975 		sg->length = ses->digest_length;
976 	}
977 	sg->final = 1;
978 	cpu_to_hw_sg(sg);
979 
980 	/* input */
981 	mbuf = sym->m_src;
982 	in_sg = &cf->sg[1];
983 	in_sg->extension = 1;
984 	in_sg->final = 1;
985 	if (is_encode(ses))
986 		in_sg->length = ses->iv.length + sym->aead.data.length
987 							+ ses->auth_only_len;
988 	else
989 		in_sg->length = ses->iv.length + sym->aead.data.length
990 				+ ses->auth_only_len + ses->digest_length;
991 
992 	/* input sg entries */
993 	sg++;
994 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
995 	cpu_to_hw_sg(in_sg);
996 
997 	/* 1st seg IV */
998 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
999 	sg->length = ses->iv.length;
1000 	cpu_to_hw_sg(sg);
1001 
1002 	/* 2nd seg auth only */
1003 	if (ses->auth_only_len) {
1004 		sg++;
1005 		qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1006 		sg->length = ses->auth_only_len;
1007 		cpu_to_hw_sg(sg);
1008 	}
1009 
1010 	/* 3rd seg */
1011 	sg++;
1012 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1013 	sg->length = mbuf->data_len - sym->aead.data.offset;
1014 	sg->offset = sym->aead.data.offset;
1015 
1016 	/* Successive segs */
1017 	mbuf = mbuf->next;
1018 	while (mbuf) {
1019 		cpu_to_hw_sg(sg);
1020 		sg++;
1021 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1022 		sg->length = mbuf->data_len;
1023 		mbuf = mbuf->next;
1024 	}
1025 
1026 	if (is_decode(ses)) {
1027 		cpu_to_hw_sg(sg);
1028 		sg++;
1029 		memcpy(ctx->digest, sym->aead.digest.data,
1030 			ses->digest_length);
1031 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1032 		sg->length = ses->digest_length;
1033 	}
1034 	sg->final = 1;
1035 	cpu_to_hw_sg(sg);
1036 
1037 	return cf;
1038 }
1039 
1040 static inline struct dpaa_sec_job *
1041 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1042 {
1043 	struct rte_crypto_sym_op *sym = op->sym;
1044 	struct dpaa_sec_job *cf;
1045 	struct dpaa_sec_op_ctx *ctx;
1046 	struct qm_sg_entry *sg;
1047 	uint32_t length = 0;
1048 	rte_iova_t src_start_addr, dst_start_addr;
1049 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1050 			ses->iv.offset);
1051 
1052 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1053 
1054 	if (sym->m_dst)
1055 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1056 	else
1057 		dst_start_addr = src_start_addr;
1058 
1059 	ctx = dpaa_sec_alloc_ctx(ses);
1060 	if (!ctx)
1061 		return NULL;
1062 
1063 	cf = &ctx->job;
1064 	ctx->op = op;
1065 
1066 	/* input */
1067 	rte_prefetch0(cf->sg);
1068 	sg = &cf->sg[2];
1069 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1070 	if (is_encode(ses)) {
1071 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1072 		sg->length = ses->iv.length;
1073 		length += sg->length;
1074 		cpu_to_hw_sg(sg);
1075 
1076 		sg++;
1077 		if (ses->auth_only_len) {
1078 			qm_sg_entry_set64(sg,
1079 					  dpaa_mem_vtop(sym->aead.aad.data));
1080 			sg->length = ses->auth_only_len;
1081 			length += sg->length;
1082 			cpu_to_hw_sg(sg);
1083 			sg++;
1084 		}
1085 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1086 		sg->length = sym->aead.data.length;
1087 		length += sg->length;
1088 		sg->final = 1;
1089 		cpu_to_hw_sg(sg);
1090 	} else {
1091 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1092 		sg->length = ses->iv.length;
1093 		length += sg->length;
1094 		cpu_to_hw_sg(sg);
1095 
1096 		sg++;
1097 		if (ses->auth_only_len) {
1098 			qm_sg_entry_set64(sg,
1099 					  dpaa_mem_vtop(sym->aead.aad.data));
1100 			sg->length = ses->auth_only_len;
1101 			length += sg->length;
1102 			cpu_to_hw_sg(sg);
1103 			sg++;
1104 		}
1105 		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1106 		sg->length = sym->aead.data.length;
1107 		length += sg->length;
1108 		cpu_to_hw_sg(sg);
1109 
1110 		memcpy(ctx->digest, sym->aead.digest.data,
1111 		       ses->digest_length);
1112 		sg++;
1113 
1114 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1115 		sg->length = ses->digest_length;
1116 		length += sg->length;
1117 		sg->final = 1;
1118 		cpu_to_hw_sg(sg);
1119 	}
1120 	/* input compound frame */
1121 	cf->sg[1].length = length;
1122 	cf->sg[1].extension = 1;
1123 	cf->sg[1].final = 1;
1124 	cpu_to_hw_sg(&cf->sg[1]);
1125 
1126 	/* output */
1127 	sg++;
1128 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1129 	qm_sg_entry_set64(sg,
1130 		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1131 	sg->length = sym->aead.data.length + ses->auth_only_len;
1132 	length = sg->length;
1133 	if (is_encode(ses)) {
1134 		cpu_to_hw_sg(sg);
1135 		/* set auth output */
1136 		sg++;
1137 		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1138 		sg->length = ses->digest_length;
1139 		length += sg->length;
1140 	}
1141 	sg->final = 1;
1142 	cpu_to_hw_sg(sg);
1143 
1144 	/* output compound frame */
1145 	cf->sg[0].length = length;
1146 	cf->sg[0].extension = 1;
1147 	cpu_to_hw_sg(&cf->sg[0]);
1148 
1149 	return cf;
1150 }
1151 
1152 static inline struct dpaa_sec_job *
1153 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1154 {
1155 	struct rte_crypto_sym_op *sym = op->sym;
1156 	struct dpaa_sec_job *cf;
1157 	struct dpaa_sec_op_ctx *ctx;
1158 	struct qm_sg_entry *sg, *out_sg, *in_sg;
1159 	struct rte_mbuf *mbuf;
1160 	uint8_t req_segs;
1161 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1162 			ses->iv.offset);
1163 
1164 	if (sym->m_dst) {
1165 		mbuf = sym->m_dst;
1166 		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1167 	} else {
1168 		mbuf = sym->m_src;
1169 		req_segs = mbuf->nb_segs * 2 + 4;
1170 	}
1171 
1172 	if (req_segs > MAX_SG_ENTRIES) {
1173 		PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
1174 				MAX_SG_ENTRIES);
1175 		return NULL;
1176 	}
1177 
1178 	ctx = dpaa_sec_alloc_ctx(ses);
1179 	if (!ctx)
1180 		return NULL;
1181 
1182 	cf = &ctx->job;
1183 	ctx->op = op;
1184 
1185 	rte_prefetch0(cf->sg);
1186 
1187 	/* output */
1188 	out_sg = &cf->sg[0];
1189 	out_sg->extension = 1;
1190 	if (is_encode(ses))
1191 		out_sg->length = sym->auth.data.length + ses->digest_length;
1192 	else
1193 		out_sg->length = sym->auth.data.length;
1194 
1195 	/* output sg entries */
1196 	sg = &cf->sg[2];
1197 	qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
1198 	cpu_to_hw_sg(out_sg);
1199 
1200 	/* 1st seg */
1201 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1202 	sg->length = mbuf->data_len - sym->auth.data.offset;
1203 	sg->offset = sym->auth.data.offset;
1204 
1205 	/* Successive segs */
1206 	mbuf = mbuf->next;
1207 	while (mbuf) {
1208 		cpu_to_hw_sg(sg);
1209 		sg++;
1210 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1211 		sg->length = mbuf->data_len;
1212 		mbuf = mbuf->next;
1213 	}
1214 	sg->length -= ses->digest_length;
1215 
1216 	if (is_encode(ses)) {
1217 		cpu_to_hw_sg(sg);
1218 		/* set auth output */
1219 		sg++;
1220 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1221 		sg->length = ses->digest_length;
1222 	}
1223 	sg->final = 1;
1224 	cpu_to_hw_sg(sg);
1225 
1226 	/* input */
1227 	mbuf = sym->m_src;
1228 	in_sg = &cf->sg[1];
1229 	in_sg->extension = 1;
1230 	in_sg->final = 1;
1231 	if (is_encode(ses))
1232 		in_sg->length = ses->iv.length + sym->auth.data.length;
1233 	else
1234 		in_sg->length = ses->iv.length + sym->auth.data.length
1235 						+ ses->digest_length;
1236 
1237 	/* input sg entries */
1238 	sg++;
1239 	qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
1240 	cpu_to_hw_sg(in_sg);
1241 
1242 	/* 1st seg IV */
1243 	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1244 	sg->length = ses->iv.length;
1245 	cpu_to_hw_sg(sg);
1246 
1247 	/* 2nd seg */
1248 	sg++;
1249 	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1250 	sg->length = mbuf->data_len - sym->auth.data.offset;
1251 	sg->offset = sym->auth.data.offset;
1252 
1253 	/* Successive segs */
1254 	mbuf = mbuf->next;
1255 	while (mbuf) {
1256 		cpu_to_hw_sg(sg);
1257 		sg++;
1258 		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1259 		sg->length = mbuf->data_len;
1260 		mbuf = mbuf->next;
1261 	}
1262 
1263 	sg->length -= ses->digest_length;
1264 	if (is_decode(ses)) {
1265 		cpu_to_hw_sg(sg);
1266 		sg++;
1267 		memcpy(ctx->digest, sym->auth.digest.data,
1268 			ses->digest_length);
1269 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1270 		sg->length = ses->digest_length;
1271 	}
1272 	sg->final = 1;
1273 	cpu_to_hw_sg(sg);
1274 
1275 	return cf;
1276 }
1277 
1278 static inline struct dpaa_sec_job *
1279 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1280 {
1281 	struct rte_crypto_sym_op *sym = op->sym;
1282 	struct dpaa_sec_job *cf;
1283 	struct dpaa_sec_op_ctx *ctx;
1284 	struct qm_sg_entry *sg;
1285 	rte_iova_t src_start_addr, dst_start_addr;
1286 	uint32_t length = 0;
1287 	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1288 			ses->iv.offset);
1289 
1290 	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1291 	if (sym->m_dst)
1292 		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1293 	else
1294 		dst_start_addr = src_start_addr;
1295 
1296 	ctx = dpaa_sec_alloc_ctx(ses);
1297 	if (!ctx)
1298 		return NULL;
1299 
1300 	cf = &ctx->job;
1301 	ctx->op = op;
1302 
1303 	/* input */
1304 	rte_prefetch0(cf->sg);
1305 	sg = &cf->sg[2];
1306 	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1307 	if (is_encode(ses)) {
1308 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1309 		sg->length = ses->iv.length;
1310 		length += sg->length;
1311 		cpu_to_hw_sg(sg);
1312 
1313 		sg++;
1314 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1315 		sg->length = sym->auth.data.length;
1316 		length += sg->length;
1317 		sg->final = 1;
1318 		cpu_to_hw_sg(sg);
1319 	} else {
1320 		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1321 		sg->length = ses->iv.length;
1322 		length += sg->length;
1323 		cpu_to_hw_sg(sg);
1324 
1325 		sg++;
1326 
1327 		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1328 		sg->length = sym->auth.data.length;
1329 		length += sg->length;
1330 		cpu_to_hw_sg(sg);
1331 
1332 		memcpy(ctx->digest, sym->auth.digest.data,
1333 		       ses->digest_length);
1334 		sg++;
1335 
1336 		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1337 		sg->length = ses->digest_length;
1338 		length += sg->length;
1339 		sg->final = 1;
1340 		cpu_to_hw_sg(sg);
1341 	}
1342 	/* input compound frame */
1343 	cf->sg[1].length = length;
1344 	cf->sg[1].extension = 1;
1345 	cf->sg[1].final = 1;
1346 	cpu_to_hw_sg(&cf->sg[1]);
1347 
1348 	/* output */
1349 	sg++;
1350 	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1351 	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1352 	sg->length = sym->cipher.data.length;
1353 	length = sg->length;
1354 	if (is_encode(ses)) {
1355 		cpu_to_hw_sg(sg);
1356 		/* set auth output */
1357 		sg++;
1358 		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1359 		sg->length = ses->digest_length;
1360 		length += sg->length;
1361 	}
1362 	sg->final = 1;
1363 	cpu_to_hw_sg(sg);
1364 
1365 	/* output compound frame */
1366 	cf->sg[0].length = length;
1367 	cf->sg[0].extension = 1;
1368 	cpu_to_hw_sg(&cf->sg[0]);
1369 
1370 	return cf;
1371 }
1372 
1373 static inline struct dpaa_sec_job *
1374 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1375 {
1376 	struct rte_crypto_sym_op *sym = op->sym;
1377 	struct dpaa_sec_job *cf;
1378 	struct dpaa_sec_op_ctx *ctx;
1379 	struct qm_sg_entry *sg;
1380 	phys_addr_t src_start_addr, dst_start_addr;
1381 
1382 	ctx = dpaa_sec_alloc_ctx(ses);
1383 	if (!ctx)
1384 		return NULL;
1385 	cf = &ctx->job;
1386 	ctx->op = op;
1387 
1388 	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1389 
1390 	if (sym->m_dst)
1391 		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1392 	else
1393 		dst_start_addr = src_start_addr;
1394 
1395 	/* input */
1396 	sg = &cf->sg[1];
1397 	qm_sg_entry_set64(sg, src_start_addr);
1398 	sg->length = sym->m_src->pkt_len;
1399 	sg->final = 1;
1400 	cpu_to_hw_sg(sg);
1401 
1402 	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1403 	/* output */
1404 	sg = &cf->sg[0];
1405 	qm_sg_entry_set64(sg, dst_start_addr);
1406 	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1407 	cpu_to_hw_sg(sg);
1408 
1409 	return cf;
1410 }
1411 
1412 static uint16_t
1413 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1414 		       uint16_t nb_ops)
1415 {
1416 	/* Function to transmit the frames to the given device and queue pair */
1417 	uint32_t loop;
1418 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1419 	uint16_t num_tx = 0;
1420 	struct qm_fd fds[DPAA_SEC_BURST], *fd;
1421 	uint32_t frames_to_send;
1422 	struct rte_crypto_op *op;
1423 	struct dpaa_sec_job *cf;
1424 	dpaa_sec_session *ses;
1425 	struct dpaa_sec_op_ctx *ctx;
1426 	uint32_t auth_only_len;
1427 	struct qman_fq *inq[DPAA_SEC_BURST];
1428 
1429 	while (nb_ops) {
1430 		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1431 				DPAA_SEC_BURST : nb_ops;
1432 		for (loop = 0; loop < frames_to_send; loop++) {
1433 			op = *(ops++);
1434 			switch (op->sess_type) {
1435 			case RTE_CRYPTO_OP_WITH_SESSION:
1436 				ses = (dpaa_sec_session *)
1437 					get_session_private_data(
1438 							op->sym->session,
1439 							cryptodev_driver_id);
1440 				break;
1441 			case RTE_CRYPTO_OP_SECURITY_SESSION:
1442 				ses = (dpaa_sec_session *)
1443 					get_sec_session_private_data(
1444 							op->sym->sec_session);
1445 				break;
1446 			default:
1447 				PMD_TX_LOG(ERR,
1448 					"sessionless crypto op not supported");
1449 				frames_to_send = loop;
1450 				nb_ops = loop;
1451 				goto send_pkts;
1452 			}
1453 			if (unlikely(!ses->qp || ses->qp != qp)) {
1454 				PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
1455 						ses->qp, qp);
1456 				if (dpaa_sec_attach_sess_q(qp, ses)) {
1457 					frames_to_send = loop;
1458 					nb_ops = loop;
1459 					goto send_pkts;
1460 				}
1461 			}
1462 
1463 			auth_only_len = op->sym->auth.data.length -
1464 						op->sym->cipher.data.length;
1465 			if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1466 				if (is_auth_only(ses)) {
1467 					cf = build_auth_only(op, ses);
1468 				} else if (is_cipher_only(ses)) {
1469 					cf = build_cipher_only(op, ses);
1470 				} else if (is_aead(ses)) {
1471 					cf = build_cipher_auth_gcm(op, ses);
1472 					auth_only_len = ses->auth_only_len;
1473 				} else if (is_auth_cipher(ses)) {
1474 					cf = build_cipher_auth(op, ses);
1475 				} else if (is_proto_ipsec(ses)) {
1476 					cf = build_proto(op, ses);
1477 				} else {
1478 					PMD_TX_LOG(ERR, "unsupported sec op");
1479 					frames_to_send = loop;
1480 					nb_ops = loop;
1481 					goto send_pkts;
1482 				}
1483 			} else {
1484 				if (is_auth_only(ses)) {
1485 					cf = build_auth_only_sg(op, ses);
1486 				} else if (is_cipher_only(ses)) {
1487 					cf = build_cipher_only_sg(op, ses);
1488 				} else if (is_aead(ses)) {
1489 					cf = build_cipher_auth_gcm_sg(op, ses);
1490 					auth_only_len = ses->auth_only_len;
1491 				} else if (is_auth_cipher(ses)) {
1492 					cf = build_cipher_auth_sg(op, ses);
1493 				} else {
1494 					PMD_TX_LOG(ERR, "unsupported sec op");
1495 					frames_to_send = loop;
1496 					nb_ops = loop;
1497 					goto send_pkts;
1498 				}
1499 			}
1500 			if (unlikely(!cf)) {
1501 				frames_to_send = loop;
1502 				nb_ops = loop;
1503 				goto send_pkts;
1504 			}
1505 
1506 			fd = &fds[loop];
1507 			inq[loop] = ses->inq;
1508 			fd->opaque_addr = 0;
1509 			fd->cmd = 0;
1510 			ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1511 			qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1512 			fd->_format1 = qm_fd_compound;
1513 			fd->length29 = 2 * sizeof(struct qm_sg_entry);
1514 			/* auth_only_len is set to 0 in the descriptor and is
1515 			 * overwritten here in fd.cmd, which updates the
1516 			 * DPOVRD register.
1517 			 */
1518 			if (auth_only_len)
1519 				fd->cmd = 0x80000000 | auth_only_len;
1520 
1521 		}
1522 send_pkts:
1523 		loop = 0;
1524 		while (loop < frames_to_send) {
1525 			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1526 					frames_to_send - loop);
1527 		}
1528 		nb_ops -= frames_to_send;
1529 		num_tx += frames_to_send;
1530 	}
1531 
1532 	dpaa_qp->tx_pkts += num_tx;
1533 	dpaa_qp->tx_errs += nb_ops - num_tx;
1534 
1535 	return num_tx;
1536 }
1537 
1538 static uint16_t
1539 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1540 		       uint16_t nb_ops)
1541 {
1542 	uint16_t num_rx;
1543 	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1544 
1545 	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1546 
1547 	dpaa_qp->rx_pkts += num_rx;
1548 	dpaa_qp->rx_errs += nb_ops - num_rx;
1549 
1550 	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
1551 
1552 	return num_rx;
1553 }
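
#if 0
/*
 * Usage sketch (hypothetical application-side code): ops are pushed
 * through the enqueue burst and completions are polled back from the
 * same queue pair; the qp is lockless, so one lcore must own it.
 */
static void
example_process_burst(void *qp, struct rte_crypto_op **ops, uint16_t n)
{
	uint16_t sent = dpaa_sec_enqueue_burst(qp, ops, n);
	uint16_t got = 0;

	while (got < sent)
		got += dpaa_sec_dequeue_burst(qp, &ops[got], sent - got);
}
#endif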
1554 
1555 /** Release queue pair */
1556 static int
1557 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1558 			    uint16_t qp_id)
1559 {
1560 	struct dpaa_sec_dev_private *internals;
1561 	struct dpaa_sec_qp *qp = NULL;
1562 
1563 	PMD_INIT_FUNC_TRACE();
1564 
1565 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
1566 
1567 	internals = dev->data->dev_private;
1568 	if (qp_id >= internals->max_nb_queue_pairs) {
1569 		PMD_INIT_LOG(ERR, "Max supported qpid %d",
1570 			     internals->max_nb_queue_pairs);
1571 		return -EINVAL;
1572 	}
1573 
1574 	qp = &internals->qps[qp_id];
1575 	qp->internals = NULL;
1576 	dev->data->queue_pairs[qp_id] = NULL;
1577 
1578 	return 0;
1579 }
1580 
1581 /** Setup a queue pair */
1582 static int
1583 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1584 		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1585 		__rte_unused int socket_id,
1586 		__rte_unused struct rte_mempool *session_pool)
1587 {
1588 	struct dpaa_sec_dev_private *internals;
1589 	struct dpaa_sec_qp *qp = NULL;
1590 
1591 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
1592 		     dev, qp_id, qp_conf);
1593 
1594 	internals = dev->data->dev_private;
1595 	if (qp_id >= internals->max_nb_queue_pairs) {
1596 		PMD_INIT_LOG(ERR, "Max supported qpid %d",
1597 			     internals->max_nb_queue_pairs);
1598 		return -EINVAL;
1599 	}
1600 
1601 	qp = &internals->qps[qp_id];
1602 	qp->internals = internals;
1603 	dev->data->queue_pairs[qp_id] = qp;
1604 
1605 	return 0;
1606 }
1607 
1608 /** Start queue pair */
1609 static int
1610 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1611 			  __rte_unused uint16_t queue_pair_id)
1612 {
1613 	PMD_INIT_FUNC_TRACE();
1614 
1615 	return 0;
1616 }
1617 
1618 /** Stop queue pair */
1619 static int
1620 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1621 			 __rte_unused uint16_t queue_pair_id)
1622 {
1623 	PMD_INIT_FUNC_TRACE();
1624 
1625 	return 0;
1626 }
1627 
1628 /** Return the number of allocated queue pairs */
1629 static uint32_t
1630 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1631 {
1632 	PMD_INIT_FUNC_TRACE();
1633 
1634 	return dev->data->nb_queue_pairs;
1635 }
1636 
1637 /** Returns the size of session structure */
1638 static unsigned int
1639 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1640 {
1641 	PMD_INIT_FUNC_TRACE();
1642 
1643 	return sizeof(dpaa_sec_session);
1644 }
1645 
1646 static int
1647 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1648 		     struct rte_crypto_sym_xform *xform,
1649 		     dpaa_sec_session *session)
1650 {
1651 	session->cipher_alg = xform->cipher.algo;
1652 	session->iv.length = xform->cipher.iv.length;
1653 	session->iv.offset = xform->cipher.iv.offset;
1654 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1655 					       RTE_CACHE_LINE_SIZE);
1656 	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1657 		PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
1658 		return -ENOMEM;
1659 	}
1660 	session->cipher_key.length = xform->cipher.key.length;
1661 
1662 	memcpy(session->cipher_key.data, xform->cipher.key.data,
1663 	       xform->cipher.key.length);
1664 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1665 			DIR_ENC : DIR_DEC;
1666 
1667 	return 0;
1668 }
1669 
1670 static int
1671 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1672 		   struct rte_crypto_sym_xform *xform,
1673 		   dpaa_sec_session *session)
1674 {
1675 	session->auth_alg = xform->auth.algo;
1676 	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1677 					     RTE_CACHE_LINE_SIZE);
1678 	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1679 		PMD_INIT_LOG(ERR, "No Memory for auth key\n");
1680 		return -ENOMEM;
1681 	}
1682 	session->auth_key.length = xform->auth.key.length;
1683 	session->digest_length = xform->auth.digest_length;
1684 
1685 	memcpy(session->auth_key.data, xform->auth.key.data,
1686 	       xform->auth.key.length);
1687 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1688 			DIR_ENC : DIR_DEC;
1689 
1690 	return 0;
1691 }
1692 
1693 static int
1694 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1695 		   struct rte_crypto_sym_xform *xform,
1696 		   dpaa_sec_session *session)
1697 {
1698 	session->aead_alg = xform->aead.algo;
1699 	session->iv.length = xform->aead.iv.length;
1700 	session->iv.offset = xform->aead.iv.offset;
1701 	session->auth_only_len = xform->aead.aad_length;
1702 	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1703 					     RTE_CACHE_LINE_SIZE);
1704 	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1705 		PMD_INIT_LOG(ERR, "No Memory for aead key\n");
1706 		return -ENOMEM;
1707 	}
1708 	session->aead_key.length = xform->aead.key.length;
1709 	session->digest_length = xform->aead.digest_length;
1710 
1711 	memcpy(session->aead_key.data, xform->aead.key.data,
1712 	       xform->aead.key.length);
1713 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1714 			DIR_ENC : DIR_DEC;
1715 
1716 	return 0;
1717 }
1718 
1719 static struct qman_fq *
1720 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1721 {
1722 	unsigned int i;
1723 
1724 	for (i = 0; i < qi->max_nb_sessions; i++) {
1725 		if (qi->inq_attach[i] == 0) {
1726 			qi->inq_attach[i] = 1;
1727 			return &qi->inq[i];
1728 		}
1729 	}
1730 	PMD_DRV_LOG(ERR, "All sessions in use %x", qi->max_nb_sessions);
1731 
1732 	return NULL;
1733 }
1734 
1735 static int
1736 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1737 {
1738 	unsigned int i;
1739 
1740 	for (i = 0; i < qi->max_nb_sessions; i++) {
1741 		if (&qi->inq[i] == fq) {
1742 			qman_retire_fq(fq, NULL);
1743 			qman_oos_fq(fq);
1744 			qi->inq_attach[i] = 0;
1745 			return 0;
1746 		}
1747 	}
1748 	return -1;
1749 }
1750 
1751 static int
1752 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1753 {
1754 	int ret;
1755 
1756 	sess->qp = qp;
1757 	ret = dpaa_sec_prep_cdb(sess);
1758 	if (ret) {
1759 		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
1760 		return -1;
1761 	}
1762 
1763 	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1764 			       qman_fq_fqid(&qp->outq));
1765 	if (ret)
1766 		PMD_DRV_LOG(ERR, "Unable to init sec queue");
1767 
1768 	return ret;
1769 }
1770 
1771 static int
1772 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1773 			uint16_t qp_id __rte_unused,
1774 			void *ses __rte_unused)
1775 {
1776 	PMD_INIT_FUNC_TRACE();
1777 	return 0;
1778 }
1779 
1780 static int
1781 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1782 			uint16_t qp_id  __rte_unused,
1783 			void *ses)
1784 {
1785 	dpaa_sec_session *sess = ses;
1786 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1787 
1788 	PMD_INIT_FUNC_TRACE();
1789 
1790 	if (sess->inq)
1791 		dpaa_sec_detach_rxq(qi, sess->inq);
1792 	sess->inq = NULL;
1793 
1794 	sess->qp = NULL;
1795 
1796 	return 0;
1797 }
1798 
1799 static int
1800 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1801 			    struct rte_crypto_sym_xform *xform,	void *sess)
1802 {
1803 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1804 	dpaa_sec_session *session = sess;
1805 
1806 	PMD_INIT_FUNC_TRACE();
1807 
1808 	if (unlikely(sess == NULL)) {
1809 		RTE_LOG(ERR, PMD, "invalid session struct\n");
1810 		return -EINVAL;
1811 	}
1812 
1813 	/* Default IV length = 0 */
1814 	session->iv.length = 0;
1815 
1816 	/* Cipher Only */
1817 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1818 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1819 		dpaa_sec_cipher_init(dev, xform, session);
1820 
1821 	/* Authentication Only */
1822 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1823 		   xform->next == NULL) {
1824 		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1825 		dpaa_sec_auth_init(dev, xform, session);
1826 
1827 	/* Cipher then Authenticate */
1828 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1829 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1830 		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1831 			dpaa_sec_cipher_init(dev, xform, session);
1832 			dpaa_sec_auth_init(dev, xform->next, session);
1833 		} else {
1834 			PMD_DRV_LOG(ERR, "Not supported: Cipher then Auth with decrypt op");
1835 			return -EINVAL;
1836 		}
1837 
1838 	/* Authenticate then Cipher */
1839 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1840 		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1841 		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1842 			dpaa_sec_auth_init(dev, xform, session);
1843 			dpaa_sec_cipher_init(dev, xform->next, session);
1844 		} else {
1845 			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher with encrypt op");
1846 			return -EINVAL;
1847 		}
1848 
1849 	/* AEAD operation for AES-GCM kind of Algorithms */
1850 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1851 		   xform->next == NULL) {
1852 		dpaa_sec_aead_init(dev, xform, session);
1853 
1854 	} else {
1855 		PMD_DRV_LOG(ERR, "Invalid crypto type");
1856 		return -EINVAL;
1857 	}
1858 	session->ctx_pool = internals->ctx_pool;
1859 	session->inq = dpaa_sec_attach_rxq(internals);
1860 	if (session->inq == NULL) {
1861 		PMD_DRV_LOG(ERR, "unable to attach sec queue");
1862 		goto err1;
1863 	}
1864 
1865 	return 0;
1866 
1867 err1:
1868 	rte_free(session->cipher_key.data);
1869 	rte_free(session->auth_key.data);
1870 	memset(session, 0, sizeof(dpaa_sec_session));
1871 
1872 	return -EINVAL;
1873 }
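
#if 0
/*
 * Sketch (hypothetical application-side code) of an xform chain this
 * parser accepts: cipher-then-auth for encryption. Keys, IV and digest
 * lengths are elided for brevity.
 */
static void
example_xform_chain(void)
{
	struct rte_crypto_sym_xform auth = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		},
	};
	struct rte_crypto_sym_xform cipher = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		},
	};

	(void)cipher;	/* would be passed to session configure */
}
#endif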
1874 
1875 static int
1876 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1877 		struct rte_crypto_sym_xform *xform,
1878 		struct rte_cryptodev_sym_session *sess,
1879 		struct rte_mempool *mempool)
1880 {
1881 	void *sess_private_data;
1882 	int ret;
1883 
1884 	PMD_INIT_FUNC_TRACE();
1885 
1886 	if (rte_mempool_get(mempool, &sess_private_data)) {
1887 		CDEV_LOG_ERR(
1888 			"Couldn't get object from session mempool");
1889 		return -ENOMEM;
1890 	}
1891 
1892 	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1893 	if (ret != 0) {
1894 		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1895 				"session parameters");
1896 
1897 		/* Return session to mempool */
1898 		rte_mempool_put(mempool, sess_private_data);
1899 		return ret;
1900 	}
1901 
1902 	set_session_private_data(sess, dev->driver_id,
1903 			sess_private_data);
1904 
1905 
1906 	return 0;
1907 }
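
#if 0
/*
 * Usage sketch (hypothetical application-side code, DPDK 17.11-era API):
 * allocate the generic session from a mempool, then let this driver fill
 * its private data through rte_cryptodev_sym_session_init().
 */
static struct rte_cryptodev_sym_session *
example_session_create(uint8_t dev_id, struct rte_mempool *mp,
		       struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(mp);

	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, mp)) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
#endif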
1908 
1909 /** Clear the memory of session so it doesn't leave key material behind */
1910 static void
1911 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1912 		struct rte_cryptodev_sym_session *sess)
1913 {
1914 	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1915 	uint8_t index = dev->driver_id;
1916 	void *sess_priv = get_session_private_data(sess, index);
1917 
1918 	PMD_INIT_FUNC_TRACE();
1919 
1920 	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1921 
1922 	if (sess_priv) {
1923 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1924 
1925 		if (s->inq)
1926 			dpaa_sec_detach_rxq(qi, s->inq);
1927 		rte_free(s->cipher_key.data);
1928 		rte_free(s->auth_key.data);
1929 		memset(s, 0, sizeof(dpaa_sec_session));
1930 		set_session_private_data(sess, index, NULL);
1931 		rte_mempool_put(sess_mp, sess_priv);
1932 	}
1933 }
1934 
1935 static int
1936 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1937 			   struct rte_security_session_conf *conf,
1938 			   void *sess)
1939 {
1940 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1941 	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1942 	struct rte_crypto_auth_xform *auth_xform;
1943 	struct rte_crypto_cipher_xform *cipher_xform;
1944 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
1945 
1946 	PMD_INIT_FUNC_TRACE();
1947 
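	/*
	 * The crypto xform chain is expected to be cipher then auth for
	 * egress (encap) and auth then cipher for ingress (decap); both
	 * xforms must be present, as crypto_xform->next is dereferenced
	 * unconditionally below.
	 */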
1948 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1949 		cipher_xform = &conf->crypto_xform->cipher;
1950 		auth_xform = &conf->crypto_xform->next->auth;
1951 	} else {
1952 		auth_xform = &conf->crypto_xform->auth;
1953 		cipher_xform = &conf->crypto_xform->next->cipher;
1954 	}
1955 	session->proto_alg = conf->protocol;
1956 	session->cipher_key.data = rte_zmalloc(NULL,
1957 					       cipher_xform->key.length,
1958 					       RTE_CACHE_LINE_SIZE);
1959 	if (session->cipher_key.data == NULL &&
1960 			cipher_xform->key.length > 0) {
1961 		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1962 		return -ENOMEM;
1963 	}
1964 
1965 	session->cipher_key.length = cipher_xform->key.length;
1966 	session->auth_key.data = rte_zmalloc(NULL,
1967 					auth_xform->key.length,
1968 					RTE_CACHE_LINE_SIZE);
1969 	if (session->auth_key.data == NULL &&
1970 			auth_xform->key.length > 0) {
1971 		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1972 		rte_free(session->cipher_key.data);
1973 		return -ENOMEM;
1974 	}
1975 	session->auth_key.length = auth_xform->key.length;
1976 	memcpy(session->cipher_key.data, cipher_xform->key.data,
1977 			cipher_xform->key.length);
1978 	memcpy(session->auth_key.data, auth_xform->key.data,
1979 			auth_xform->key.length);
1980 
1981 	switch (auth_xform->algo) {
1982 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
1983 		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1984 		break;
1985 	case RTE_CRYPTO_AUTH_MD5_HMAC:
1986 		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1987 		break;
1988 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
1989 		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1990 		break;
1991 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
1992 		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1993 		break;
1994 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
1995 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1996 		break;
1997 	case RTE_CRYPTO_AUTH_AES_CMAC:
1998 		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1999 		break;
2000 	case RTE_CRYPTO_AUTH_NULL:
2001 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2002 		break;
2003 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
2004 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2005 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2006 	case RTE_CRYPTO_AUTH_SHA1:
2007 	case RTE_CRYPTO_AUTH_SHA256:
2008 	case RTE_CRYPTO_AUTH_SHA512:
2009 	case RTE_CRYPTO_AUTH_SHA224:
2010 	case RTE_CRYPTO_AUTH_SHA384:
2011 	case RTE_CRYPTO_AUTH_MD5:
2012 	case RTE_CRYPTO_AUTH_AES_GMAC:
2013 	case RTE_CRYPTO_AUTH_KASUMI_F9:
2014 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2015 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
2016 		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
2017 			auth_xform->algo);
2018 		goto out;
2019 	default:
2020 		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2021 			auth_xform->algo);
2022 		goto out;
2023 	}
2024 
2025 	switch (cipher_xform->algo) {
2026 	case RTE_CRYPTO_CIPHER_AES_CBC:
2027 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2028 		break;
2029 	case RTE_CRYPTO_CIPHER_3DES_CBC:
2030 		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2031 		break;
2032 	case RTE_CRYPTO_CIPHER_AES_CTR:
2033 		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2034 		break;
2035 	case RTE_CRYPTO_CIPHER_NULL:
2036 	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2037 	case RTE_CRYPTO_CIPHER_3DES_ECB:
2038 	case RTE_CRYPTO_CIPHER_AES_ECB:
2039 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
2040 		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2041 			cipher_xform->algo);
2042 		goto out;
2043 	default:
2044 		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2045 			cipher_xform->algo);
2046 		goto out;
2047 	}
2048 
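	/*
	 * Egress: prebuild the outer IPv4 tunnel header and inline it in
	 * the encap PDB (PDBOPTS_ESP_OIHI_PDB_INL); ip4_hdr sits right
	 * after encap_pdb in the session, so one memset clears both. SEC
	 * prepends this template to every frame, and the per-packet total
	 * length is updated by the SEC block during encapsulation.
	 */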
2049 	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2050 		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2051 				sizeof(session->ip4_hdr));
2052 		session->ip4_hdr.ip_v = IPVERSION;
2053 		session->ip4_hdr.ip_hl = 5;
2054 		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2055 						sizeof(session->ip4_hdr));
2056 		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2057 		session->ip4_hdr.ip_id = 0;
2058 		session->ip4_hdr.ip_off = 0;
2059 		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2060 		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2061 				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2062 				: IPPROTO_AH;
2063 		session->ip4_hdr.ip_sum = 0;
2064 		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2065 		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2066 		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2067 						(void *)&session->ip4_hdr,
2068 						sizeof(struct ip));
2069 
2070 		session->encap_pdb.options =
2071 			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2072 			PDBOPTS_ESP_OIHI_PDB_INL |
2073 			PDBOPTS_ESP_IVSRC |
2074 			PDBHMO_ESP_ENCAP_DTTL;
2075 		session->encap_pdb.spi = ipsec_xform->spi;
2076 		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2077 
2078 		session->dir = DIR_ENC;
2079 	} else if (ipsec_xform->direction ==
2080 			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2081 		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
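		/* The upper half of the decap PDB options word carries the
		 * length of the outer IP header to be stripped.
		 */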
2082 		session->decap_pdb.options = sizeof(struct ip) << 16;
2083 		session->dir = DIR_DEC;
2084 	} else
2085 		goto out;
2086 	session->ctx_pool = internals->ctx_pool;
2087 	session->inq = dpaa_sec_attach_rxq(internals);
2088 	if (session->inq == NULL) {
2089 		PMD_DRV_LOG(ERR, "unable to attach sec queue");
2090 		goto out;
2091 	}
2092 
2094 	return 0;
2095 out:
2096 	rte_free(session->auth_key.data);
2097 	rte_free(session->cipher_key.data);
2098 	memset(session, 0, sizeof(dpaa_sec_session));
	return -EINVAL;
2100 }
2101 
2102 static int
2103 dpaa_sec_security_session_create(void *dev,
2104 				 struct rte_security_session_conf *conf,
2105 				 struct rte_security_session *sess,
2106 				 struct rte_mempool *mempool)
2107 {
2108 	void *sess_private_data;
2109 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2110 	int ret;
2111 
2112 	if (rte_mempool_get(mempool, &sess_private_data)) {
2113 		CDEV_LOG_ERR(
2114 			"Couldn't get object from session mempool");
2115 		return -ENOMEM;
2116 	}
2117 
2118 	switch (conf->protocol) {
2119 	case RTE_SECURITY_PROTOCOL_IPSEC:
2120 		ret = dpaa_sec_set_ipsec_session(cdev, conf,
2121 				sess_private_data);
2122 		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
		break;
2127 	}
2128 	if (ret != 0) {
2129 		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");
2131 
2132 		/* Return session to mempool */
2133 		rte_mempool_put(mempool, sess_private_data);
2134 		return ret;
2135 	}
2136 
2137 	set_sec_session_private_data(sess, sess_private_data);
2138 
2139 	return ret;
2140 }
2141 
2142 /** Clear the memory of session so it doesn't leave key material behind */
2143 static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	struct dpaa_sec_dev_private *qi = cdev->data->dev_private;
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
2158 		set_sec_session_private_data(sess, NULL);
2159 		rte_mempool_put(sess_mp, sess_priv);
2160 	}
2161 	return 0;
2162 }
2163 
2164 
2165 static int
2166 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2167 		       struct rte_cryptodev_config *config __rte_unused)
2168 {
2169 	PMD_INIT_FUNC_TRACE();
2170 
2171 	return 0;
2172 }
2173 
2174 static int
2175 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2176 {
2177 	PMD_INIT_FUNC_TRACE();
2178 	return 0;
2179 }
2180 
2181 static void
2182 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2183 {
2184 	PMD_INIT_FUNC_TRACE();
2185 }
2186 
2187 static int
2188 dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
2189 {
2190 	PMD_INIT_FUNC_TRACE();
2191 	return 0;
2192 }
2193 
2194 static void
2195 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2196 		       struct rte_cryptodev_info *info)
2197 {
2198 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2199 
2200 	PMD_INIT_FUNC_TRACE();
2201 	if (info != NULL) {
2202 		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2203 		info->feature_flags = dev->feature_flags;
2204 		info->capabilities = dpaa_sec_capabilities;
2205 		info->sym.max_nb_sessions = internals->max_nb_sessions;
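		/* sessions are distributed evenly across the queue pairs */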
2206 		info->sym.max_nb_sessions_per_qp =
2207 			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2208 			RTE_DPAA_MAX_NB_SEC_QPS;
2209 		info->driver_id = cryptodev_driver_id;
2210 	}
2211 }
2212 
2213 static struct rte_cryptodev_ops crypto_ops = {
2214 	.dev_configure	      = dpaa_sec_dev_configure,
2215 	.dev_start	      = dpaa_sec_dev_start,
2216 	.dev_stop	      = dpaa_sec_dev_stop,
2217 	.dev_close	      = dpaa_sec_dev_close,
2218 	.dev_infos_get        = dpaa_sec_dev_infos_get,
2219 	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
2220 	.queue_pair_release   = dpaa_sec_queue_pair_release,
2221 	.queue_pair_start     = dpaa_sec_queue_pair_start,
2222 	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
2223 	.queue_pair_count     = dpaa_sec_queue_pair_count,
2224 	.session_get_size     = dpaa_sec_session_get_size,
2225 	.session_configure    = dpaa_sec_session_configure,
2226 	.session_clear        = dpaa_sec_session_clear,
2227 	.qp_attach_session    = dpaa_sec_qp_attach_sess,
2228 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
2229 };
2230 
2231 static const struct rte_security_capability *
2232 dpaa_sec_capabilities_get(void *device __rte_unused)
2233 {
2234 	return dpaa_sec_security_cap;
2235 }
2236 
2237 struct rte_security_ops dpaa_sec_security_ops = {
2238 	.session_create = dpaa_sec_security_session_create,
2239 	.session_update = NULL,
2240 	.session_stats_get = NULL,
2241 	.session_destroy = dpaa_sec_security_session_destroy,
2242 	.set_pkt_metadata = NULL,
2243 	.capabilities_get = dpaa_sec_capabilities_get
2244 };
2245 
2246 static int
2247 dpaa_sec_uninit(struct rte_cryptodev *dev)
2248 {
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
2253 
2254 	rte_free(dev->security_ctx);
2255 
2256 	rte_mempool_free(internals->ctx_pool);
2257 	rte_free(internals);
2258 
2259 	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
2260 		     dev->data->name, rte_socket_id());
2261 
2262 	return 0;
2263 }
2264 
2265 static int
2266 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2267 {
2268 	struct dpaa_sec_dev_private *internals;
2269 	struct rte_security_ctx *security_instance;
2270 	struct dpaa_sec_qp *qp;
2271 	uint32_t i, flags;
2272 	int ret;
2273 	char str[20];
2274 
2275 	PMD_INIT_FUNC_TRACE();
2276 
2277 	cryptodev->driver_id = cryptodev_driver_id;
2278 	cryptodev->dev_ops = &crypto_ops;
2279 
2280 	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2281 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2282 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2283 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
2284 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2285 			RTE_CRYPTODEV_FF_SECURITY |
2286 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2287 
2288 	internals = cryptodev->data->dev_private;
2289 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2290 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2291 
2292 	/*
2293 	 * For secondary processes, we don't initialise any further as primary
2294 	 * has already done this work. Only check we don't need a different
2295 	 * RX function
2296 	 */
2297 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		PMD_INIT_LOG(DEBUG, "Device already initialized by primary process");
2299 		return 0;
2300 	}
2301 
2302 	/* Initialize security_ctx only for primary process*/
2303 	security_instance = rte_malloc("rte_security_instances_ops",
2304 				sizeof(struct rte_security_ctx), 0);
2305 	if (security_instance == NULL)
2306 		return -ENOMEM;
2307 	security_instance->device = (void *)cryptodev;
2308 	security_instance->ops = &dpaa_sec_security_ops;
2309 	security_instance->sess_cnt = 0;
2310 	cryptodev->security_ctx = security_instance;
2311 
2312 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2313 		/* init qman fq for queue pair */
2314 		qp = &internals->qps[i];
2315 		ret = dpaa_sec_init_tx(&qp->outq);
2316 		if (ret) {
			PMD_INIT_LOG(ERR, "failed to init tx fq of queue pair %d", i);
2318 			goto init_error;
2319 		}
2320 	}
2321 
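	/*
	 * Pre-create one RX frame queue per possible session. These FQs
	 * deliver frames straight to the SEC block via its direct-connect
	 * portal (QMAN_FQ_FLAG_TO_DCPORTAL); a free queue is bound to a
	 * session when the session attaches.
	 */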
2322 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2323 		QMAN_FQ_FLAG_TO_DCPORTAL;
2324 	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
2326 		ret = qman_create_fq(0, flags, &internals->inq[i]);
2327 		if (unlikely(ret != 0)) {
2328 			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
2329 			goto init_error;
2330 		}
2331 	}
2332 
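	/*
	 * Pool of per-operation contexts; each object holds the SG
	 * entries and completion status for one crypto op in flight
	 * (see dpaa_sec_alloc_ctx()).
	 */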
	snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
2334 	internals->ctx_pool = rte_mempool_create((const char *)str,
2335 			CTX_POOL_NUM_BUFS,
2336 			CTX_POOL_BUF_SIZE,
2337 			CTX_POOL_CACHE_SIZE, 0,
2338 			NULL, NULL, NULL, NULL,
2339 			SOCKET_ID_ANY, 0);
2340 	if (!internals->ctx_pool) {
2341 		RTE_LOG(ERR, PMD, "%s create failed\n", str);
2342 		goto init_error;
2343 	}
2344 
2345 	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
2346 	return 0;
2347 
2348 init_error:
2349 	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
2350 
2351 	dpaa_sec_uninit(cryptodev);
2352 	return -EFAULT;
2353 }
2354 
2355 static int
2356 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2357 				struct rte_dpaa_device *dpaa_dev)
2358 {
2359 	struct rte_cryptodev *cryptodev;
2360 	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2361 
2362 	int retval;
2363 
	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);
2365 
2366 	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2367 	if (cryptodev == NULL)
2368 		return -ENOMEM;
2369 
2370 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2371 		cryptodev->data->dev_private = rte_zmalloc_socket(
2372 					"cryptodev private structure",
2373 					sizeof(struct dpaa_sec_dev_private),
2374 					RTE_CACHE_LINE_SIZE,
2375 					rte_socket_id());
2376 
2377 		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
					"device data");
2380 	}
2381 
2382 	dpaa_dev->crypto_dev = cryptodev;
2383 	cryptodev->device = &dpaa_dev->device;
2384 	cryptodev->device->driver = &dpaa_drv->driver;
2385 
2386 	/* init user callbacks */
2387 	TAILQ_INIT(&(cryptodev->link_intr_cbs));
2388 
	/* If the SEC era (hardware revision) is not already configured,
	 * read it from the "fsl,sec-era" device tree property of the
	 * CAAM node.
	 */
2390 	if (!rta_get_sec_era()) {
2391 		const struct device_node *caam_node;
2392 
2393 		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2394 			const uint32_t *prop = of_get_property(caam_node,
2395 					"fsl,sec-era",
2396 					NULL);
2397 			if (prop) {
2398 				rta_set_sec_era(
2399 					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2400 				break;
2401 			}
2402 		}
2403 	}
2404 
2405 	/* Invoke PMD device initialization function */
2406 	retval = dpaa_sec_dev_init(cryptodev);
2407 	if (retval == 0)
2408 		return 0;
2409 
	/* On error, release everything allocated above */
2411 	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2412 		rte_free(cryptodev->data->dev_private);
2413 
2414 	rte_cryptodev_pmd_release_device(cryptodev);
2415 
2416 	return -ENXIO;
2417 }
2418 
2419 static int
2420 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2421 {
2422 	struct rte_cryptodev *cryptodev;
2423 	int ret;
2424 
2425 	cryptodev = dpaa_dev->crypto_dev;
2426 	if (cryptodev == NULL)
2427 		return -ENODEV;
2428 
2429 	ret = dpaa_sec_uninit(cryptodev);
2430 	if (ret)
2431 		return ret;
2432 
2433 	return rte_cryptodev_pmd_destroy(cryptodev);
2434 }
2435 
2436 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2437 	.drv_type = FSL_DPAA_CRYPTO,
2438 	.driver = {
2439 		.name = "DPAA SEC PMD"
2440 	},
2441 	.probe = cryptodev_dpaa_sec_probe,
2442 	.remove = cryptodev_dpaa_sec_remove,
2443 };
2444 
2445 static struct cryptodev_driver dpaa_sec_crypto_drv;
2446 
2447 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2448 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
2449 		cryptodev_driver_id);
2450